linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH 1/3] ftrace: Factor out __ftrace_hash_move()
@ 2017-01-20  2:44 Namhyung Kim
  2017-01-20  2:44 ` [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip Namhyung Kim
                   ` (2 more replies)
  0 siblings, 3 replies; 9+ messages in thread
From: Namhyung Kim @ 2017-01-20  2:44 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: LKML, Ingo Molnar

The __ftrace_hash_move() allocates a properly-sized hash and moves the
entries in the src ftrace_hash.  It will be used to set function graph
filters, which have nothing to do with the dyn_ftrace records.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 kernel/trace/ftrace.c | 40 ++++++++++++++++++++++++----------------
 1 file changed, 24 insertions(+), 16 deletions(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eb230f06ba41..37b0e948d924 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1383,9 +1383,8 @@ ftrace_hash_rec_enable_modify(struct ftrace_ops *ops, int filter_hash);
 static int ftrace_hash_ipmodify_update(struct ftrace_ops *ops,
 				       struct ftrace_hash *new_hash);
 
-static int
-ftrace_hash_move(struct ftrace_ops *ops, int enable,
-		 struct ftrace_hash **dst, struct ftrace_hash *src)
+static struct ftrace_hash *
+__ftrace_hash_move(struct ftrace_hash *src)
 {
 	struct ftrace_func_entry *entry;
 	struct hlist_node *tn;
@@ -1393,21 +1392,13 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 	struct ftrace_hash *new_hash;
 	int size = src->count;
 	int bits = 0;
-	int ret;
 	int i;
 
-	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
-	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
-		return -EINVAL;
-
 	/*
-	 * If the new source is empty, just free dst and assign it
-	 * the empty_hash.
+	 * If the new source is empty, just return the empty_hash.
 	 */
-	if (!src->count) {
-		new_hash = EMPTY_HASH;
-		goto update;
-	}
+	if (!src->count)
+		return EMPTY_HASH;
 
 	/*
 	 * Make the hash size about 1/2 the # found
@@ -1421,7 +1412,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 
 	new_hash = alloc_ftrace_hash(bits);
 	if (!new_hash)
-		return -ENOMEM;
+		return NULL;
 
 	size = 1 << src->size_bits;
 	for (i = 0; i < size; i++) {
@@ -1432,7 +1423,24 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 		}
 	}
 
-update:
+	return new_hash;
+}
+
+static int
+ftrace_hash_move(struct ftrace_ops *ops, int enable,
+		 struct ftrace_hash **dst, struct ftrace_hash *src)
+{
+	struct ftrace_hash *new_hash;
+	int ret;
+
+	/* Reject setting notrace hash on IPMODIFY ftrace_ops */
+	if (ops->flags & FTRACE_OPS_FL_IPMODIFY && !enable)
+		return -EINVAL;
+
+	new_hash = __ftrace_hash_move(src);
+	if (!new_hash)
+		return -ENOMEM;
+
 	/* Make sure this can be applied if it is IPMODIFY ftrace_ops */
 	if (enable) {
 		/* IPMODIFY should be updated only when filter_hash updating */
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip
  2017-01-20  2:44 [PATCH 1/3] ftrace: Factor out __ftrace_hash_move() Namhyung Kim
@ 2017-01-20  2:44 ` Namhyung Kim
  2017-01-20 19:54   ` Steven Rostedt
  2017-01-20  2:44 ` [PATCH 3/3] ftrace: Convert graph filter to use hash tables Namhyung Kim
  2017-01-20 17:25 ` [PATCH 1/3] ftrace: Factor out __ftrace_hash_move() Steven Rostedt
  2 siblings, 1 reply; 9+ messages in thread
From: Namhyung Kim @ 2017-01-20  2:44 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: LKML, Ingo Molnar

It will be used when checking graph filter hashes later.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 kernel/trace/ftrace.c | 14 +-------------
 kernel/trace/trace.h  | 14 ++++++++++++++
 2 files changed, 15 insertions(+), 13 deletions(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 37b0e948d924..0470e373b9b4 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1110,13 +1110,6 @@ struct ftrace_func_entry {
 	unsigned long ip;
 };
 
-struct ftrace_hash {
-	unsigned long		size_bits;
-	struct hlist_head	*buckets;
-	unsigned long		count;
-	struct rcu_head		rcu;
-};
-
 /*
  * We make these constant because no one should touch them,
  * but they are used as the default "empty hash", to avoid allocating
@@ -1192,12 +1185,7 @@ struct ftrace_page {
 static struct ftrace_page	*ftrace_pages_start;
 static struct ftrace_page	*ftrace_pages;
 
-static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
-{
-	return !hash || !hash->count;
-}
-
-static struct ftrace_func_entry *
+struct ftrace_func_entry *
 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
 {
 	unsigned long key;
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 1ea51ab53edf..431a39ba8eee 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -787,6 +787,20 @@ extern void __trace_graph_return(struct trace_array *tr,
 				 struct ftrace_graph_ret *trace,
 				 unsigned long flags, int pc);
 
+struct ftrace_hash {
+	unsigned long		size_bits;
+	struct hlist_head	*buckets;
+	unsigned long		count;
+	struct rcu_head		rcu;
+};
+
+struct ftrace_func_entry *
+ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
+
+static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
+{
+	return !hash || !hash->count;
+}
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 /* TODO: make this variable */
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* [PATCH 3/3] ftrace: Convert graph filter to use hash tables
  2017-01-20  2:44 [PATCH 1/3] ftrace: Factor out __ftrace_hash_move() Namhyung Kim
  2017-01-20  2:44 ` [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip Namhyung Kim
@ 2017-01-20  2:44 ` Namhyung Kim
  2017-01-20 17:25 ` [PATCH 1/3] ftrace: Factor out __ftrace_hash_move() Steven Rostedt
  2 siblings, 0 replies; 9+ messages in thread
From: Namhyung Kim @ 2017-01-20  2:44 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: LKML, Ingo Molnar

Use ftrace_hash instead of a static array of a fixed size.  This is
useful when a graph filter pattern matches to a large number of
functions.  Now hash lookup is done with preemption disabled to protect
from the hash being changed/freed.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
 kernel/trace/ftrace.c | 190 +++++++++++++++++++++++++++++++++-----------------
 kernel/trace/trace.h  |  64 ++++++++---------
 2 files changed, 158 insertions(+), 96 deletions(-)

diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0470e373b9b4..2d554a02241d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4378,7 +4378,7 @@ __setup("ftrace_filter=", set_ftrace_filter);
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
 static char ftrace_graph_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
-static int ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer);
+static int ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer);
 
 static unsigned long save_global_trampoline;
 static unsigned long save_global_flags;
@@ -4401,18 +4401,17 @@ static void __init set_ftrace_early_graph(char *buf, int enable)
 {
 	int ret;
 	char *func;
-	unsigned long *table = ftrace_graph_funcs;
-	int *count = &ftrace_graph_count;
+	struct ftrace_hash *hash;
 
-	if (!enable) {
-		table = ftrace_graph_notrace_funcs;
-		count = &ftrace_graph_notrace_count;
-	}
+	if (enable)
+		hash = ftrace_graph_hash;
+	else
+		hash = ftrace_graph_notrace_hash;
 
 	while (buf) {
 		func = strsep(&buf, ",");
 		/* we allow only one expression at a time */
-		ret = ftrace_set_func(table, count, FTRACE_GRAPH_MAX_FUNCS, func);
+		ret = ftrace_graph_set_hash(hash, func);
 		if (ret)
 			printk(KERN_DEBUG "ftrace: function %s not "
 					  "traceable\n", func);
@@ -4536,15 +4535,20 @@ static const struct file_operations ftrace_notrace_fops = {
 
 static DEFINE_MUTEX(graph_lock);
 
-int ftrace_graph_count;
-int ftrace_graph_notrace_count;
-unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
-unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
+struct ftrace_hash *ftrace_graph_hash = EMPTY_HASH;
+struct ftrace_hash *ftrace_graph_notrace_hash = EMPTY_HASH;
+
+enum graph_filter_type {
+	GRAPH_FILTER_NOTRACE	= 0,
+	GRAPH_FILTER_FUNCTION,
+};
 
 struct ftrace_graph_data {
-	unsigned long *table;
-	size_t size;
-	int *count;
+	struct ftrace_hash *hash;
+	struct ftrace_func_entry *entry;
+	int idx;   /* for hash table iteration */
+	enum graph_filter_type type;
+	struct ftrace_hash *new_hash;
 	const struct seq_operations *seq_ops;
 };
 
@@ -4552,10 +4556,31 @@ static void *
 __g_next(struct seq_file *m, loff_t *pos)
 {
 	struct ftrace_graph_data *fgd = m->private;
+	struct ftrace_func_entry *entry = fgd->entry;
+	struct hlist_head *head;
+	int i, idx = fgd->idx;
 
-	if (*pos >= *fgd->count)
+	if (*pos >= fgd->hash->count)
 		return NULL;
-	return &fgd->table[*pos];
+
+	if (entry) {
+		hlist_for_each_entry_continue(entry, hlist) {
+			fgd->entry = entry;
+			return entry;
+		}
+
+		idx++;
+	}
+
+	for (i = idx; i < 1 << fgd->hash->size_bits; i++) {
+		head = &fgd->hash->buckets[i];
+		hlist_for_each_entry(entry, head, hlist) {
+			fgd->entry = entry;
+			fgd->idx = i;
+			return entry;
+		}
+	}
+	return NULL;
 }
 
 static void *
@@ -4572,9 +4597,11 @@ static void *g_start(struct seq_file *m, loff_t *pos)
 	mutex_lock(&graph_lock);
 
 	/* Nothing, tell g_show to print all functions are enabled */
-	if (!*fgd->count && !*pos)
+	if (ftrace_hash_empty(fgd->hash) && !*pos)
 		return (void *)1;
 
+	fgd->idx = 0;
+	fgd->entry = NULL;
 	return __g_next(m, pos);
 }
 
@@ -4585,22 +4612,22 @@ static void g_stop(struct seq_file *m, void *p)
 
 static int g_show(struct seq_file *m, void *v)
 {
-	unsigned long *ptr = v;
+	struct ftrace_func_entry *entry = v;
 
-	if (!ptr)
+	if (!entry)
 		return 0;
 
-	if (ptr == (unsigned long *)1) {
+	if (entry == (void *)1) {
 		struct ftrace_graph_data *fgd = m->private;
 
-		if (fgd->table == ftrace_graph_funcs)
+		if (fgd->type == GRAPH_FILTER_FUNCTION)
 			seq_puts(m, "#### all functions enabled ####\n");
 		else
 			seq_puts(m, "#### no functions disabled ####\n");
 		return 0;
 	}
 
-	seq_printf(m, "%ps\n", (void *)*ptr);
+	seq_printf(m, "%ps\n", (void *)entry->ip);
 
 	return 0;
 }
@@ -4617,24 +4644,37 @@ __ftrace_graph_open(struct inode *inode, struct file *file,
 		    struct ftrace_graph_data *fgd)
 {
 	int ret = 0;
+	struct ftrace_hash *new_hash = NULL;
 
-	mutex_lock(&graph_lock);
-	if ((file->f_mode & FMODE_WRITE) &&
-	    (file->f_flags & O_TRUNC)) {
-		*fgd->count = 0;
-		memset(fgd->table, 0, fgd->size * sizeof(*fgd->table));
+	if (file->f_mode & FMODE_WRITE) {
+		const int size_bits = FTRACE_HASH_DEFAULT_BITS;
+
+		if (file->f_flags & O_TRUNC)
+			new_hash = alloc_ftrace_hash(size_bits);
+		else
+			new_hash = alloc_and_copy_ftrace_hash(size_bits,
+							      fgd->hash);
+		if (!new_hash) {
+			ret = -ENOMEM;
+			goto out;
+		}
 	}
-	mutex_unlock(&graph_lock);
 
 	if (file->f_mode & FMODE_READ) {
-		ret = seq_open(file, fgd->seq_ops);
+		ret = seq_open(file, &ftrace_graph_seq_ops);
 		if (!ret) {
 			struct seq_file *m = file->private_data;
 			m->private = fgd;
+		} else {
+			/* Failed */
+			free_ftrace_hash(new_hash);
+			new_hash = NULL;
 		}
 	} else
 		file->private_data = fgd;
 
+out:
+	fgd->new_hash = new_hash;
 	return ret;
 }
 
@@ -4642,6 +4682,7 @@ static int
 ftrace_graph_open(struct inode *inode, struct file *file)
 {
 	struct ftrace_graph_data *fgd;
+	int ret;
 
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
@@ -4650,18 +4691,25 @@ ftrace_graph_open(struct inode *inode, struct file *file)
 	if (fgd == NULL)
 		return -ENOMEM;
 
-	fgd->table = ftrace_graph_funcs;
-	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
-	fgd->count = &ftrace_graph_count;
+	mutex_lock(&graph_lock);
+
+	fgd->hash = ftrace_graph_hash;
+	fgd->type = GRAPH_FILTER_FUNCTION;
 	fgd->seq_ops = &ftrace_graph_seq_ops;
 
-	return __ftrace_graph_open(inode, file, fgd);
+	ret = __ftrace_graph_open(inode, file, fgd);
+	if (ret < 0)
+		kfree(fgd);
+
+	mutex_unlock(&graph_lock);
+	return ret;
 }
 
 static int
 ftrace_graph_notrace_open(struct inode *inode, struct file *file)
 {
 	struct ftrace_graph_data *fgd;
+	int ret;
 
 	if (unlikely(ftrace_disabled))
 		return -ENODEV;
@@ -4670,45 +4718,53 @@ ftrace_graph_notrace_open(struct inode *inode, struct file *file)
 	if (fgd == NULL)
 		return -ENOMEM;
 
-	fgd->table = ftrace_graph_notrace_funcs;
-	fgd->size = FTRACE_GRAPH_MAX_FUNCS;
-	fgd->count = &ftrace_graph_notrace_count;
+	mutex_lock(&graph_lock);
+
+	fgd->hash = ftrace_graph_notrace_hash;
+	fgd->type = GRAPH_FILTER_NOTRACE;
 	fgd->seq_ops = &ftrace_graph_seq_ops;
 
-	return __ftrace_graph_open(inode, file, fgd);
+	ret = __ftrace_graph_open(inode, file, fgd);
+	if (ret < 0)
+		kfree(fgd);
+
+	mutex_unlock(&graph_lock);
+	return ret;
 }
 
 static int
 ftrace_graph_release(struct inode *inode, struct file *file)
 {
+	struct ftrace_graph_data *fgd;
+
 	if (file->f_mode & FMODE_READ) {
 		struct seq_file *m = file->private_data;
 
-		kfree(m->private);
+		fgd = m->private;
 		seq_release(inode, file);
 	} else {
-		kfree(file->private_data);
+		fgd = file->private_data;
 	}
 
+	kfree(fgd->new_hash);
+	kfree(fgd);
+
 	return 0;
 }
 
 static int
-ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
+ftrace_graph_set_hash(struct ftrace_hash *hash, char *buffer)
 {
 	struct ftrace_glob func_g;
 	struct dyn_ftrace *rec;
 	struct ftrace_page *pg;
+	struct ftrace_func_entry *entry;
 	int fail = 1;
 	int not;
-	bool exists;
-	int i;
 
 	/* decode regex */
 	func_g.type = filter_parse_regex(buffer, strlen(buffer),
 					 &func_g.search, &not);
-	if (!not && *idx >= size)
-		return -EBUSY;
 
 	func_g.len = strlen(func_g.search);
 
@@ -4725,26 +4781,18 @@ ftrace_set_func(unsigned long *array, int *idx, int size, char *buffer)
 			continue;
 
 		if (ftrace_match_record(rec, &func_g, NULL, 0)) {
-			/* if it is in the array */
-			exists = false;
-			for (i = 0; i < *idx; i++) {
-				if (array[i] == rec->ip) {
-					exists = true;
-					break;
-				}
-			}
+			entry = ftrace_lookup_ip(hash, rec->ip);
 
 			if (!not) {
 				fail = 0;
-				if (!exists) {
-					array[(*idx)++] = rec->ip;
-					if (*idx >= size)
-						goto out;
-				}
+
+				if (entry)
+					continue;
+				if (add_hash_entry(hash, rec->ip) < 0)
+					goto out;
 			} else {
-				if (exists) {
-					array[i] = array[--(*idx)];
-					array[*idx] = 0;
+				if (entry) {
+					free_hash_entry(hash, entry);
 					fail = 0;
 				}
 			}
@@ -4766,6 +4814,7 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 	struct trace_parser parser;
 	ssize_t read, ret = 0;
 	struct ftrace_graph_data *fgd = file->private_data;
+	struct ftrace_hash *old_hash, *new_hash;
 
 	if (!cnt)
 		return 0;
@@ -4781,10 +4830,25 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
 		mutex_lock(&graph_lock);
 
 		/* we allow only one expression at a time */
-		ret = ftrace_set_func(fgd->table, fgd->count, fgd->size,
-				      parser.buffer);
+		ret = ftrace_graph_set_hash(fgd->new_hash,
+					    parser.buffer);
+
+		old_hash = fgd->hash;
+		new_hash = __ftrace_hash_move(fgd->new_hash);
+		if (!new_hash)
+			ret = -ENOMEM;
+
+		if (fgd->type == GRAPH_FILTER_FUNCTION)
+			rcu_assign_pointer(ftrace_graph_hash, new_hash);
+		else
+			rcu_assign_pointer(ftrace_graph_notrace_hash, new_hash);
 
 		mutex_unlock(&graph_lock);
+
+		/* Wait till all users are no longer using the old hash */
+		synchronize_sched();
+
+		free_ftrace_hash(old_hash);
 	}
 
 	if (!ret)
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 431a39ba8eee..ff39e92bacc7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -803,51 +803,49 @@ static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
 }
 
 #ifdef CONFIG_DYNAMIC_FTRACE
-/* TODO: make this variable */
-#define FTRACE_GRAPH_MAX_FUNCS		32
-extern int ftrace_graph_count;
-extern unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS];
-extern int ftrace_graph_notrace_count;
-extern unsigned long ftrace_graph_notrace_funcs[FTRACE_GRAPH_MAX_FUNCS];
+extern struct ftrace_hash *ftrace_graph_hash;
+extern struct ftrace_hash *ftrace_graph_notrace_hash;
 
 static inline int ftrace_graph_addr(unsigned long addr)
 {
-	int i;
-
-	if (!ftrace_graph_count)
-		return 1;
-
-	for (i = 0; i < ftrace_graph_count; i++) {
-		if (addr == ftrace_graph_funcs[i]) {
-			/*
-			 * If no irqs are to be traced, but a set_graph_function
-			 * is set, and called by an interrupt handler, we still
-			 * want to trace it.
-			 */
-			if (in_irq())
-				trace_recursion_set(TRACE_IRQ_BIT);
-			else
-				trace_recursion_clear(TRACE_IRQ_BIT);
-			return 1;
-		}
+	int ret = 0;
+
+	preempt_disable_notrace();
+
+	if (ftrace_hash_empty(ftrace_graph_hash)) {
+		ret = 1;
+		goto out;
 	}
 
-	return 0;
+	if (ftrace_lookup_ip(ftrace_graph_hash, addr)) {
+		/*
+		 * If no irqs are to be traced, but a set_graph_function
+		 * is set, and called by an interrupt handler, we still
+		 * want to trace it.
+		 */
+		if (in_irq())
+			trace_recursion_set(TRACE_IRQ_BIT);
+		else
+			trace_recursion_clear(TRACE_IRQ_BIT);
+		ret = 1;
+	}
+
+out:
+	preempt_enable_notrace();
+	return ret;
 }
 
 static inline int ftrace_graph_notrace_addr(unsigned long addr)
 {
-	int i;
+	int ret = 0;
 
-	if (!ftrace_graph_notrace_count)
-		return 0;
+	preempt_disable_notrace();
 
-	for (i = 0; i < ftrace_graph_notrace_count; i++) {
-		if (addr == ftrace_graph_notrace_funcs[i])
-			return 1;
-	}
+	if (ftrace_lookup_ip(ftrace_graph_notrace_hash, addr))
+		ret = 1;
 
-	return 0;
+	preempt_enable_notrace();
+	return ret;
 }
 #else
 static inline int ftrace_graph_addr(unsigned long addr)
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/3] ftrace: Factor out __ftrace_hash_move()
  2017-01-20  2:44 [PATCH 1/3] ftrace: Factor out __ftrace_hash_move() Namhyung Kim
  2017-01-20  2:44 ` [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip Namhyung Kim
  2017-01-20  2:44 ` [PATCH 3/3] ftrace: Convert graph filter to use hash tables Namhyung Kim
@ 2017-01-20 17:25 ` Steven Rostedt
  2017-01-21  0:24   ` Namhyung Kim
  2 siblings, 1 reply; 9+ messages in thread
From: Steven Rostedt @ 2017-01-20 17:25 UTC (permalink / raw)
  To: Namhyung Kim; +Cc: LKML, Ingo Molnar

On Fri, 20 Jan 2017 11:44:45 +0900
Namhyung Kim <namhyung@kernel.org> wrote:

> The __ftrace_hash_move() allocates a properly-sized hash and moves the
> entries in the src ftrace_hash.  It will be used to set function graph
> filters, which have nothing to do with the dyn_ftrace records.
> 
> Signed-off-by: Namhyung Kim <namhyung@kernel.org>

Hmm, no cover letter?

Anyway, I applied the patches and I'm testing them now. I took a look
over them and they seem good, although, there's a few optimizations I
want to add. But I'll do that later.

Thanks!

-- Steve

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip
  2017-01-20  2:44 ` [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip Namhyung Kim
@ 2017-01-20 19:54   ` Steven Rostedt
  2017-01-21  0:27     ` Namhyung Kim
  0 siblings, 1 reply; 9+ messages in thread
From: Steven Rostedt @ 2017-01-20 19:54 UTC (permalink / raw)
  To: Namhyung Kim; +Cc: LKML, Ingo Molnar

On Fri, 20 Jan 2017 11:44:46 +0900
Namhyung Kim <namhyung@kernel.org> wrote:


> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -787,6 +787,20 @@ extern void __trace_graph_return(struct trace_array *tr,
>  				 struct ftrace_graph_ret *trace,
>  				 unsigned long flags, int pc);
>  
> +struct ftrace_hash {
> +	unsigned long		size_bits;
> +	struct hlist_head	*buckets;
> +	unsigned long		count;
> +	struct rcu_head		rcu;
> +};
> +
> +struct ftrace_func_entry *
> +ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
> +
> +static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
> +{
> +	return !hash || !hash->count;
> +}

Note, I had to modify this patch and move this declaration outside of
the #ifdef CONFIG_FUNCTION_GRAPH_TRACER, as it failed to build when
function graph wasn't enabled. Function tracer uses this too.

-- Steve

>  
>  #ifdef CONFIG_DYNAMIC_FTRACE
>  /* TODO: make this variable */

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 1/3] ftrace: Factor out __ftrace_hash_move()
  2017-01-20 17:25 ` [PATCH 1/3] ftrace: Factor out __ftrace_hash_move() Steven Rostedt
@ 2017-01-21  0:24   ` Namhyung Kim
  0 siblings, 0 replies; 9+ messages in thread
From: Namhyung Kim @ 2017-01-21  0:24 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: LKML, Ingo Molnar

Hi Steve,

On Fri, Jan 20, 2017 at 12:25:35PM -0500, Steven Rostedt wrote:
> On Fri, 20 Jan 2017 11:44:45 +0900
> Namhyung Kim <namhyung@kernel.org> wrote:
> 
> > The __ftrace_hash_move() allocates a properly-sized hash and moves the
> > entries in the src ftrace_hash.  It will be used to set function graph
> > filters, which have nothing to do with the dyn_ftrace records.
> > 
> > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
> 
> Hmm, no cover letter?

Oh, I thought it was a conceptually simple and small patch series, so I
didn't add a cover letter.  Would you prefer to see a cover letter anyway?

> 
> Anyway, I applied the patches and I'm testing them now. I took a look
> over them and they seem good, although, there's a few optimizations I
> want to add. But I'll do that later.

Thanks, if you have anything for me to do, please let me know.
Namhyung

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip
  2017-01-20 19:54   ` Steven Rostedt
@ 2017-01-21  0:27     ` Namhyung Kim
  2017-01-21  2:13       ` Steven Rostedt
  0 siblings, 1 reply; 9+ messages in thread
From: Namhyung Kim @ 2017-01-21  0:27 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: LKML, Ingo Molnar

On Fri, Jan 20, 2017 at 02:54:20PM -0500, Steven Rostedt wrote:
> On Fri, 20 Jan 2017 11:44:46 +0900
> Namhyung Kim <namhyung@kernel.org> wrote:
> 
> 
> > --- a/kernel/trace/trace.h
> > +++ b/kernel/trace/trace.h
> > @@ -787,6 +787,20 @@ extern void __trace_graph_return(struct trace_array *tr,
> >  				 struct ftrace_graph_ret *trace,
> >  				 unsigned long flags, int pc);
> >  
> > +struct ftrace_hash {
> > +	unsigned long		size_bits;
> > +	struct hlist_head	*buckets;
> > +	unsigned long		count;
> > +	struct rcu_head		rcu;
> > +};
> > +
> > +struct ftrace_func_entry *
> > +ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip);
> > +
> > +static bool __always_inline ftrace_hash_empty(struct ftrace_hash *hash)
> > +{
> > +	return !hash || !hash->count;
> > +}
> 
> Note, I had to modify this patch and move this declaration outside of
> the #ifdef CONFIG_FUNCTION_GRAPH_TRACER, as it failed to build when
> function graph wasn't enabled. Function tracer uses this too.

Oops, my bad.

Did you modify it in your tree?  Or do you want me to resend v2?

Thanks,
Namhyung


> >  
> >  #ifdef CONFIG_DYNAMIC_FTRACE
> >  /* TODO: make this variable */
> 

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip
  2017-01-21  0:27     ` Namhyung Kim
@ 2017-01-21  2:13       ` Steven Rostedt
  2017-01-21  2:26         ` Namhyung Kim
  0 siblings, 1 reply; 9+ messages in thread
From: Steven Rostedt @ 2017-01-21  2:13 UTC (permalink / raw)
  To: Namhyung Kim; +Cc: LKML, Ingo Molnar

On Sat, 21 Jan 2017 09:27:06 +0900
Namhyung Kim <namhyung@kernel.org> wrote:


> > Note, I had to modify this patch and move this declaration outside of
> > the #ifdef CONFIG_FUNCTION_GRAPH_TRACER, as it failed to build when
> > function graph wasn't enabled. Function tracer uses this too.  
> 
> Oops, my bad.
> 
> Did you modify it in your tree?  Or do you want me to resend v2?

No, it's a trivial change and I made the modification and noted it in
the change log. You can see the change in my tree under the ftrace/core
branch. I'm taking off next week and I wanted to get this series tested
before I go. The tests are still running.

-- Steve

^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip
  2017-01-21  2:13       ` Steven Rostedt
@ 2017-01-21  2:26         ` Namhyung Kim
  0 siblings, 0 replies; 9+ messages in thread
From: Namhyung Kim @ 2017-01-21  2:26 UTC (permalink / raw)
  To: Steven Rostedt; +Cc: LKML, Ingo Molnar

On Fri, Jan 20, 2017 at 09:13:17PM -0500, Steven Rostedt wrote:
> On Sat, 21 Jan 2017 09:27:06 +0900
> Namhyung Kim <namhyung@kernel.org> wrote:
> 
> 
> > > Note, I had to modify this patch and move this declaration outside of
> > > the #ifdef CONFIG_FUNCTION_GRAPH_TRACER, as it failed to build when
> > > function graph wasn't enabled. Function tracer uses this too.  
> > 
> > Oops, my bad.
> > 
> > Did you modify it in your tree?  Or do you want me to resend v2?
> 
> No, it's a trivial change and I made the modification and noted it in
> the change log. You can see the change in my tree under the ftrace/core
> branch. I'm taking off next week and I wanted to get this series tested
> before I go. The tests are still running.

Thanks for doing that!
Namhyung

^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2017-01-21  2:28 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-01-20  2:44 [PATCH 1/3] ftrace: Factor out __ftrace_hash_move() Namhyung Kim
2017-01-20  2:44 ` [PATCH 2/3] ftrace: Expose ftrace_hash_empty and ftrace_lookup_ip Namhyung Kim
2017-01-20 19:54   ` Steven Rostedt
2017-01-21  0:27     ` Namhyung Kim
2017-01-21  2:13       ` Steven Rostedt
2017-01-21  2:26         ` Namhyung Kim
2017-01-20  2:44 ` [PATCH 3/3] ftrace: Convert graph filter to use hash tables Namhyung Kim
2017-01-20 17:25 ` [PATCH 1/3] ftrace: Factor out __ftrace_hash_move() Steven Rostedt
2017-01-21  0:24   ` Namhyung Kim

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).