linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] binder: replace kzalloc with kmem_cache
@ 2016-11-22 11:17 ` Ganesh Mahendran
  2016-11-22 13:53   ` Greg KH
  2016-11-22 14:48   ` [PATCH] binder: fix ifnullfree.cocci warnings kbuild test robot
  0 siblings, 2 replies; 6+ messages in thread
From: Ganesh Mahendran @ 2016-11-22 11:17 UTC (permalink / raw)
  To: gregkh, arve, riandrews; +Cc: linux-kernel, Ganesh Mahendran

This patch use kmem_cache to allocate/free binder objects.

It will have better memory efficiency. And we can also get
object usage details in /sys/kernel/slab/* for further analysis.

Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
---
 drivers/android/binder.c | 127 ++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 104 insertions(+), 23 deletions(-)

diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 3c71b98..f1f8362 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -54,6 +54,14 @@
 static HLIST_HEAD(binder_deferred_list);
 static HLIST_HEAD(binder_dead_nodes);
 
+static struct kmem_cache *binder_proc_cachep;
+static struct kmem_cache *binder_thread_cachep;
+static struct kmem_cache *binder_node_cachep;
+static struct kmem_cache *binder_ref_cachep;
+static struct kmem_cache *binder_transaction_cachep;
+static struct kmem_cache *binder_work_cachep;
+static struct kmem_cache *binder_ref_death_cachep;
+
 static struct dentry *binder_debugfs_dir_entry_root;
 static struct dentry *binder_debugfs_dir_entry_proc;
 static struct binder_node *binder_context_mgr_node;
@@ -902,7 +910,7 @@ static struct binder_node *binder_new_node(struct binder_proc *proc,
 			return NULL;
 	}
 
-	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	node = kmem_cache_zalloc(binder_node_cachep, GFP_KERNEL);
 	if (node == NULL)
 		return NULL;
 	binder_stats_created(BINDER_STAT_NODE);
@@ -992,7 +1000,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
 					     "dead node %d deleted\n",
 					     node->debug_id);
 			}
-			kfree(node);
+			kmem_cache_free(binder_node_cachep, node);
 			binder_stats_deleted(BINDER_STAT_NODE);
 		}
 	}
@@ -1043,7 +1051,7 @@ static struct binder_ref *binder_get_ref_for_node(struct binder_proc *proc,
 		else
 			return ref;
 	}
-	new_ref = kzalloc(sizeof(*ref), GFP_KERNEL);
+	new_ref = kmem_cache_zalloc(binder_ref_cachep, GFP_KERNEL);
 	if (new_ref == NULL)
 		return NULL;
 	binder_stats_created(BINDER_STAT_REF);
@@ -1108,10 +1116,10 @@ static void binder_delete_ref(struct binder_ref *ref)
 			     "%d delete ref %d desc %d has death notification\n",
 			      ref->proc->pid, ref->debug_id, ref->desc);
 		list_del(&ref->death->work.entry);
-		kfree(ref->death);
+		kmem_cache_free(binder_ref_death_cachep, ref->death);
 		binder_stats_deleted(BINDER_STAT_DEATH);
 	}
-	kfree(ref);
+	kmem_cache_free(binder_ref_cachep, ref);
 	binder_stats_deleted(BINDER_STAT_REF);
 }
 
@@ -1183,7 +1191,7 @@ static void binder_pop_transaction(struct binder_thread *target_thread,
 	t->need_reply = 0;
 	if (t->buffer)
 		t->buffer->transaction = NULL;
-	kfree(t);
+	kmem_cache_free(binder_transaction_cachep, t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 }
 
@@ -1444,14 +1452,14 @@ static void binder_transaction(struct binder_proc *proc,
 	e->to_proc = target_proc->pid;
 
 	/* TODO: reuse incoming transaction for reply */
-	t = kzalloc(sizeof(*t), GFP_KERNEL);
+	t = kmem_cache_zalloc(binder_transaction_cachep, GFP_KERNEL);
 	if (t == NULL) {
 		return_error = BR_FAILED_REPLY;
 		goto err_alloc_t_failed;
 	}
 	binder_stats_created(BINDER_STAT_TRANSACTION);
 
-	tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL);
+	tcomplete = kmem_cache_zalloc(binder_work_cachep, GFP_KERNEL);
 	if (tcomplete == NULL) {
 		return_error = BR_FAILED_REPLY;
 		goto err_alloc_tcomplete_failed;
@@ -1742,10 +1750,10 @@ static void binder_transaction(struct binder_proc *proc,
 	t->buffer->transaction = NULL;
 	binder_free_buf(target_proc, t->buffer);
 err_binder_alloc_buf_failed:
-	kfree(tcomplete);
+	kmem_cache_free(binder_work_cachep, tcomplete);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 err_alloc_tcomplete_failed:
-	kfree(t);
+	kmem_cache_free(binder_transaction_cachep, t);
 	binder_stats_deleted(BINDER_STAT_TRANSACTION);
 err_alloc_t_failed:
 err_bad_call_stack:
@@ -2039,7 +2047,7 @@ static int binder_thread_write(struct binder_proc *proc,
 						proc->pid, thread->pid);
 					break;
 				}
-				death = kzalloc(sizeof(*death), GFP_KERNEL);
+				death = kmem_cache_zalloc(binder_ref_death_cachep, GFP_KERNEL);
 				if (death == NULL) {
 					thread->return_error = BR_ERROR;
 					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
@@ -2282,7 +2290,7 @@ static int binder_thread_read(struct binder_proc *proc,
 				     proc->pid, thread->pid);
 
 			list_del(&w->entry);
-			kfree(w);
+			kmem_cache_free(binder_work_cachep, w);
 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 		} break;
 		case BINDER_WORK_NODE: {
@@ -2342,7 +2350,7 @@ static int binder_thread_read(struct binder_proc *proc,
 						     (u64)node->ptr,
 						     (u64)node->cookie);
 					rb_erase(&node->rb_node, &proc->nodes);
-					kfree(node);
+					kmem_cache_free(binder_node_cachep, node);
 					binder_stats_deleted(BINDER_STAT_NODE);
 				} else {
 					binder_debug(BINDER_DEBUG_INTERNAL_REFS,
@@ -2383,7 +2391,7 @@ static int binder_thread_read(struct binder_proc *proc,
 
 			if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) {
 				list_del(&w->entry);
-				kfree(death);
+				kmem_cache_free(binder_ref_death_cachep, death);
 				binder_stats_deleted(BINDER_STAT_DEATH);
 			} else
 				list_move(&w->entry, &proc->delivered_death);
@@ -2463,7 +2471,7 @@ static int binder_thread_read(struct binder_proc *proc,
 			thread->transaction_stack = t;
 		} else {
 			t->buffer->transaction = NULL;
-			kfree(t);
+			kmem_cache_free(binder_transaction_cachep, t);
 			binder_stats_deleted(BINDER_STAT_TRANSACTION);
 		}
 		break;
@@ -2508,14 +2516,14 @@ static void binder_release_work(struct list_head *list)
 					"undelivered transaction %d\n",
 					t->debug_id);
 				t->buffer->transaction = NULL;
-				kfree(t);
+				kmem_cache_free(binder_transaction_cachep, t);
 				binder_stats_deleted(BINDER_STAT_TRANSACTION);
 			}
 		} break;
 		case BINDER_WORK_TRANSACTION_COMPLETE: {
 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
 				"undelivered TRANSACTION_COMPLETE\n");
-			kfree(w);
+			kmem_cache_free(binder_work_cachep, w);
 			binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE);
 		} break;
 		case BINDER_WORK_DEAD_BINDER_AND_CLEAR:
@@ -2526,7 +2534,7 @@ static void binder_release_work(struct list_head *list)
 			binder_debug(BINDER_DEBUG_DEAD_TRANSACTION,
 				"undelivered death notification, %016llx\n",
 				(u64)death->cookie);
-			kfree(death);
+			kmem_cache_free(binder_ref_death_cachep, death);
 			binder_stats_deleted(BINDER_STAT_DEATH);
 		} break;
 		default:
@@ -2556,7 +2564,7 @@ static struct binder_thread *binder_get_thread(struct binder_proc *proc)
 			break;
 	}
 	if (*p == NULL) {
-		thread = kzalloc(sizeof(*thread), GFP_KERNEL);
+		thread = kmem_cache_zalloc(binder_thread_cachep, GFP_KERNEL);
 		if (thread == NULL)
 			return NULL;
 		binder_stats_created(BINDER_STAT_THREAD);
@@ -2609,7 +2617,7 @@ static int binder_free_thread(struct binder_proc *proc,
 	if (send_reply)
 		binder_send_failed_reply(send_reply, BR_DEAD_REPLY);
 	binder_release_work(&thread->todo);
-	kfree(thread);
+	kmem_cache_free(binder_thread_cachep, thread);
 	binder_stats_deleted(BINDER_STAT_THREAD);
 	return active_transactions;
 }
@@ -2973,7 +2981,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
 	binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_open: %d:%d\n",
 		     current->group_leader->pid, current->pid);
 
-	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
+	proc = kmem_cache_zalloc(binder_proc_cachep, GFP_KERNEL);
 	if (proc == NULL)
 		return -ENOMEM;
 	get_task_struct(current);
@@ -3053,7 +3061,7 @@ static int binder_node_release(struct binder_node *node, int refs)
 	binder_release_work(&node->async_todo);
 
 	if (hlist_empty(&node->refs)) {
-		kfree(node);
+		kmem_cache_free(binder_node_cachep, node);
 		binder_stats_deleted(BINDER_STAT_NODE);
 
 		return refs;
@@ -3190,7 +3198,7 @@ static void binder_deferred_release(struct binder_proc *proc)
 		     __func__, proc->pid, threads, nodes, incoming_refs,
 		     outgoing_refs, active_transactions, buffers, page_count);
 
-	kfree(proc);
+	kmem_cache_free(binder_proc_cachep, proc);
 }
 
 static void binder_deferred_func(struct work_struct *work)
@@ -3691,10 +3699,83 @@ static int binder_transaction_log_show(struct seq_file *m, void *unused)
 BINDER_DEBUG_ENTRY(transactions);
 BINDER_DEBUG_ENTRY(transaction_log);
 
+static void binder_destroy_cache(void)
+{
+	if (binder_proc_cachep)
+		kmem_cache_destroy(binder_proc_cachep);
+
+	if (binder_thread_cachep)
+		kmem_cache_destroy(binder_thread_cachep);
+
+	if (binder_node_cachep)
+		kmem_cache_destroy(binder_node_cachep);
+
+	if (binder_ref_cachep)
+		kmem_cache_destroy(binder_ref_cachep);
+
+	if (binder_transaction_cachep)
+		kmem_cache_destroy(binder_transaction_cachep);
+
+	if (binder_work_cachep)
+		kmem_cache_destroy(binder_work_cachep);
+
+	if (binder_ref_death_cachep)
+		kmem_cache_destroy(binder_ref_death_cachep);
+}
+
+static int __init binder_create_cache(void)
+{
+	binder_proc_cachep = kmem_cache_create("binder_proc",
+					sizeof(struct binder_proc), 0, 0, NULL);
+	if (!binder_proc_cachep)
+		goto fail;
+
+	binder_thread_cachep = kmem_cache_create("binder_thread",
+					sizeof(struct binder_thread), 0, 0, NULL);
+	if (!binder_thread_cachep)
+		goto fail;
+
+	binder_node_cachep = kmem_cache_create("binder_node",
+					sizeof(struct binder_node), 0, 0, NULL);
+	if (!binder_node_cachep)
+		goto fail;
+
+	binder_ref_cachep = kmem_cache_create("binder_ref",
+					sizeof(struct binder_ref), 0, 0, NULL);
+	if (!binder_ref_cachep)
+		goto fail;
+
+	binder_transaction_cachep = kmem_cache_create("binder_transaction",
+					sizeof(struct binder_transaction), 0, 0, NULL);
+	if (!binder_transaction_cachep)
+		goto fail;
+
+	binder_work_cachep = kmem_cache_create("binder_work",
+					sizeof(struct binder_work), 0, 0, NULL);
+	if (!binder_work_cachep)
+		goto fail;
+
+	binder_ref_death_cachep = kmem_cache_create("binder_ref_death",
+					sizeof(struct binder_ref_death), 0, 0, NULL);
+	if (!binder_ref_death_cachep)
+		goto fail;
+
+	return 0;
+
+fail:
+	binder_destroy_cache();
+	return -ENOMEM;
+}
+
 static int __init binder_init(void)
 {
 	int ret;
 
+	if (binder_create_cache()) {
+		pr_err("binder cache creation failed\n");
+		return -ENOMEM;
+	}
+
 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
 	if (binder_debugfs_dir_entry_root)
 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
-- 
1.9.1

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH] binder: replace kzalloc with kmem_cache
  2016-11-22 11:17 ` Ganesh Mahendran
@ 2016-11-22 13:53   ` Greg KH
  2016-12-14  2:39     ` Ganesh Mahendran
  2016-11-22 14:48   ` [PATCH] binder: fix ifnullfree.cocci warnings kbuild test robot
  1 sibling, 1 reply; 6+ messages in thread
From: Greg KH @ 2016-11-22 13:53 UTC (permalink / raw)
  To: Ganesh Mahendran; +Cc: arve, riandrews, linux-kernel

On Tue, Nov 22, 2016 at 07:17:30PM +0800, Ganesh Mahendran wrote:
> This patch use kmem_cache to allocate/free binder objects.

Why do this?

> It will have better memory efficiency.

Really?  How?  It should be the same, if not a bit worse.  Have you
tested this?  What is the results?

> And we can also get object usage details in /sys/kernel/slab/* for
> further analysis.

Why do we need this?  Who needs this information and what are you going
to do with it?

> Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
> ---
>  drivers/android/binder.c | 127 ++++++++++++++++++++++++++++++++++++++---------
>  1 file changed, 104 insertions(+), 23 deletions(-)
> 
> diff --git a/drivers/android/binder.c b/drivers/android/binder.c
> index 3c71b98..f1f8362 100644
> --- a/drivers/android/binder.c
> +++ b/drivers/android/binder.c
> @@ -54,6 +54,14 @@
>  static HLIST_HEAD(binder_deferred_list);
>  static HLIST_HEAD(binder_dead_nodes);
>  
> +static struct kmem_cache *binder_proc_cachep;
> +static struct kmem_cache *binder_thread_cachep;
> +static struct kmem_cache *binder_node_cachep;
> +static struct kmem_cache *binder_ref_cachep;
> +static struct kmem_cache *binder_transaction_cachep;
> +static struct kmem_cache *binder_work_cachep;
> +static struct kmem_cache *binder_ref_death_cachep;

That's a lot of different caches, are you sure they don't just all get
merged together anyway for most allocators?

Don't create lots of little caches for no good reason, and without any
benchmark numbers, I'd prefer to leave this alone.  You are going to
have to prove this is a win to allow this type of churn.

thanks,

greg k-h

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH] binder: fix ifnullfree.cocci warnings
  2016-11-22 11:17 ` Ganesh Mahendran
  2016-11-22 13:53   ` Greg KH
@ 2016-11-22 14:48   ` kbuild test robot
  1 sibling, 0 replies; 6+ messages in thread
From: kbuild test robot @ 2016-11-22 14:48 UTC (permalink / raw)
  To: Ganesh Mahendran
  Cc: kbuild-all, gregkh, arve, riandrews, linux-kernel, Ganesh Mahendran

drivers/android/binder.c:3705:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
drivers/android/binder.c:3708:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
drivers/android/binder.c:3711:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
drivers/android/binder.c:3714:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
drivers/android/binder.c:3717:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
drivers/android/binder.c:3720:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
drivers/android/binder.c:3723:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.

 NULL check before some freeing functions is not needed.

 Based on checkpatch warning
 "kfree(NULL) is safe this check is probably not required"
 and kfreeaddr.cocci by Julia Lawall.

Generated by: scripts/coccinelle/free/ifnullfree.cocci

CC: Ganesh Mahendran <opensource.ganesh@gmail.com>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
---

 binder.c |   21 +++++++--------------
 1 file changed, 7 insertions(+), 14 deletions(-)

--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3701,26 +3701,19 @@ BINDER_DEBUG_ENTRY(transaction_log);
 
 static void binder_destroy_cache(void)
 {
-	if (binder_proc_cachep)
-		kmem_cache_destroy(binder_proc_cachep);
+	kmem_cache_destroy(binder_proc_cachep);
 
-	if (binder_thread_cachep)
-		kmem_cache_destroy(binder_thread_cachep);
+	kmem_cache_destroy(binder_thread_cachep);
 
-	if (binder_node_cachep)
-		kmem_cache_destroy(binder_node_cachep);
+	kmem_cache_destroy(binder_node_cachep);
 
-	if (binder_ref_cachep)
-		kmem_cache_destroy(binder_ref_cachep);
+	kmem_cache_destroy(binder_ref_cachep);
 
-	if (binder_transaction_cachep)
-		kmem_cache_destroy(binder_transaction_cachep);
+	kmem_cache_destroy(binder_transaction_cachep);
 
-	if (binder_work_cachep)
-		kmem_cache_destroy(binder_work_cachep);
+	kmem_cache_destroy(binder_work_cachep);
 
-	if (binder_ref_death_cachep)
-		kmem_cache_destroy(binder_ref_death_cachep);
+	kmem_cache_destroy(binder_ref_death_cachep);
 }
 
 static int __init binder_create_cache(void)

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] binder: replace kzalloc with kmem_cache
  2016-11-22 11:17 ` Ganesh Mahendran
@ 2016-11-22 14:48 kbuild test robot
  2016-11-22 11:17 ` Ganesh Mahendran
  1 sibling, 1 reply; 6+ messages in thread
From: kbuild test robot @ 2016-11-22 14:48 UTC (permalink / raw)
  To: Ganesh Mahendran
  Cc: kbuild-all, gregkh, arve, riandrews, linux-kernel, Ganesh Mahendran

Hi Ganesh,

[auto build test WARNING on staging/staging-testing]
[also build test WARNING on v4.9-rc6 next-20161117]
[if your patch is applied to the wrong git tree, please drop us a note to help improve the system]

url:    https://github.com/0day-ci/linux/commits/Ganesh-Mahendran/binder-replace-kzalloc-with-kmem_cache/20161122-213614


coccinelle warnings: (new ones prefixed by >>)

>> drivers/android/binder.c:3705:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
   drivers/android/binder.c:3708:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
   drivers/android/binder.c:3711:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
   drivers/android/binder.c:3714:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
   drivers/android/binder.c:3717:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
   drivers/android/binder.c:3720:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.
   drivers/android/binder.c:3723:2-20: WARNING: NULL check before freeing functions like kfree, debugfs_remove, debugfs_remove_recursive or usb_free_urb is not needed. Maybe consider reorganizing relevant code to avoid passing NULL values.

Please review and possibly fold the followup patch.

---
0-DAY kernel test infrastructure                Open Source Technology Center
https://lists.01.org/pipermail/kbuild-all                   Intel Corporation

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] binder: replace kzalloc with kmem_cache
  2016-11-22 13:53   ` Greg KH
@ 2016-12-14  2:39     ` Ganesh Mahendran
  2016-12-14 11:10       ` Greg KH
  0 siblings, 1 reply; 6+ messages in thread
From: Ganesh Mahendran @ 2016-12-14  2:39 UTC (permalink / raw)
  To: Greg KH; +Cc: arve, riandrews, linux-kernel

Hi, Greg:

Sorry for the late response.

On Tue, Nov 22, 2016 at 02:53:02PM +0100, Greg KH wrote:
> On Tue, Nov 22, 2016 at 07:17:30PM +0800, Ganesh Mahendran wrote:
> > This patch use kmem_cache to allocate/free binder objects.
> 
> Why do this?

I am not very familiar with kmem_cache. I think if we have thousands of
active binder objects in system, kmem_cache would be better.

Below is binder object number in my android system:
-----
$ cat /d/binder/stats
...
proc: active 100 total 6735
thread: active 1456 total 180807
node: active 5668 total 1027387
ref: active 7141 total 1214877
death: active 844 total 468056
transaction: active 0 total 54736890
transaction_complete: active 0 total 54736890
-----

binder objects are allocated/freed frequently.

> 
> > It will have better memory efficiency.
> 
> Really?  How?  It should be the same, if not a bit worse.  Have you
> tested this?  What is the results?

kzalloc will use object with size 2^n to store user data.
Take "struct binder_thread" as example, its size is 296 bytes.
If use kzalloc(), slab system will use 512 object size to store the 296
bytes. But if use kmem_cache to create a separate (may be merged with other
slab allocator) allocator, it will use 304 object size to store the 296
bytes. Below is information get from /proc/slabinfo :
------
name          <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> 
binder_thread      858          858       304         26           2

memory efficiency is : (296 * 26) / (2 * 4096) = 93.9%

> 
> > And we can also get object usage details in /sys/kernel/slab/* for
> > futher analysis.
> 
> Why do we need this?  Who needs this information and what are you going
> to do with it?

This is only for debug purpose to see how much memory is used by binder.

> 
> > Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
> > ---
> >  drivers/android/binder.c | 127 ++++++++++++++++++++++++++++++++++++++---------
> >  1 file changed, 104 insertions(+), 23 deletions(-)
> > 
> > diff --git a/drivers/android/binder.c b/drivers/android/binder.c
> > index 3c71b98..f1f8362 100644
> > --- a/drivers/android/binder.c
> > +++ b/drivers/android/binder.c
> > @@ -54,6 +54,14 @@
> >  static HLIST_HEAD(binder_deferred_list);
> >  static HLIST_HEAD(binder_dead_nodes);
> >  
> > +static struct kmem_cache *binder_proc_cachep;
> > +static struct kmem_cache *binder_thread_cachep;
> > +static struct kmem_cache *binder_node_cachep;
> > +static struct kmem_cache *binder_ref_cachep;
> > +static struct kmem_cache *binder_transaction_cachep;
> > +static struct kmem_cache *binder_work_cachep;
> > +static struct kmem_cache *binder_ref_death_cachep;
> 
> That's a lot of different caches, are you sure they don't just all get
> merged together anyway for most allocators?

If binder kmem_cache have the same flag with other allocator, it may be
merged with other allocator. But I think it would be better than using
kzalloc().

> 
> Don't create lots of little caches for no good reason, and without any
> benchmark numbers, I'd prefer to leave this alone.  You are going to
> have to prove this is a win to allow this type of churn.

I test binder with this patch. There is no performance regression.
---
I run 10 times with below command:
     $binderThroughputTest -w 100

               Before                after(with patch)
avg:           9848.4                 9878.8

Thanks.

> 
> thanks,
> 
> greg k-h

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH] binder: replace kzalloc with kmem_cache
  2016-12-14  2:39     ` Ganesh Mahendran
@ 2016-12-14 11:10       ` Greg KH
  0 siblings, 0 replies; 6+ messages in thread
From: Greg KH @ 2016-12-14 11:10 UTC (permalink / raw)
  To: Ganesh Mahendran; +Cc: arve, riandrews, linux-kernel

On Wed, Dec 14, 2016 at 10:39:16AM +0800, Ganesh Mahendran wrote:
> Hi, Greg:
> 
> Sorry for the late response.
> 
> On Tue, Nov 22, 2016 at 02:53:02PM +0100, Greg KH wrote:
> > On Tue, Nov 22, 2016 at 07:17:30PM +0800, Ganesh Mahendran wrote:
> > > This patch use kmem_cache to allocate/free binder objects.
> > 
> > Why do this?
> 
> I am not very familiar with kmem_cache. I think if we have thousands of
> active binder objects in system, kmem_cache would be better.

It shouldn't matter, if the memory allocator you are using is good.
This you have proven with your benchmarks :)

And, if you are not familiar with kmem_cache, I would not recommend
changing a core system api to use it just because you think it might be
nicer for some reason, that's not acceptable.

> Below is binder object number in my android system:
> -----
> $ cat /d/binder/stats
> ...
> proc: active 100 total 6735
> thread: active 1456 total 180807
> node: active 5668 total 1027387
> ref: active 7141 total 1214877
> death: active 844 total 468056
> transaction: active 0 total 54736890
> transaction_complete: active 0 total 54736890
> -----
> 
> binder objects are allocated/freed frequently.

Yes they are, and the memory allocator on your system handles that just
fine, as you see :)

> > > It will have better memory efficiency.
> > 
> > Really?  How?  It should be the same, if not a bit worse.  Have you
> > tested this?  What is the results?
> 
> kzalloc will use object with size 2^n to store user data.
> Take "struct binder_thread" as example, its size is 296 bytes.
> If use kzalloc(), slab system will use 512 object size to store the 296
> bytes. But if use kmem_cache to create a separate (may be merged with other
> slab allocator) allocator, it will use 304 object size to store the 296
> bytes. Below is information get from /proc/slabinfo :
> ------
> name          <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab> 
> binder_thread      858          858       304         26           2
> 
> memory efficiency is : (296 * 26) / (2 * 4096) = 93.9%

But, as you say, things will get merged by your slab allocator anyway,
so there's no real benefit in changing the code we have today.

> > > And we can also get object usage details in /sys/kernel/slab/* for
> > further analysis.
> > 
> > Why do we need this?  Who needs this information and what are you going
> > to do with it?
> 
> This is only for debug purpose to see how much memory is used by binder.

But who really cares about that?  What can you do with that information?
The normal allocator debug statistics should provide you all of this
information already without anything needed to be added.

> > > Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
> > > ---
> > >  drivers/android/binder.c | 127 ++++++++++++++++++++++++++++++++++++++---------
> > >  1 file changed, 104 insertions(+), 23 deletions(-)
> > > 
> > > diff --git a/drivers/android/binder.c b/drivers/android/binder.c
> > > index 3c71b98..f1f8362 100644
> > > --- a/drivers/android/binder.c
> > > +++ b/drivers/android/binder.c
> > > @@ -54,6 +54,14 @@
> > >  static HLIST_HEAD(binder_deferred_list);
> > >  static HLIST_HEAD(binder_dead_nodes);
> > >  
> > > +static struct kmem_cache *binder_proc_cachep;
> > > +static struct kmem_cache *binder_thread_cachep;
> > > +static struct kmem_cache *binder_node_cachep;
> > > +static struct kmem_cache *binder_ref_cachep;
> > > +static struct kmem_cache *binder_transaction_cachep;
> > > +static struct kmem_cache *binder_work_cachep;
> > > +static struct kmem_cache *binder_ref_death_cachep;
> > 
> > That's a lot of different caches, are you sure they don't just all get
> > merged together anyway for most allocators?
> 
> If binder kmem_cache have the same flag with other allocator, it may be
> merged with other allocator. But I think it would be better than using
> kzalloc().

Nope, it should work the same.  And you showed that with your
benchmarks.

> > Don't create lots of little caches for no good reason, and without any
> > benchmark numbers, I'd prefer to leave this alone.  You are going to
> > have to prove this is a win to allow this type of churn.
> 
> I test binder with this patch. There is no performance regression.
> ---
> I run 10 times with below command:
>      $binderThroughputTest -w 100
> 
>                Before                after(with patch)
> avg:           9848.4                 9878.8

Great, you made a change to the code that didn't actually do anything :)

Sorry, for that reason alone, I can't take this patch, nor should you
want me to.

thanks,

greg k-h

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2016-12-14 11:16 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-11-22 14:48 [PATCH] binder: replace kzalloc with kmem_cache kbuild test robot
2016-11-22 11:17 ` Ganesh Mahendran
2016-11-22 13:53   ` Greg KH
2016-12-14  2:39     ` Ganesh Mahendran
2016-12-14 11:10       ` Greg KH
2016-11-22 14:48   ` [PATCH] binder: fix ifnullfree.cocci warnings kbuild test robot

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).