All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/2] mm: slab/slub: use page->list consistently instead of page->lru
@ 2013-12-11 22:36 ` Dave Hansen
  0 siblings, 0 replies; 12+ messages in thread
From: Dave Hansen @ 2013-12-11 22:36 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-mm, cl, kirill.shutemov, Andi Kleen, akpm, Dave Hansen


From: Dave Hansen <dave.hansen@linux.intel.com>

'struct page' has two list_head fields: 'lru' and 'list'.
Conveniently, they are unioned together.  This means that code
can use them interchangeably, which gets horribly confusing like
with this nugget from slab.c:

>	list_del(&page->lru);
>	if (page->active == cachep->num)
>		list_add(&page->list, &n->slabs_full);

This patch makes the slab and slub code use page->list
universally instead of mixing ->list and ->lru.

It also adds some comments to attempt to keep new users from
picking up uses of ->list.

So, the new rule is: page->list is what the slabs use.  page->lru
is for everybody else.  This is a pretty arbitrary rule, but we
need _something_.  Maybe we should just axe the ->list one and
make the sl?bs use ->lru.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---

 linux.git-davehans/include/linux/mm_types.h |    5 ++
 linux.git-davehans/mm/slab.c                |   50 ++++++++++++++--------------
 2 files changed, 29 insertions(+), 26 deletions(-)

diff -puN include/linux/mm_types.h~make-slab-use-page-lru-vs-list-consistently include/linux/mm_types.h
--- linux.git/include/linux/mm_types.h~make-slab-use-page-lru-vs-list-consistently	2013-12-11 14:34:51.438183588 -0800
+++ linux.git-davehans/include/linux/mm_types.h	2013-12-11 14:34:51.445183899 -0800
@@ -123,6 +123,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -135,7 +137,8 @@ struct page {
 #endif
 		};
 
-		struct list_head list;	/* slobs list of pages */
+		struct list_head list;	/* sl[aou]bs list of pages.
+					 * do not use outside of slabs */
 		struct slab *slab_page; /* slab fields */
 		struct rcu_head rcu_head;	/* Used by SLAB
 						 * when destroying via RCU
diff -puN mm/slab.c~make-slab-use-page-lru-vs-list-consistently mm/slab.c
--- linux.git/mm/slab.c~make-slab-use-page-lru-vs-list-consistently	2013-12-11 14:34:51.440183677 -0800
+++ linux.git-davehans/mm/slab.c	2013-12-11 14:34:51.444183855 -0800
@@ -765,15 +765,15 @@ static void recheck_pfmemalloc_active(st
 		return;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->slabs_full, lru)
+	list_for_each_entry(page, &n->slabs_full, list)
 		if (is_slab_pfmemalloc(page))
 			goto out;
 
-	list_for_each_entry(page, &n->slabs_partial, lru)
+	list_for_each_entry(page, &n->slabs_partial, list)
 		if (is_slab_pfmemalloc(page))
 			goto out;
 
-	list_for_each_entry(page, &n->slabs_free, lru)
+	list_for_each_entry(page, &n->slabs_free, list)
 		if (is_slab_pfmemalloc(page))
 			goto out;
 
@@ -1428,7 +1428,7 @@ void __init kmem_cache_init(void)
 {
 	int i;
 
-	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
+	BUILD_BUG_ON(sizeof(((struct page *)NULL)->list) <
 					sizeof(struct rcu_head));
 	kmem_cache = &kmem_cache_boot;
 	setup_node_pointer(kmem_cache);
@@ -1624,15 +1624,15 @@ slab_out_of_memory(struct kmem_cache *ca
 			continue;
 
 		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->slabs_full, lru) {
+		list_for_each_entry(page, &n->slabs_full, list) {
 			active_objs += cachep->num;
 			active_slabs++;
 		}
-		list_for_each_entry(page, &n->slabs_partial, lru) {
+		list_for_each_entry(page, &n->slabs_partial, list) {
 			active_objs += page->active;
 			active_slabs++;
 		}
-		list_for_each_entry(page, &n->slabs_free, lru)
+		list_for_each_entry(page, &n->slabs_free, list)
 			num_slabs++;
 
 		free_objects += n->free_objects;
@@ -2424,11 +2424,11 @@ static int drain_freelist(struct kmem_ca
 			goto out;
 		}
 
-		page = list_entry(p, struct page, lru);
+		page = list_entry(p, struct page, list);
 #if DEBUG
 		BUG_ON(page->active);
 #endif
-		list_del(&page->lru);
+		list_del(&page->list);
 		/*
 		 * Safe to drop the lock. The slab is no longer linked
 		 * to the cache.
@@ -2721,7 +2721,7 @@ static int cache_grow(struct kmem_cache
 	spin_lock(&n->list_lock);
 
 	/* Make slab active. */
-	list_add_tail(&page->lru, &(n->slabs_free));
+	list_add_tail(&page->list, &(n->slabs_free));
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num;
 	spin_unlock(&n->list_lock);
@@ -2864,7 +2864,7 @@ retry:
 				goto must_grow;
 		}
 
-		page = list_entry(entry, struct page, lru);
+		page = list_entry(entry, struct page, list);
 		check_spinlock_acquired(cachep);
 
 		/*
@@ -2884,7 +2884,7 @@ retry:
 		}
 
 		/* move slabp to correct slabp list: */
-		list_del(&page->lru);
+		list_del(&page->list);
 		if (page->active == cachep->num)
 			list_add(&page->list, &n->slabs_full);
 		else
@@ -3163,7 +3163,7 @@ retry:
 			goto must_grow;
 	}
 
-	page = list_entry(entry, struct page, lru);
+	page = list_entry(entry, struct page, list);
 	check_spinlock_acquired_node(cachep, nodeid);
 
 	STATS_INC_NODEALLOCS(cachep);
@@ -3175,12 +3175,12 @@ retry:
 	obj = slab_get_obj(cachep, page, nodeid);
 	n->free_objects--;
 	/* move slabp to correct slabp list: */
-	list_del(&page->lru);
+	list_del(&page->list);
 
 	if (page->active == cachep->num)
-		list_add(&page->lru, &n->slabs_full);
+		list_add(&page->list, &n->slabs_full);
 	else
-		list_add(&page->lru, &n->slabs_partial);
+		list_add(&page->list, &n->slabs_partial);
 
 	spin_unlock(&n->list_lock);
 	goto done;
@@ -3337,7 +3337,7 @@ static void free_block(struct kmem_cache
 
 		page = virt_to_head_page(objp);
 		n = cachep->node[node];
-		list_del(&page->lru);
+		list_del(&page->list);
 		check_spinlock_acquired_node(cachep, node);
 		slab_put_obj(cachep, page, objp, node);
 		STATS_DEC_ACTIVE(cachep);
@@ -3355,14 +3355,14 @@ static void free_block(struct kmem_cache
 				 */
 				slab_destroy(cachep, page);
 			} else {
-				list_add(&page->lru, &n->slabs_free);
+				list_add(&page->list, &n->slabs_free);
 			}
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
 			 */
-			list_add_tail(&page->lru, &n->slabs_partial);
+			list_add_tail(&page->list, &n->slabs_partial);
 		}
 	}
 }
@@ -3404,7 +3404,7 @@ free_done:
 		while (p != &(n->slabs_free)) {
 			struct page *page;
 
-			page = list_entry(p, struct page, lru);
+			page = list_entry(p, struct page, list);
 			BUG_ON(page->active);
 
 			i++;
@@ -4029,13 +4029,13 @@ void get_slabinfo(struct kmem_cache *cac
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru) {
+		list_for_each_entry(page, &n->slabs_full, list) {
 			if (page->active != cachep->num && !error)
 				error = "slabs_full accounting error";
 			active_objs += cachep->num;
 			active_slabs++;
 		}
-		list_for_each_entry(page, &n->slabs_partial, lru) {
+		list_for_each_entry(page, &n->slabs_partial, list) {
 			if (page->active == cachep->num && !error)
 				error = "slabs_partial accounting error";
 			if (!page->active && !error)
@@ -4043,7 +4043,7 @@ void get_slabinfo(struct kmem_cache *cac
 			active_objs += page->active;
 			active_slabs++;
 		}
-		list_for_each_entry(page, &n->slabs_free, lru) {
+		list_for_each_entry(page, &n->slabs_free, list) {
 			if (page->active && !error)
 				error = "slabs_free accounting error";
 			num_slabs++;
@@ -4266,9 +4266,9 @@ static int leaks_show(struct seq_file *m
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru)
+		list_for_each_entry(page, &n->slabs_full, list)
 			handle_slab(x, cachep, page);
-		list_for_each_entry(page, &n->slabs_partial, lru)
+		list_for_each_entry(page, &n->slabs_partial, list)
 			handle_slab(x, cachep, page);
 		spin_unlock_irq(&n->list_lock);
 	}
_

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 1/2] mm: slab/slub: use page->list consistently instead of page->lru
@ 2013-12-11 22:36 ` Dave Hansen
  0 siblings, 0 replies; 12+ messages in thread
From: Dave Hansen @ 2013-12-11 22:36 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-mm, cl, kirill.shutemov, Andi Kleen, akpm, Dave Hansen


From: Dave Hansen <dave.hansen@linux.intel.com>

'struct page' has two list_head fields: 'lru' and 'list'.
Conveniently, they are unioned together.  This means that code
can use them interchangeably, which gets horribly confusing like
with this nugget from slab.c:

>	list_del(&page->lru);
>	if (page->active == cachep->num)
>		list_add(&page->list, &n->slabs_full);

This patch makes the slab and slub code use page->list
universally instead of mixing ->list and ->lru.

It also adds some comments to attempt to keep new users from
picking up uses of ->list.

So, the new rule is: page->list is what the slabs use.  page->lru
is for everybody else.  This is a pretty arbitrary rule, but we
need _something_.  Maybe we should just axe the ->list one and
make the sl?bs use ->lru.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---

 linux.git-davehans/include/linux/mm_types.h |    5 ++
 linux.git-davehans/mm/slab.c                |   50 ++++++++++++++--------------
 2 files changed, 29 insertions(+), 26 deletions(-)

diff -puN include/linux/mm_types.h~make-slab-use-page-lru-vs-list-consistently include/linux/mm_types.h
--- linux.git/include/linux/mm_types.h~make-slab-use-page-lru-vs-list-consistently	2013-12-11 14:34:51.438183588 -0800
+++ linux.git-davehans/include/linux/mm_types.h	2013-12-11 14:34:51.445183899 -0800
@@ -123,6 +123,8 @@ struct page {
 	union {
 		struct list_head lru;	/* Pageout list, eg. active_list
 					 * protected by zone->lru_lock !
+					 * Can be used as a generic list
+					 * by the page owner.
 					 */
 		struct {		/* slub per cpu partial pages */
 			struct page *next;	/* Next partial slab */
@@ -135,7 +137,8 @@ struct page {
 #endif
 		};
 
-		struct list_head list;	/* slobs list of pages */
+		struct list_head list;	/* sl[aou]bs list of pages.
+					 * do not use outside of slabs */
 		struct slab *slab_page; /* slab fields */
 		struct rcu_head rcu_head;	/* Used by SLAB
 						 * when destroying via RCU
diff -puN mm/slab.c~make-slab-use-page-lru-vs-list-consistently mm/slab.c
--- linux.git/mm/slab.c~make-slab-use-page-lru-vs-list-consistently	2013-12-11 14:34:51.440183677 -0800
+++ linux.git-davehans/mm/slab.c	2013-12-11 14:34:51.444183855 -0800
@@ -765,15 +765,15 @@ static void recheck_pfmemalloc_active(st
 		return;
 
 	spin_lock_irqsave(&n->list_lock, flags);
-	list_for_each_entry(page, &n->slabs_full, lru)
+	list_for_each_entry(page, &n->slabs_full, list)
 		if (is_slab_pfmemalloc(page))
 			goto out;
 
-	list_for_each_entry(page, &n->slabs_partial, lru)
+	list_for_each_entry(page, &n->slabs_partial, list)
 		if (is_slab_pfmemalloc(page))
 			goto out;
 
-	list_for_each_entry(page, &n->slabs_free, lru)
+	list_for_each_entry(page, &n->slabs_free, list)
 		if (is_slab_pfmemalloc(page))
 			goto out;
 
@@ -1428,7 +1428,7 @@ void __init kmem_cache_init(void)
 {
 	int i;
 
-	BUILD_BUG_ON(sizeof(((struct page *)NULL)->lru) <
+	BUILD_BUG_ON(sizeof(((struct page *)NULL)->list) <
 					sizeof(struct rcu_head));
 	kmem_cache = &kmem_cache_boot;
 	setup_node_pointer(kmem_cache);
@@ -1624,15 +1624,15 @@ slab_out_of_memory(struct kmem_cache *ca
 			continue;
 
 		spin_lock_irqsave(&n->list_lock, flags);
-		list_for_each_entry(page, &n->slabs_full, lru) {
+		list_for_each_entry(page, &n->slabs_full, list) {
 			active_objs += cachep->num;
 			active_slabs++;
 		}
-		list_for_each_entry(page, &n->slabs_partial, lru) {
+		list_for_each_entry(page, &n->slabs_partial, list) {
 			active_objs += page->active;
 			active_slabs++;
 		}
-		list_for_each_entry(page, &n->slabs_free, lru)
+		list_for_each_entry(page, &n->slabs_free, list)
 			num_slabs++;
 
 		free_objects += n->free_objects;
@@ -2424,11 +2424,11 @@ static int drain_freelist(struct kmem_ca
 			goto out;
 		}
 
-		page = list_entry(p, struct page, lru);
+		page = list_entry(p, struct page, list);
 #if DEBUG
 		BUG_ON(page->active);
 #endif
-		list_del(&page->lru);
+		list_del(&page->list);
 		/*
 		 * Safe to drop the lock. The slab is no longer linked
 		 * to the cache.
@@ -2721,7 +2721,7 @@ static int cache_grow(struct kmem_cache
 	spin_lock(&n->list_lock);
 
 	/* Make slab active. */
-	list_add_tail(&page->lru, &(n->slabs_free));
+	list_add_tail(&page->list, &(n->slabs_free));
 	STATS_INC_GROWN(cachep);
 	n->free_objects += cachep->num;
 	spin_unlock(&n->list_lock);
@@ -2864,7 +2864,7 @@ retry:
 				goto must_grow;
 		}
 
-		page = list_entry(entry, struct page, lru);
+		page = list_entry(entry, struct page, list);
 		check_spinlock_acquired(cachep);
 
 		/*
@@ -2884,7 +2884,7 @@ retry:
 		}
 
 		/* move slabp to correct slabp list: */
-		list_del(&page->lru);
+		list_del(&page->list);
 		if (page->active == cachep->num)
 			list_add(&page->list, &n->slabs_full);
 		else
@@ -3163,7 +3163,7 @@ retry:
 			goto must_grow;
 	}
 
-	page = list_entry(entry, struct page, lru);
+	page = list_entry(entry, struct page, list);
 	check_spinlock_acquired_node(cachep, nodeid);
 
 	STATS_INC_NODEALLOCS(cachep);
@@ -3175,12 +3175,12 @@ retry:
 	obj = slab_get_obj(cachep, page, nodeid);
 	n->free_objects--;
 	/* move slabp to correct slabp list: */
-	list_del(&page->lru);
+	list_del(&page->list);
 
 	if (page->active == cachep->num)
-		list_add(&page->lru, &n->slabs_full);
+		list_add(&page->list, &n->slabs_full);
 	else
-		list_add(&page->lru, &n->slabs_partial);
+		list_add(&page->list, &n->slabs_partial);
 
 	spin_unlock(&n->list_lock);
 	goto done;
@@ -3337,7 +3337,7 @@ static void free_block(struct kmem_cache
 
 		page = virt_to_head_page(objp);
 		n = cachep->node[node];
-		list_del(&page->lru);
+		list_del(&page->list);
 		check_spinlock_acquired_node(cachep, node);
 		slab_put_obj(cachep, page, objp, node);
 		STATS_DEC_ACTIVE(cachep);
@@ -3355,14 +3355,14 @@ static void free_block(struct kmem_cache
 				 */
 				slab_destroy(cachep, page);
 			} else {
-				list_add(&page->lru, &n->slabs_free);
+				list_add(&page->list, &n->slabs_free);
 			}
 		} else {
 			/* Unconditionally move a slab to the end of the
 			 * partial list on free - maximum time for the
 			 * other objects to be freed, too.
 			 */
-			list_add_tail(&page->lru, &n->slabs_partial);
+			list_add_tail(&page->list, &n->slabs_partial);
 		}
 	}
 }
@@ -3404,7 +3404,7 @@ free_done:
 		while (p != &(n->slabs_free)) {
 			struct page *page;
 
-			page = list_entry(p, struct page, lru);
+			page = list_entry(p, struct page, list);
 			BUG_ON(page->active);
 
 			i++;
@@ -4029,13 +4029,13 @@ void get_slabinfo(struct kmem_cache *cac
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru) {
+		list_for_each_entry(page, &n->slabs_full, list) {
 			if (page->active != cachep->num && !error)
 				error = "slabs_full accounting error";
 			active_objs += cachep->num;
 			active_slabs++;
 		}
-		list_for_each_entry(page, &n->slabs_partial, lru) {
+		list_for_each_entry(page, &n->slabs_partial, list) {
 			if (page->active == cachep->num && !error)
 				error = "slabs_partial accounting error";
 			if (!page->active && !error)
@@ -4043,7 +4043,7 @@ void get_slabinfo(struct kmem_cache *cac
 			active_objs += page->active;
 			active_slabs++;
 		}
-		list_for_each_entry(page, &n->slabs_free, lru) {
+		list_for_each_entry(page, &n->slabs_free, list) {
 			if (page->active && !error)
 				error = "slabs_free accounting error";
 			num_slabs++;
@@ -4266,9 +4266,9 @@ static int leaks_show(struct seq_file *m
 		check_irq_on();
 		spin_lock_irq(&n->list_lock);
 
-		list_for_each_entry(page, &n->slabs_full, lru)
+		list_for_each_entry(page, &n->slabs_full, list)
 			handle_slab(x, cachep, page);
-		list_for_each_entry(page, &n->slabs_partial, lru)
+		list_for_each_entry(page, &n->slabs_partial, list)
 			handle_slab(x, cachep, page);
 		spin_unlock_irq(&n->list_lock);
 	}
_

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 2/2] mm: blk-mq: uses page->list incorrectly
  2013-12-11 22:36 ` Dave Hansen
@ 2013-12-11 22:36   ` Dave Hansen
  -1 siblings, 0 replies; 12+ messages in thread
From: Dave Hansen @ 2013-12-11 22:36 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-mm, cl, kirill.shutemov, Andi Kleen, akpm, Dave Hansen


From: Dave Hansen <dave.hansen@linux.intel.com>

'struct page' has two list_head fields: 'lru' and 'list'.
Conveniently, they are unioned together.  This means that code
can use them interchangeably, which gets horribly confusing.

The blk-mq made the logical decision to try to use page->list.
But, that field was actually introduced just for the slub code.
->lru is the right field to use outside of slab/slub.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---

 linux.git-davehans/block/blk-mq.c |    6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff -puN block/blk-mq.c~blk-mq-uses-page-list-incorrectly block/blk-mq.c
--- linux.git/block/blk-mq.c~blk-mq-uses-page-list-incorrectly	2013-12-11 14:34:51.735196799 -0800
+++ linux.git-davehans/block/blk-mq.c	2013-12-11 14:34:51.739196977 -0800
@@ -1087,8 +1087,8 @@ static void blk_mq_free_rq_map(struct bl
 	struct page *page;
 
 	while (!list_empty(&hctx->page_list)) {
-		page = list_first_entry(&hctx->page_list, struct page, list);
-		list_del_init(&page->list);
+		page = list_first_entry(&hctx->page_list, struct page, lru);
+		list_del_init(&page->lru);
 		__free_pages(page, page->private);
 	}
 
@@ -1152,7 +1152,7 @@ static int blk_mq_init_rq_map(struct blk
 			break;
 
 		page->private = this_order;
-		list_add_tail(&page->list, &hctx->page_list);
+		list_add_tail(&page->lru, &hctx->page_list);
 
 		p = page_address(page);
 		entries_per_page = order_to_size(this_order) / rq_size;
_

^ permalink raw reply	[flat|nested] 12+ messages in thread

* [PATCH 2/2] mm: blk-mq: uses page->list incorrectly
@ 2013-12-11 22:36   ` Dave Hansen
  0 siblings, 0 replies; 12+ messages in thread
From: Dave Hansen @ 2013-12-11 22:36 UTC (permalink / raw)
  To: linux-kernel; +Cc: linux-mm, cl, kirill.shutemov, Andi Kleen, akpm, Dave Hansen


From: Dave Hansen <dave.hansen@linux.intel.com>

'struct page' has two list_head fields: 'lru' and 'list'.
Conveniently, they are unioned together.  This means that code
can use them interchangeably, which gets horribly confusing.

The blk-mq made the logical decision to try to use page->list.
But, that field was actually introduced just for the slub code.
->lru is the right field to use outside of slab/slub.

Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
---

 linux.git-davehans/block/blk-mq.c |    6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff -puN block/blk-mq.c~blk-mq-uses-page-list-incorrectly block/blk-mq.c
--- linux.git/block/blk-mq.c~blk-mq-uses-page-list-incorrectly	2013-12-11 14:34:51.735196799 -0800
+++ linux.git-davehans/block/blk-mq.c	2013-12-11 14:34:51.739196977 -0800
@@ -1087,8 +1087,8 @@ static void blk_mq_free_rq_map(struct bl
 	struct page *page;
 
 	while (!list_empty(&hctx->page_list)) {
-		page = list_first_entry(&hctx->page_list, struct page, list);
-		list_del_init(&page->list);
+		page = list_first_entry(&hctx->page_list, struct page, lru);
+		list_del_init(&page->lru);
 		__free_pages(page, page->private);
 	}
 
@@ -1152,7 +1152,7 @@ static int blk_mq_init_rq_map(struct blk
 			break;
 
 		page->private = this_order;
-		list_add_tail(&page->list, &hctx->page_list);
+		list_add_tail(&page->lru, &hctx->page_list);
 
 		p = page_address(page);
 		entries_per_page = order_to_size(this_order) / rq_size;
_

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] mm: slab/slub: use page->list consistently instead of page->lru
  2013-12-11 22:36 ` Dave Hansen
@ 2013-12-11 22:45   ` David Rientjes
  -1 siblings, 0 replies; 12+ messages in thread
From: David Rientjes @ 2013-12-11 22:45 UTC (permalink / raw)
  To: Dave Hansen; +Cc: linux-kernel, linux-mm, cl, kirill.shutemov, Andi Kleen, akpm

On Wed, 11 Dec 2013, Dave Hansen wrote:

> From: Dave Hansen <dave.hansen@linux.intel.com>
> 
> 'struct page' has two list_head fields: 'lru' and 'list'.
> Conveniently, they are unioned together.  This means that code
> can use them interchangeably, which gets horribly confusing like
> with this nugget from slab.c:
> 
> >	list_del(&page->lru);
> >	if (page->active == cachep->num)
> >		list_add(&page->list, &n->slabs_full);
> 
> This patch makes the slab and slub code use page->list
> universally instead of mixing ->list and ->lru.
> 
> It also adds some comments to attempt to keep new users from
> picking up uses of ->list.
> 
> So, the new rule is: page->list is what the slabs use.  page->lru
> is for everybody else.  This is a pretty arbitrary rule, but we
> need _something_.  Maybe we should just axe the ->list one and
> make the sl?bs use ->lru.
> 

I'd recommend this suggestion, I don't see why the slab allocators can't 
use a page->lru field to maintain their lists of slab pages and it makes 
the code much cleaner.  Anybody hacking this code will know it's not 
really a lru and we're just reusing a field from struct page without 
adding unnecessary complexity.

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] mm: slab/slub: use page->list consistently instead of page->lru
@ 2013-12-11 22:45   ` David Rientjes
  0 siblings, 0 replies; 12+ messages in thread
From: David Rientjes @ 2013-12-11 22:45 UTC (permalink / raw)
  To: Dave Hansen; +Cc: linux-kernel, linux-mm, cl, kirill.shutemov, Andi Kleen, akpm

On Wed, 11 Dec 2013, Dave Hansen wrote:

> From: Dave Hansen <dave.hansen@linux.intel.com>
> 
> 'struct page' has two list_head fields: 'lru' and 'list'.
> Conveniently, they are unioned together.  This means that code
> can use them interchangeably, which gets horribly confusing like
> with this nugget from slab.c:
> 
> >	list_del(&page->lru);
> >	if (page->active == cachep->num)
> >		list_add(&page->list, &n->slabs_full);
> 
> This patch makes the slab and slub code use page->list
> universally instead of mixing ->list and ->lru.
> 
> It also adds some comments to attempt to keep new users from
> picking up uses of ->list.
> 
> So, the new rule is: page->list is what the slabs use.  page->lru
> is for everybody else.  This is a pretty arbitrary rule, but we
> need _something_.  Maybe we should just axe the ->list one and
> make the sl?bs use ->lru.
> 

I'd recommend this suggestion, I don't see why the slab allocators can't 
use a page->lru field to maintain their lists of slab pages and it makes 
the code much cleaner.  Anybody hacking this code will know it's not 
really a lru and we're just reusing a field from struct page without 
adding unnecessary complexity.

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] mm: blk-mq: uses page->list incorrectly
  2013-12-11 22:36   ` Dave Hansen
@ 2013-12-11 22:47     ` David Rientjes
  -1 siblings, 0 replies; 12+ messages in thread
From: David Rientjes @ 2013-12-11 22:47 UTC (permalink / raw)
  To: Dave Hansen; +Cc: linux-kernel, linux-mm, cl, kirill.shutemov, Andi Kleen, akpm

On Wed, 11 Dec 2013, Dave Hansen wrote:

> 
> From: Dave Hansen <dave.hansen@linux.intel.com>
> 
> 'struct page' has two list_head fields: 'lru' and 'list'.
> Conveniently, they are unioned together.  This means that code
> can use them interchangeably, which gets horribly confusing.
> 
> The blk-mq made the logical decision to try to use page->list.
> But, that field was actually introduced just for the slub code.
> ->lru is the right field to use outside of slab/slub.
> 
> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>

Acked-by: David Rientjes <rientjes@google.com>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 2/2] mm: blk-mq: uses page->list incorrectly
@ 2013-12-11 22:47     ` David Rientjes
  0 siblings, 0 replies; 12+ messages in thread
From: David Rientjes @ 2013-12-11 22:47 UTC (permalink / raw)
  To: Dave Hansen; +Cc: linux-kernel, linux-mm, cl, kirill.shutemov, Andi Kleen, akpm

On Wed, 11 Dec 2013, Dave Hansen wrote:

> 
> From: Dave Hansen <dave.hansen@linux.intel.com>
> 
> 'struct page' has two list_head fields: 'lru' and 'list'.
> Conveniently, they are unioned together.  This means that code
> can use them interchangeably, which gets horribly confusing.
> 
> The blk-mq made the logical decision to try to use page->list.
> But, that field was actually introduced just for the slub code.
> ->lru is the right field to use outside of slab/slub.
> 
> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>

Acked-by: David Rientjes <rientjes@google.com>

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* RE: [PATCH 2/2] mm: blk-mq: uses page->list incorrectly
  2013-12-11 22:36   ` Dave Hansen
@ 2013-12-12 13:16     ` Kirill A. Shutemov
  -1 siblings, 0 replies; 12+ messages in thread
From: Kirill A. Shutemov @ 2013-12-12 13:16 UTC (permalink / raw)
  To: Dave Hansen
  Cc: linux-kernel, linux-mm, cl, kirill.shutemov, Andi Kleen, akpm,
	Dave Hansen

Dave Hansen wrote:
> 
> From: Dave Hansen <dave.hansen@linux.intel.com>
> 
> 'struct page' has two list_head fields: 'lru' and 'list'.
> Conveniently, they are unioned together.  This means that code
> can use them interchangeably, which gets horribly confusing.
> 
> The blk-mq made the logical decision to try to use page->list.
> But, that field was actually introduced just for the slub code.
> ->lru is the right field to use outside of slab/slub.
> 
> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>

Looks good to me.

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

for both.

-- 
 Kirill A. Shutemov

^ permalink raw reply	[flat|nested] 12+ messages in thread

* RE: [PATCH 2/2] mm: blk-mq: uses page->list incorrectly
@ 2013-12-12 13:16     ` Kirill A. Shutemov
  0 siblings, 0 replies; 12+ messages in thread
From: Kirill A. Shutemov @ 2013-12-12 13:16 UTC (permalink / raw)
  To: Dave Hansen; +Cc: linux-kernel, linux-mm, cl, kirill.shutemov, Andi Kleen, akpm

Dave Hansen wrote:
> 
> From: Dave Hansen <dave.hansen@linux.intel.com>
> 
> 'struct page' has two list_head fields: 'lru' and 'list'.
> Conveniently, they are unioned together.  This means that code
> can use them interchangably, which gets horribly confusing.
> 
> The blk-mq made the logical decision to try to use page->list.
> But, that field was actually introduced just for the slub code.
> ->lru is the right field to use outside of slab/slub.
> 
> Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>

Looks good to me.

Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

for both.

-- 
 Kirill A. Shutemov

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] mm: slab/slub: use page->list consistently instead of page->lru
  2013-12-11 22:36 ` Dave Hansen
@ 2013-12-12 17:39   ` Christoph Lameter
  -1 siblings, 0 replies; 12+ messages in thread
From: Christoph Lameter @ 2013-12-12 17:39 UTC (permalink / raw)
  To: Dave Hansen
  Cc: linux-kernel, linux-mm, kirill.shutemov, Andi Kleen, akpm, Pekka Enberg

On Wed, 11 Dec 2013, Dave Hansen wrote:

> 'struct page' has two list_head fields: 'lru' and 'list'.
> Conveniently, they are unioned together.  This means that code
> can use them interchangeably, which gets horribly confusing like
> with this nugget from slab.c:

Acked-by: Christoph Lameter <cl@linux.com>

^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [PATCH 1/2] mm: slab/slub: use page->list consistently instead of page->lru
@ 2013-12-12 17:39   ` Christoph Lameter
  0 siblings, 0 replies; 12+ messages in thread
From: Christoph Lameter @ 2013-12-12 17:39 UTC (permalink / raw)
  To: Dave Hansen
  Cc: linux-kernel, linux-mm, kirill.shutemov, Andi Kleen, akpm, Pekka Enberg

On Wed, 11 Dec 2013, Dave Hansen wrote:

> 'struct page' has two list_head fields: 'lru' and 'list'.
> Conveniently, they are unioned together.  This means that code
> can use them interchangably, which gets horribly confusing like
> with this nugget from slab.c:

Acked-by: Christoph Lameter <cl@linux.com>

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href=mailto:"dont@kvack.org"> email@kvack.org </a>

^ permalink raw reply	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2013-12-12 17:40 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2013-12-11 22:36 [PATCH 1/2] mm: slab/slub: use page->list consistently instead of page->lru Dave Hansen
2013-12-11 22:36 ` Dave Hansen
2013-12-11 22:36 ` [PATCH 2/2] mm: blk-mq: uses page->list incorrectly Dave Hansen
2013-12-11 22:36   ` Dave Hansen
2013-12-11 22:47   ` David Rientjes
2013-12-11 22:47     ` David Rientjes
2013-12-12 13:16   ` Kirill A. Shutemov
2013-12-12 13:16     ` Kirill A. Shutemov
2013-12-11 22:45 ` [PATCH 1/2] mm: slab/slub: use page->list consistently instead of page->lru David Rientjes
2013-12-11 22:45   ` David Rientjes
2013-12-12 17:39 ` Christoph Lameter
2013-12-12 17:39   ` Christoph Lameter

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.