All of lore.kernel.org
 help / color / mirror / Atom feed
From: daniel.m.jordan@oracle.com
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: aaron.lu@intel.com, ak@linux.intel.com,
	akpm@linux-foundation.org, Dave.Dice@oracle.com,
	dave@stgolabs.net, khandual@linux.vnet.ibm.com,
	ldufour@linux.vnet.ibm.com, mgorman@suse.de, mhocko@kernel.org,
	pasha.tatashin@oracle.com, steven.sistare@oracle.com,
	yossi.lev@oracle.com
Subject: [RFC PATCH v1 10/13] mm: add LRU batch lock API's
Date: Wed, 31 Jan 2018 18:04:10 -0500	[thread overview]
Message-ID: <20180131230413.27653-11-daniel.m.jordan@oracle.com> (raw)
In-Reply-To: <20180131230413.27653-1-daniel.m.jordan@oracle.com>

Add the LRU batch locking API's themselves.  This adds the final piece
of infrastructure necessary for locking batches on an LRU list.

The API's lock a specific page on the LRU list, taking only the
appropriate LRU batch lock for a non-sentinel page and taking the
node's/memcg's lru_lock in addition for a sentinel page.

These interfaces are designed for performance: they minimize the number
of times we needlessly drop and then reacquire the same lock(s) when
used in a loop.  They're difficult to use but will do for a prototype.

Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
---
 include/linux/mm_inline.h | 58 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 1f1657c75b1b..11d9fcf93f2b 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -210,6 +210,64 @@ static __always_inline void lru_unlock_all(struct pglist_data *pgdat,
 		local_irq_enable();
 }
 
+/*
+ * page_lru_batch_lock - return the spinlock guarding @page's LRU batch.
+ *
+ * Each node (pglist_data) carries an lru_batch_locks array; the page's
+ * lru_batch field selects the lock for the batch the page belongs to.
+ */
+static __always_inline spinlock_t *page_lru_batch_lock(struct page *page)
+{
+	return &page_pgdat(page)->lru_batch_locks[page->lru_batch].lock;
+}
+
+/**
+ * lru_batch_lock - lock an LRU list batch
+ * @page: page whose batch is to be locked
+ * @locked_lru_batch: in/out; batch lock the caller already holds, or
+ *	NULL if none is held
+ * @locked_pgdat: in/out; node whose lru_lock the caller already holds,
+ *	or NULL if none is held
+ * @flags: in/out; IRQ flags saved when the batch lock was taken
+ *
+ * Takes @page's batch lock, disabling IRQs, and for a sentinel page
+ * additionally takes the node's lru_lock.  Intended to be called in a
+ * loop: locks carried over from a previous call are kept when @page is
+ * covered by the same lock(s), avoiding a needless drop/reacquire.
+ * Release with lru_batch_unlock().
+ */
+static __always_inline void lru_batch_lock(struct page *page,
+					   spinlock_t **locked_lru_batch,
+					   struct pglist_data **locked_pgdat,
+					   unsigned long *flags)
+{
+	spinlock_t *lru_batch = page_lru_batch_lock(page);
+	struct pglist_data *pgdat = page_pgdat(page);
+
+	/* Holding lru_lock without needing it for this page is a misuse. */
+	VM_BUG_ON(*locked_pgdat && !page->lru_sentinel);
+
+	if (lru_batch != *locked_lru_batch) {
+		VM_BUG_ON(*locked_pgdat);
+		VM_BUG_ON(*locked_lru_batch);
+		spin_lock_irqsave(lru_batch, *flags);
+		*locked_lru_batch = lru_batch;
+		if (page->lru_sentinel) {
+			spin_lock(&pgdat->lru_lock);
+			*locked_pgdat = pgdat;
+		}
+	} else if (!*locked_pgdat && page->lru_sentinel) {
+		/* Same batch already held; only the node lock is missing. */
+		spin_lock(&pgdat->lru_lock);
+		*locked_pgdat = pgdat;
+	}
+}
+
+/**
+ * lru_batch_unlock - unlock an LRU list batch
+ * @page: next page the caller intends to operate on, or NULL to drop
+ *	every lock currently held
+ * @locked_lru_batch: in/out; batch lock currently held (must not be NULL)
+ * @locked_pgdat: in/out; node whose lru_lock is held, or NULL
+ * @flags: in/out; IRQ flags saved by the matching lru_batch_lock()
+ *
+ * If @page belongs to a different batch than the one held (or @page is
+ * NULL), drops the node lock (if held) and the batch lock, restoring
+ * IRQ flags.  If @page is in the same batch, keeps the batch lock and
+ * drops only the node's lru_lock, and only when @page is not a
+ * sentinel -- so a subsequent lru_batch_lock(@page) reuses what is
+ * still held.
+ */
+static __always_inline void lru_batch_unlock(struct page *page,
+					     spinlock_t **locked_lru_batch,
+					     struct pglist_data **locked_pgdat,
+					     unsigned long *flags)
+{
+	/* page == NULL yields a NULL batch, forcing the full-unlock path. */
+	spinlock_t *lru_batch = (page) ? page_lru_batch_lock(page) : NULL;
+
+	VM_BUG_ON(!*locked_lru_batch);
+
+	if (lru_batch != *locked_lru_batch) {
+		if (*locked_pgdat) {
+			spin_unlock(&(*locked_pgdat)->lru_lock);
+			*locked_pgdat = NULL;
+		}
+		spin_unlock_irqrestore(*locked_lru_batch, *flags);
+		*locked_lru_batch = NULL;
+	} else if (*locked_pgdat && !page->lru_sentinel) {
+		spin_unlock(&(*locked_pgdat)->lru_lock);
+		*locked_pgdat = NULL;
+	}
+}
+
 /**
  * page_lru_base_type - which LRU list type should a page be on?
  * @page: the page to test
-- 
2.16.1

WARNING: multiple messages have this Message-ID (diff)
From: daniel.m.jordan@oracle.com
To: linux-mm@kvack.org, linux-kernel@vger.kernel.org
Cc: aaron.lu@intel.com, ak@linux.intel.com,
	akpm@linux-foundation.org, Dave.Dice@oracle.com,
	dave@stgolabs.net, khandual@linux.vnet.ibm.com,
	ldufour@linux.vnet.ibm.com, mgorman@suse.de, mhocko@kernel.org,
	pasha.tatashin@oracle.com, steven.sistare@oracle.com,
	yossi.lev@oracle.com
Subject: [RFC PATCH v1 10/13] mm: add LRU batch lock API's
Date: Wed, 31 Jan 2018 18:04:10 -0500	[thread overview]
Message-ID: <20180131230413.27653-11-daniel.m.jordan@oracle.com> (raw)
In-Reply-To: <20180131230413.27653-1-daniel.m.jordan@oracle.com>

Add the LRU batch locking API's themselves.  This adds the final piece
of infrastructure necessary for locking batches on an LRU list.

The API's lock a specific page on the LRU list, taking only the
appropriate LRU batch lock for a non-sentinel page and taking the
node's/memcg's lru_lock in addition for a sentinel page.

These interfaces are designed for performance: they minimize the number
of times we needlessly drop and then reacquire the same lock(s) when
used in a loop.  They're difficult to use but will do for a prototype.

Signed-off-by: Daniel Jordan <daniel.m.jordan@oracle.com>
---
 include/linux/mm_inline.h | 58 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 58 insertions(+)

diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 1f1657c75b1b..11d9fcf93f2b 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -210,6 +210,64 @@ static __always_inline void lru_unlock_all(struct pglist_data *pgdat,
 		local_irq_enable();
 }
 
+/*
+ * page_lru_batch_lock - return the spinlock guarding @page's LRU batch.
+ *
+ * Each node (pglist_data) carries an lru_batch_locks array; the page's
+ * lru_batch field selects the lock for the batch the page belongs to.
+ */
+static __always_inline spinlock_t *page_lru_batch_lock(struct page *page)
+{
+	return &page_pgdat(page)->lru_batch_locks[page->lru_batch].lock;
+}
+
+/**
+ * lru_batch_lock - lock an LRU list batch
+ * @page: page whose batch is to be locked
+ * @locked_lru_batch: in/out; batch lock the caller already holds, or
+ *	NULL if none is held
+ * @locked_pgdat: in/out; node whose lru_lock the caller already holds,
+ *	or NULL if none is held
+ * @flags: in/out; IRQ flags saved when the batch lock was taken
+ *
+ * Takes @page's batch lock, disabling IRQs, and for a sentinel page
+ * additionally takes the node's lru_lock.  Intended to be called in a
+ * loop: locks carried over from a previous call are kept when @page is
+ * covered by the same lock(s), avoiding a needless drop/reacquire.
+ * Release with lru_batch_unlock().
+ */
+static __always_inline void lru_batch_lock(struct page *page,
+					   spinlock_t **locked_lru_batch,
+					   struct pglist_data **locked_pgdat,
+					   unsigned long *flags)
+{
+	spinlock_t *lru_batch = page_lru_batch_lock(page);
+	struct pglist_data *pgdat = page_pgdat(page);
+
+	/* Holding lru_lock without needing it for this page is a misuse. */
+	VM_BUG_ON(*locked_pgdat && !page->lru_sentinel);
+
+	if (lru_batch != *locked_lru_batch) {
+		VM_BUG_ON(*locked_pgdat);
+		VM_BUG_ON(*locked_lru_batch);
+		spin_lock_irqsave(lru_batch, *flags);
+		*locked_lru_batch = lru_batch;
+		if (page->lru_sentinel) {
+			spin_lock(&pgdat->lru_lock);
+			*locked_pgdat = pgdat;
+		}
+	} else if (!*locked_pgdat && page->lru_sentinel) {
+		/* Same batch already held; only the node lock is missing. */
+		spin_lock(&pgdat->lru_lock);
+		*locked_pgdat = pgdat;
+	}
+}
+
+/**
+ * lru_batch_unlock - unlock an LRU list batch
+ * @page: next page the caller intends to operate on, or NULL to drop
+ *	every lock currently held
+ * @locked_lru_batch: in/out; batch lock currently held (must not be NULL)
+ * @locked_pgdat: in/out; node whose lru_lock is held, or NULL
+ * @flags: in/out; IRQ flags saved by the matching lru_batch_lock()
+ *
+ * If @page belongs to a different batch than the one held (or @page is
+ * NULL), drops the node lock (if held) and the batch lock, restoring
+ * IRQ flags.  If @page is in the same batch, keeps the batch lock and
+ * drops only the node's lru_lock, and only when @page is not a
+ * sentinel -- so a subsequent lru_batch_lock(@page) reuses what is
+ * still held.
+ */
+static __always_inline void lru_batch_unlock(struct page *page,
+					     spinlock_t **locked_lru_batch,
+					     struct pglist_data **locked_pgdat,
+					     unsigned long *flags)
+{
+	/* page == NULL yields a NULL batch, forcing the full-unlock path. */
+	spinlock_t *lru_batch = (page) ? page_lru_batch_lock(page) : NULL;
+
+	VM_BUG_ON(!*locked_lru_batch);
+
+	if (lru_batch != *locked_lru_batch) {
+		if (*locked_pgdat) {
+			spin_unlock(&(*locked_pgdat)->lru_lock);
+			*locked_pgdat = NULL;
+		}
+		spin_unlock_irqrestore(*locked_lru_batch, *flags);
+		*locked_lru_batch = NULL;
+	} else if (*locked_pgdat && !page->lru_sentinel) {
+		spin_unlock(&(*locked_pgdat)->lru_lock);
+		*locked_pgdat = NULL;
+	}
+}
+
 /**
  * page_lru_base_type - which LRU list type should a page be on?
  * @page: the page to test
-- 
2.16.1

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: <a href="mailto:dont@kvack.org">email@kvack.org</a>

  parent reply	other threads:[~2018-01-31 23:12 UTC|newest]

Thread overview: 62+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-01-31 23:04 [RFC PATCH v1 00/13] lru_lock scalability daniel.m.jordan
2018-01-31 23:04 ` daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 01/13] mm: add a percpu_pagelist_batch sysctl interface daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 02/13] mm: allow compaction to be disabled daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 03/13] mm: add lock array to pgdat and batch fields to struct page daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-02-01 22:50   ` Tim Chen
2018-02-01 22:50     ` Tim Chen
2018-02-02  4:29     ` Daniel Jordan
2018-02-02  4:29       ` Daniel Jordan
2018-01-31 23:04 ` [RFC PATCH v1 04/13] mm: introduce struct lru_list_head in lruvec to hold per-LRU batch info daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 05/13] mm: add batching logic to add/delete/move API's daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 06/13] mm: add lru_[un]lock_all APIs daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 07/13] mm: convert to-be-refactored lru_lock callsites to lock-all API daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 08/13] mm: temporarily convert " daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 09/13] mm: introduce add-only version of pagevec_lru_move_fn daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-01-31 23:04 ` daniel.m.jordan [this message]
2018-01-31 23:04   ` [RFC PATCH v1 10/13] mm: add LRU batch lock API's daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 11/13] mm: use lru_batch locking in release_pages daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-01-31 23:04 ` [RFC PATCH v1 12/13] mm: split up release_pages into non-sentinel and sentinel passes daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-02-02 14:40   ` Laurent Dufour
2018-02-02 14:40     ` Laurent Dufour
2018-02-02 17:00     ` Laurent Dufour
2018-02-02 17:00       ` Laurent Dufour
2018-02-06 17:47       ` Daniel Jordan
2018-02-06 17:47         ` Daniel Jordan
2018-02-05  4:58   ` [lkp-robot] [mm] 44b163e12f: kernel_BUG_at_mm/swap.c kernel test robot
2018-02-05  4:58     ` kernel test robot
2018-01-31 23:04 ` [RFC PATCH v1 13/13] mm: splice local lists onto the front of the LRU daniel.m.jordan
2018-01-31 23:04   ` daniel.m.jordan
2018-02-01 23:30   ` Tim Chen
2018-02-01 23:30     ` Tim Chen
2018-02-02  5:17     ` Daniel Jordan
2018-02-02  5:17       ` Daniel Jordan
2018-02-02  5:21   ` Aaron Lu
2018-02-02  5:21     ` Aaron Lu
2018-02-06 17:38     ` Daniel Jordan
2018-02-06 17:38       ` Daniel Jordan
2018-02-02 15:22   ` Laurent Dufour
2018-02-02 15:22     ` Laurent Dufour
2018-02-06 18:18     ` Daniel Jordan
2018-02-06 18:18       ` Daniel Jordan
2018-02-01 15:54 ` [RFC PATCH v1 00/13] lru_lock scalability Steven Whitehouse
2018-02-01 15:54   ` Steven Whitehouse
2018-02-02  4:18   ` Daniel Jordan
2018-02-02  4:18     ` Daniel Jordan
2018-02-02 10:50     ` Steven Whitehouse
2018-02-02 10:50       ` Steven Whitehouse
2018-02-08 23:36 ` Andrew Morton
2018-02-08 23:36   ` Andrew Morton
2018-02-13 21:07   ` Daniel Jordan
2018-02-13 21:07     ` Daniel Jordan

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20180131230413.27653-11-daniel.m.jordan@oracle.com \
    --to=daniel.m.jordan@oracle.com \
    --cc=Dave.Dice@oracle.com \
    --cc=aaron.lu@intel.com \
    --cc=ak@linux.intel.com \
    --cc=akpm@linux-foundation.org \
    --cc=dave@stgolabs.net \
    --cc=khandual@linux.vnet.ibm.com \
    --cc=ldufour@linux.vnet.ibm.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=mgorman@suse.de \
    --cc=mhocko@kernel.org \
    --cc=pasha.tatashin@oracle.com \
    --cc=steven.sistare@oracle.com \
    --cc=yossi.lev@oracle.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.