From: Michel Lespinasse <walken@google.com>
To: Matthew Wilcox <willy@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	linux-mm <linux-mm@kvack.org>,
	LKML <linux-kernel@vger.kernel.org>,
	Peter Zijlstra <peterz@infradead.org>,
	Laurent Dufour <ldufour@linux.ibm.com>,
	Vlastimil Babka <vbabka@suse.cz>,
	Liam Howlett <Liam.Howlett@oracle.com>,
	Jerome Glisse <jglisse@redhat.com>,
	Davidlohr Bueso <dave@stgolabs.net>,
	David Rientjes <rientjes@google.com>,
	Hugh Dickins <hughd@google.com>, Ying Han <yinghan@google.com>,
	Jason Gunthorpe <jgg@ziepe.ca>,
	Daniel Jordan <daniel.m.jordan@oracle.com>
Subject: [PATCH v5.5 10/10] mmap locking API: rename mmap_sem to mmap_lock
Date: Thu, 23 Apr 2020 18:39:58 -0700	[thread overview]
Message-ID: <20200424013958.GC158937@google.com> (raw)
In-Reply-To: <20200424012612.GA158937@google.com>

Rename the mmap_sem field to mmap_lock. Any new uses of this lock
should now go through the new mmap locking API. The mmap_lock is
still implemented as an rwsem, though this could change in the future.
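
For illustration only (not part of the change below), here is a minimal
sketch of how a new caller would take the lock through the wrappers
instead of touching mm->mmap_lock directly. The helper
address_is_mapped() is hypothetical; find_vma() and the
mmap_read_lock()/mmap_read_unlock() wrappers are existing APIs:

	/* assumes <linux/mm.h> and <linux/mmap_lock.h> */
	static bool address_is_mapped(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;
		bool mapped;

		mmap_read_lock(mm);		/* was: down_read(&mm->mmap_sem) */
		vma = find_vma(mm, addr);	/* first VMA with vm_end > addr */
		mapped = vma && vma->vm_start <= addr;
		mmap_read_unlock(mm);		/* was: up_read(&mm->mmap_sem) */

		return mapped;
	}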

Signed-off-by: Michel Lespinasse <walken@google.com>
---
 arch/ia64/mm/fault.c                  |  4 +--
 arch/x86/mm/fault.c                   |  2 +-
 drivers/gpu/drm/etnaviv/etnaviv_gem.c |  2 +-
 include/linux/mm_types.h              |  2 +-
 include/linux/mmap_lock.h             | 38 +++++++++++++--------------
 mm/memory.c                           |  2 +-
 mm/mmap.c                             |  4 +--
 mm/mmu_notifier.c                     |  2 +-
 8 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 693f00b117e1..9b95050c2048 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -70,8 +70,8 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
 		| (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
 
-	/* mmap_sem is performance critical.... */
-	prefetchw(&mm->mmap_sem);
+	/* mmap_lock is performance critical.... */
+	prefetchw(&mm->mmap_lock);
 
 	/*
 	 * If we're in an interrupt or have no user context, we must not take the fault..
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 181f66b9049f..35f530f9dfc0 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1522,7 +1522,7 @@ dotraplinkage void
 do_page_fault(struct pt_regs *regs, unsigned long hw_error_code,
 		unsigned long address)
 {
-	prefetchw(&current->mm->mmap_sem);
+	prefetchw(&current->mm->mmap_lock);
 	trace_page_fault_entries(regs, hw_error_code, address);
 
 	if (unlikely(kmmio_fault(regs, address)))
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index dc9ef302f517..701f3995f621 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -661,7 +661,7 @@ static int etnaviv_gem_userptr_get_pages(struct etnaviv_gem_object *etnaviv_obj)
 	struct etnaviv_gem_userptr *userptr = &etnaviv_obj->userptr;
 	int ret, pinned = 0, npages = etnaviv_obj->base.size >> PAGE_SHIFT;
 
-	might_lock_read(&current->mm->mmap_sem);
+	might_lock_read(&current->mm->mmap_lock);
 
 	if (userptr->mm != current->mm)
 		return -EPERM;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4aba6c0c2ba8..d13b90399c16 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -436,7 +436,7 @@ struct mm_struct {
 		spinlock_t page_table_lock; /* Protects page tables and some
 					     * counters
 					     */
-		struct rw_semaphore mmap_sem;
+		struct rw_semaphore mmap_lock;
 
 		struct list_head mmlist; /* List of maybe swapped mm's.	These
 					  * are globally strung together off
diff --git a/include/linux/mmap_lock.h b/include/linux/mmap_lock.h
index 5bf7cee5d93b..9dc632add390 100644
--- a/include/linux/mmap_lock.h
+++ b/include/linux/mmap_lock.h
@@ -4,67 +4,67 @@
 #include <linux/mmdebug.h>
 
 #define MMAP_LOCK_INITIALIZER(name) \
-	.mmap_sem = __RWSEM_INITIALIZER(name.mmap_sem),
+	.mmap_lock = __RWSEM_INITIALIZER(name.mmap_lock),
 
 static inline void mmap_init_lock(struct mm_struct *mm)
 {
-	init_rwsem(&mm->mmap_sem);
+	init_rwsem(&mm->mmap_lock);
 }
 
 static inline void mmap_write_lock(struct mm_struct *mm)
 {
-	down_write(&mm->mmap_sem);
+	down_write(&mm->mmap_lock);
 }
 
 static inline void mmap_write_lock_nested(struct mm_struct *mm, int subclass)
 {
-	down_write_nested(&mm->mmap_sem, subclass);
+	down_write_nested(&mm->mmap_lock, subclass);
 }
 
 static inline int mmap_write_lock_killable(struct mm_struct *mm)
 {
-	return down_write_killable(&mm->mmap_sem);
+	return down_write_killable(&mm->mmap_lock);
 }
 
 static inline bool mmap_write_trylock(struct mm_struct *mm)
 {
-	return down_write_trylock(&mm->mmap_sem) != 0;
+	return down_write_trylock(&mm->mmap_lock) != 0;
 }
 
 static inline void mmap_write_unlock(struct mm_struct *mm)
 {
-	up_write(&mm->mmap_sem);
+	up_write(&mm->mmap_lock);
 }
 
 static inline void mmap_write_downgrade(struct mm_struct *mm)
 {
-	downgrade_write(&mm->mmap_sem);
+	downgrade_write(&mm->mmap_lock);
 }
 
 static inline void mmap_read_lock(struct mm_struct *mm)
 {
-	down_read(&mm->mmap_sem);
+	down_read(&mm->mmap_lock);
 }
 
 static inline int mmap_read_lock_killable(struct mm_struct *mm)
 {
-	return down_read_killable(&mm->mmap_sem);
+	return down_read_killable(&mm->mmap_lock);
 }
 
 static inline bool mmap_read_trylock(struct mm_struct *mm)
 {
-	return down_read_trylock(&mm->mmap_sem) != 0;
+	return down_read_trylock(&mm->mmap_lock) != 0;
 }
 
 static inline void mmap_read_unlock(struct mm_struct *mm)
 {
-	up_read(&mm->mmap_sem);
+	up_read(&mm->mmap_lock);
 }
 
 static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
 {
-	if (down_read_trylock(&mm->mmap_sem)) {
-		rwsem_release(&mm->mmap_sem.dep_map, _RET_IP_);
+	if (down_read_trylock(&mm->mmap_lock)) {
+		rwsem_release(&mm->mmap_lock.dep_map, _RET_IP_);
 		return true;
 	}
 	return false;
@@ -72,19 +72,19 @@ static inline bool mmap_read_trylock_non_owner(struct mm_struct *mm)
 
 static inline void mmap_read_unlock_non_owner(struct mm_struct *mm)
 {
-	up_read_non_owner(&mm->mmap_sem);
+	up_read_non_owner(&mm->mmap_lock);
 }
 
 static inline void mmap_assert_locked(struct mm_struct *mm)
 {
-	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_sem, -1), mm);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_lock, -1), mm);
+	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
 }
 
 static inline void mmap_assert_write_locked(struct mm_struct *mm)
 {
-	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_sem, 0), mm);
-	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_sem), mm);
+	VM_BUG_ON_MM(!lockdep_is_held_type(&mm->mmap_lock, 0), mm);
+	VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
 }
 
 #endif /* _LINUX_MMAP_LOCK_H */
diff --git a/mm/memory.c b/mm/memory.c
index 20f98ea8968e..c2963e7affa9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4811,7 +4811,7 @@ void __might_fault(const char *file, int line)
 	__might_sleep(file, line, 0);
 #if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 	if (current->mm)
-		might_lock_read(&current->mm->mmap_sem);
+		might_lock_read(&current->mm->mmap_lock);
 #endif
 }
 EXPORT_SYMBOL(__might_fault);
diff --git a/mm/mmap.c b/mm/mmap.c
index 2f4ffccc5972..80a47031d5db 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3474,7 +3474,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 		 * The LSB of head.next can't change from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
+		down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_lock);
 		/*
 		 * We can safely modify head.next after taking the
 		 * anon_vma->root->rwsem. If some other vma in this mm shares
@@ -3504,7 +3504,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 		 */
 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
 			BUG();
-		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
+		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_lock);
 	}
 }
 
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 24eb9d1ed0a7..2f348b6c9c9a 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -983,7 +983,7 @@ int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
 	struct mmu_notifier_subscriptions *subscriptions;
 	int ret;
 
-	might_lock(&mm->mmap_sem);
+	might_lock(&mm->mmap_lock);
 
 	subscriptions = smp_load_acquire(&mm->notifier_subscriptions);
 	if (!subscriptions || !subscriptions->has_itree) {
-- 
2.26.2.303.gf8c07b1a785-goog


Thread overview: 65+ messages
2020-04-22  0:14 [PATCH v5 00/10] Add a new mmap locking API wrapping mmap_sem calls Michel Lespinasse
2020-04-22  0:14 ` Michel Lespinasse
2020-04-22  0:14 ` [PATCH v5 01/10] mmap locking API: initial implementation as rwsem wrappers Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-05-18  9:28   ` Vlastimil Babka
2020-05-18 13:18   ` Laurent Dufour
2020-04-22  0:14 ` [PATCH v5 02/10] MMU notifier: use the new mmap locking API Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-05-18  9:28   ` Vlastimil Babka
2020-05-18 13:19   ` Laurent Dufour
2020-04-22  0:14 ` [PATCH v5 03/10] DMA reservations: " Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-05-18  9:30   ` Vlastimil Babka
2020-05-18 13:20   ` Laurent Dufour
2020-04-22  0:14 ` [PATCH v5 04/10] mmap locking API: use coccinelle to convert mmap_sem rwsem call sites Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-05-18  9:36   ` Vlastimil Babka
2020-05-18 13:21   ` Laurent Dufour
2020-04-22  0:14 ` [PATCH v5 05/10] mmap locking API: convert mmap_sem call sites missed by coccinelle Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-05-18  9:44   ` Vlastimil Babka
2020-05-18 13:23   ` Laurent Dufour
2020-04-22  0:14 ` [PATCH v5 06/10] mmap locking API: convert nested write lock sites Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-05-18 10:32   ` Vlastimil Babka
2020-05-19 12:54     ` Michel Lespinasse
2020-05-18 13:24   ` Laurent Dufour
2020-04-22  0:14 ` [PATCH v5 07/10] mmap locking API: add mmap_read_trylock_non_owner() Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-05-18 10:42   ` Vlastimil Babka
2020-04-22  0:14 ` [PATCH v5 08/10] mmap locking API: add MMAP_LOCK_INITIALIZER Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-05-18 10:45   ` Vlastimil Babka
2020-05-19 12:56     ` Michel Lespinasse
2020-05-18 13:33   ` Laurent Dufour
2020-04-22  0:14 ` [PATCH v5 09/10] mmap locking API: add mmap_assert_locked Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-04-22  2:10   ` Michel Lespinasse
2020-04-22  2:10     ` Michel Lespinasse
2020-04-22  2:18     ` Matthew Wilcox
2020-04-24  1:44       ` Michel Lespinasse
2020-04-22  0:14 ` [PATCH v5 10/10] mmap locking API: rename mmap_sem to mmap_lock Michel Lespinasse
2020-04-22  0:14   ` Michel Lespinasse
2020-04-22  1:58   ` Matthew Wilcox
2020-04-22 22:54     ` Michel Lespinasse
2020-04-22 22:54       ` Michel Lespinasse
2020-04-23  1:59       ` Matthew Wilcox
2020-04-24  1:26         ` Michel Lespinasse
2020-04-24  1:38           ` [PATCH v5.5 09/10] mmap locking API: add mmap_assert_locked() and mmap_assert_write_locked() Michel Lespinasse
2020-05-18 11:01             ` Vlastimil Babka
2020-05-19 13:06               ` Michel Lespinasse
2020-04-24  1:39           ` Michel Lespinasse [this message]
2020-05-18 11:07             ` [PATCH v5.5 10/10] mmap locking API: rename mmap_sem to mmap_lock Vlastimil Babka
2020-05-19 13:12               ` Michel Lespinasse
2020-05-18 13:45             ` Laurent Dufour
2020-05-19 13:10               ` Michel Lespinasse
2020-05-19 13:20                 ` Laurent Dufour
2020-05-19 15:32                   ` Matthew Wilcox
2020-05-19 18:14                     ` John Hubbard
2020-05-20  2:39                       ` Michel Lespinasse
2020-05-20  2:39                         ` Michel Lespinasse
2020-05-20  7:32                         ` John Hubbard
2020-05-20  8:02                           ` Michel Lespinasse
2020-05-20  8:02                             ` Michel Lespinasse
2020-05-20 12:48                         ` Jason Gunthorpe
