All of lore.kernel.org
 help / color / mirror / Atom feed
From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>,
	Mike Rapoport <rppt@kernel.org>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	Matthew Wilcox <willy@infradead.org>,
	David Hildenbrand <david@redhat.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Qi Zheng <zhengqi.arch@bytedance.com>,
	Russell King <linux@armlinux.org.uk>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	Greg Ungerer <gerg@linux-m68k.org>,
	Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Helge Deller <deller@gmx.de>,
	John David Anglin <dave.anglin@bell.net>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Alexandre Ghiti <alexghiti@rivosinc.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	Heiko Carstens <hca@linux.ibm.com>,
	Christian Borntraeger <borntraeger@linux.ibm.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>,
	"David S. Miller" <davem@davemloft.net>,
	Chris Zankel <chris@zankel.net>,
	Max Filippov <jcmvbkbc@gmail.com>,
	x86@kernel.org, linux-arm-kernel@lists.infradead.org,
	linux-ia64@vger.kernel.org, linux-m68k@lists.linux-m68k.org,
	linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org,
	linuxppc-dev@lists.ozlabs.org, linux-riscv@lists.infradead.org,
	linux-s390@vger.kernel.org, linux-sh@vger.kernel.org,
	sparclinux@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org
Subject: [PATCH 16/23] s390: gmap use pte_unmap_unlock() not spin_unlock()
Date: Tue, 9 May 2023 22:02:32 -0700 (PDT)	[thread overview]
Message-ID: <5579873-d7b-65e-5de0-a2ba8a144e7@google.com> (raw)
In-Reply-To: <77a5d8c-406b-7068-4f17-23b7ac53bc83@google.com>

pte_alloc_map_lock() expects to be followed by pte_unmap_unlock(): to
keep balance in future, pass ptep as well as ptl to gmap_pte_op_end(),
and use pte_unmap_unlock() instead of direct spin_unlock() (even though
ptep ends up unused inside the macro).

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 arch/s390/mm/gmap.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index d198fc9475a2..638dcd9bc820 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -895,12 +895,12 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
 
 /**
  * gmap_pte_op_end - release the page table lock
- * @ptl: pointer to the spinlock pointer
+ * @ptep: pointer to the locked pte
+ * @ptl: pointer to the page table spinlock
  */
-static void gmap_pte_op_end(spinlock_t *ptl)
+static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
 {
-	if (ptl)
-		spin_unlock(ptl);
+	pte_unmap_unlock(ptep, ptl);
 }
 
 /**
@@ -1011,7 +1011,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 {
 	int rc;
 	pte_t *ptep;
-	spinlock_t *ptl = NULL;
+	spinlock_t *ptl;
 	unsigned long pbits = 0;
 
 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
@@ -1025,7 +1025,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
 	/* Protect and unlock. */
 	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
-	gmap_pte_op_end(ptl);
+	gmap_pte_op_end(ptep, ptl);
 	return rc;
 }
 
@@ -1154,7 +1154,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 				/* Do *NOT* clear the _PAGE_INVALID bit! */
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		if (!rc)
 			break;
@@ -1248,7 +1248,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 			if (!rc)
 				gmap_insert_rmap(sg, vmaddr, rmap);
 			spin_unlock(&sg->guest_table_lock);
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		radix_tree_preload_end();
 		if (rc) {
@@ -2156,7 +2156,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
 			if (!tptep) {
 				spin_unlock(&sg->guest_table_lock);
-				gmap_pte_op_end(ptl);
+				gmap_pte_op_end(sptep, ptl);
 				radix_tree_preload_end();
 				break;
 			}
@@ -2167,7 +2167,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 				rmap = NULL;
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(sptep, ptl);
 			spin_unlock(&sg->guest_table_lock);
 		}
 		radix_tree_preload_end();
@@ -2495,7 +2495,7 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
 				continue;
 			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
 				set_bit(i, bitmap);
-			spin_unlock(ptl);
+			pte_unmap_unlock(ptep, ptl);
 		}
 	}
 	gmap_pmd_op_end(gmap, pmdp);
-- 
2.35.3


WARNING: multiple messages have this Message-ID (diff)
From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>,
	Mike Rapoport <rppt@kernel.org>,
	 "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	 Matthew Wilcox <willy@infradead.org>,
	David Hildenbrand <david@redhat.com>,
	 Suren Baghdasaryan <surenb@google.com>,
	 Qi Zheng <zhengqi.arch@bytedance.com>,
	 Russell King <linux@armlinux.org.uk>,
	 Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	 Geert Uytterhoeven <geert@linux-m68k.org>,
	 Greg Ungerer <gerg@linux-m68k.org>,
	Michal Simek <monstr@monstr.eu>,
	 Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	 Helge Deller <deller@gmx.de>,
	John David Anglin <dave.anglin@bell.net>,
	 "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>,
	 Michael Ellerman <mpe@ellerman.id.au>,
	 Alexandre Ghiti <alexghiti@rivosinc.com>,
	 Palmer Dabbelt <palmer@dabbelt.com>,
	Heiko Carstens <hca@linux.ibm.com>,
	 Christian Borntraeger <borntraeger@linux.ibm.com>,
	 Claudio Imbrenda <imbrenda@linux.ibm.com>,
	 John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>,
	 "David S. Miller" <davem@davemloft.net>,
	Chris Zankel <chris@zankel.net>,
	 Max Filippov <jcmvbkbc@gmail.com>,
	x86@kernel.org,  linux-arm-kernel@lists.infradead.org,
	linux-ia64@vger.kernel.org,  linux-m68k@lists.linux-m68k.org,
	linux-mips@vger.kernel.org,  linux-parisc@vger.kernel.org,
	linuxppc-dev@lists.ozlabs.org,  linux-riscv@lists.infradead.org,
	linux-s390@vger.kernel.org,  linux-sh@vger.kernel.org,
	sparclinux@vger.kernel.org,  linux-kernel@vger.kernel.org,
	linux-mm@kvack.org
Subject: [PATCH 16/23] s390: gmap use pte_unmap_unlock() not spin_unlock()
Date: Tue, 9 May 2023 22:02:32 -0700 (PDT)	[thread overview]
Message-ID: <5579873-d7b-65e-5de0-a2ba8a144e7@google.com> (raw)
In-Reply-To: <77a5d8c-406b-7068-4f17-23b7ac53bc83@google.com>

pte_alloc_map_lock() expects to be followed by pte_unmap_unlock(): to
keep balance in future, pass ptep as well as ptl to gmap_pte_op_end(),
and use pte_unmap_unlock() instead of direct spin_unlock() (even though
ptep ends up unused inside the macro).

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 arch/s390/mm/gmap.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index d198fc9475a2..638dcd9bc820 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -895,12 +895,12 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
 
 /**
  * gmap_pte_op_end - release the page table lock
- * @ptl: pointer to the spinlock pointer
+ * @ptep: pointer to the locked pte
+ * @ptl: pointer to the page table spinlock
  */
-static void gmap_pte_op_end(spinlock_t *ptl)
+static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
 {
-	if (ptl)
-		spin_unlock(ptl);
+	pte_unmap_unlock(ptep, ptl);
 }
 
 /**
@@ -1011,7 +1011,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 {
 	int rc;
 	pte_t *ptep;
-	spinlock_t *ptl = NULL;
+	spinlock_t *ptl;
 	unsigned long pbits = 0;
 
 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
@@ -1025,7 +1025,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
 	/* Protect and unlock. */
 	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
-	gmap_pte_op_end(ptl);
+	gmap_pte_op_end(ptep, ptl);
 	return rc;
 }
 
@@ -1154,7 +1154,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 				/* Do *NOT* clear the _PAGE_INVALID bit! */
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		if (!rc)
 			break;
@@ -1248,7 +1248,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 			if (!rc)
 				gmap_insert_rmap(sg, vmaddr, rmap);
 			spin_unlock(&sg->guest_table_lock);
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		radix_tree_preload_end();
 		if (rc) {
@@ -2156,7 +2156,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
 			if (!tptep) {
 				spin_unlock(&sg->guest_table_lock);
-				gmap_pte_op_end(ptl);
+				gmap_pte_op_end(sptep, ptl);
 				radix_tree_preload_end();
 				break;
 			}
@@ -2167,7 +2167,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 				rmap = NULL;
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(sptep, ptl);
 			spin_unlock(&sg->guest_table_lock);
 		}
 		radix_tree_preload_end();
@@ -2495,7 +2495,7 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
 				continue;
 			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
 				set_bit(i, bitmap);
-			spin_unlock(ptl);
+			pte_unmap_unlock(ptep, ptl);
 		}
 	}
 	gmap_pmd_op_end(gmap, pmdp);
-- 
2.35.3


_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

WARNING: multiple messages have this Message-ID (diff)
From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-ia64@vger.kernel.org, David Hildenbrand <david@redhat.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Qi Zheng <zhengqi.arch@bytedance.com>,
	linux-kernel@vger.kernel.org, Max Filippov <jcmvbkbc@gmail.com>,
	sparclinux@vger.kernel.org, linux-riscv@lists.infradead.org,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	Will Deacon <will@kernel.org>, Greg Ungerer <gerg@linux-m68k.org>,
	linux-s390@vger.kernel.org, linux-sh@vger.kernel.org,
	Helge Deller <deller@gmx.de>,
	x86@kernel.org, Russell King <linux@armlinux.org.uk>,
	Matthew Wilcox <willy@infradead.org>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	Christian Borntraeger <borntraeger@linux.ibm.com>,
	Alexandre Ghiti <alexghiti@rivosinc.com>,
	Heiko Carstens <hca@linux.ibm.com>,
	linux-m68k@lists.linux-m68k.org,
	John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>,
	John David Anglin <dave.anglin@bell.net>,
	Suren Baghdasaryan <surenb@google.com>,
	linux-arm-kernel@lists.infradead.org,
	Chris Zankel <chris@zankel.net>, Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	linux-parisc@vger.kernel.org, linux-mm@kvack.org,
	linux-mips@vger.kernel.org, Palmer Dabbelt <palmer@dabbelt.com>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>,
	linuxppc-dev@lists.ozlabs.org,
	"David S. Miller" <davem@davemloft.net>,
	Mike Rapoport <rppt@kernel.org>,
	Mike Kravetz <mike.kravetz@oracle.com>
Subject: [PATCH 16/23] s390: gmap use pte_unmap_unlock() not spin_unlock()
Date: Tue, 9 May 2023 22:02:32 -0700 (PDT)	[thread overview]
Message-ID: <5579873-d7b-65e-5de0-a2ba8a144e7@google.com> (raw)
In-Reply-To: <77a5d8c-406b-7068-4f17-23b7ac53bc83@google.com>

pte_alloc_map_lock() expects to be followed by pte_unmap_unlock(): to
keep balance in future, pass ptep as well as ptl to gmap_pte_op_end(),
and use pte_unmap_unlock() instead of direct spin_unlock() (even though
ptep ends up unused inside the macro).

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 arch/s390/mm/gmap.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index d198fc9475a2..638dcd9bc820 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -895,12 +895,12 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
 
 /**
  * gmap_pte_op_end - release the page table lock
- * @ptl: pointer to the spinlock pointer
+ * @ptep: pointer to the locked pte
+ * @ptl: pointer to the page table spinlock
  */
-static void gmap_pte_op_end(spinlock_t *ptl)
+static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
 {
-	if (ptl)
-		spin_unlock(ptl);
+	pte_unmap_unlock(ptep, ptl);
 }
 
 /**
@@ -1011,7 +1011,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 {
 	int rc;
 	pte_t *ptep;
-	spinlock_t *ptl = NULL;
+	spinlock_t *ptl;
 	unsigned long pbits = 0;
 
 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
@@ -1025,7 +1025,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
 	/* Protect and unlock. */
 	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
-	gmap_pte_op_end(ptl);
+	gmap_pte_op_end(ptep, ptl);
 	return rc;
 }
 
@@ -1154,7 +1154,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 				/* Do *NOT* clear the _PAGE_INVALID bit! */
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		if (!rc)
 			break;
@@ -1248,7 +1248,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 			if (!rc)
 				gmap_insert_rmap(sg, vmaddr, rmap);
 			spin_unlock(&sg->guest_table_lock);
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		radix_tree_preload_end();
 		if (rc) {
@@ -2156,7 +2156,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
 			if (!tptep) {
 				spin_unlock(&sg->guest_table_lock);
-				gmap_pte_op_end(ptl);
+				gmap_pte_op_end(sptep, ptl);
 				radix_tree_preload_end();
 				break;
 			}
@@ -2167,7 +2167,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 				rmap = NULL;
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(sptep, ptl);
 			spin_unlock(&sg->guest_table_lock);
 		}
 		radix_tree_preload_end();
@@ -2495,7 +2495,7 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
 				continue;
 			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
 				set_bit(i, bitmap);
-			spin_unlock(ptl);
+			pte_unmap_unlock(ptep, ptl);
 		}
 	}
 	gmap_pmd_op_end(gmap, pmdp);
-- 
2.35.3


WARNING: multiple messages have this Message-ID (diff)
From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>,
	Mike Rapoport <rppt@kernel.org>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	Matthew Wilcox <willy@infradead.org>,
	David Hildenbrand <david@redhat.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Qi Zheng <zhengqi.arch@bytedance.com>,
	Russell King <linux@armlinux.org.uk>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	Greg Ungerer <gerg@linux-m68k.org>,
	Michal Simek <monstr@monstr.eu>,
	Thomas Bogendoerfer <tsbogend@alpha.franken.de>,
	Helge Deller <deller@gmx.de>,
	John David Anglin <dave.anglin@bell.net>,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>,
	Michael Ellerman <mpe@ellerman.id.au>,
	Alexandre Ghiti <alexghiti@rivosinc.com>,
	Palmer Dabbelt <palmer@dabbelt.com>,
	Heiko Carstens <hca@linux.ibm.com>,
	Christian Borntraeger <borntraeger@linux.ibm.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>,
	"David S. Miller" <davem@davemloft.net>,
	Chris Zankel <chris@zankel.net>,
	Max Filippov <jcmvbkbc@gmail.com>,
	x86@kernel.org, linux-arm-kernel@lists.infradead.org,
	linux-ia64@vger.kernel.org, linux-m68k@lists.linux-m68k.org,
	linux-mips@vger.kernel.org, linux-parisc@vger.kernel.org,
	linuxppc-dev@lists.ozlabs.org, linux-riscv@lists.infradead.org,
	linux-s390@vger.kernel.org, linux-sh@vger.kernel.org,
	sparclinux@vger.kernel.org, linux-kernel@vger.kernel.org,
	linux-mm@kvack.org
Subject: [PATCH 16/23] s390: gmap use pte_unmap_unlock() not spin_unlock()
Date: Wed, 10 May 2023 05:02:32 +0000	[thread overview]
Message-ID: <5579873-d7b-65e-5de0-a2ba8a144e7@google.com> (raw)
In-Reply-To: <77a5d8c-406b-7068-4f17-23b7ac53bc83@google.com>

pte_alloc_map_lock() expects to be followed by pte_unmap_unlock(): to
keep balance in future, pass ptep as well as ptl to gmap_pte_op_end(),
and use pte_unmap_unlock() instead of direct spin_unlock() (even though
ptep ends up unused inside the macro).

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 arch/s390/mm/gmap.c | 22 +++++++++++-----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index d198fc9475a2..638dcd9bc820 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -895,12 +895,12 @@ static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
 
 /**
  * gmap_pte_op_end - release the page table lock
- * @ptl: pointer to the spinlock pointer
+ * @ptep: pointer to the locked pte
+ * @ptl: pointer to the page table spinlock
  */
-static void gmap_pte_op_end(spinlock_t *ptl)
+static void gmap_pte_op_end(pte_t *ptep, spinlock_t *ptl)
 {
-	if (ptl)
-		spin_unlock(ptl);
+	pte_unmap_unlock(ptep, ptl);
 }
 
 /**
@@ -1011,7 +1011,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 {
 	int rc;
 	pte_t *ptep;
-	spinlock_t *ptl = NULL;
+	spinlock_t *ptl;
 	unsigned long pbits = 0;
 
 	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
@@ -1025,7 +1025,7 @@ static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
 	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
 	/* Protect and unlock. */
 	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
-	gmap_pte_op_end(ptl);
+	gmap_pte_op_end(ptep, ptl);
 	return rc;
 }
 
@@ -1154,7 +1154,7 @@ int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
 				/* Do *NOT* clear the _PAGE_INVALID bit! */
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		if (!rc)
 			break;
@@ -1248,7 +1248,7 @@ static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
 			if (!rc)
 				gmap_insert_rmap(sg, vmaddr, rmap);
 			spin_unlock(&sg->guest_table_lock);
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(ptep, ptl);
 		}
 		radix_tree_preload_end();
 		if (rc) {
@@ -2156,7 +2156,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
 			if (!tptep) {
 				spin_unlock(&sg->guest_table_lock);
-				gmap_pte_op_end(ptl);
+				gmap_pte_op_end(sptep, ptl);
 				radix_tree_preload_end();
 				break;
 			}
@@ -2167,7 +2167,7 @@ int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
 				rmap = NULL;
 				rc = 0;
 			}
-			gmap_pte_op_end(ptl);
+			gmap_pte_op_end(sptep, ptl);
 			spin_unlock(&sg->guest_table_lock);
 		}
 		radix_tree_preload_end();
@@ -2495,7 +2495,7 @@ void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
 				continue;
 			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
 				set_bit(i, bitmap);
-			spin_unlock(ptl);
+			pte_unmap_unlock(ptep, ptl);
 		}
 	}
 	gmap_pmd_op_end(gmap, pmdp);
-- 
2.35.3

  parent reply	other threads:[~2023-05-10  5:02 UTC|newest]

Thread overview: 209+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-05-10  4:39 [PATCH 00/23] arch: allow pte_offset_map[_lock]() to fail Hugh Dickins
2023-05-10  4:39 ` Hugh Dickins
2023-05-10  4:39 ` Hugh Dickins
2023-05-10  4:39 ` Hugh Dickins
2023-05-10  4:42 ` [PATCH 01/23] arm: " Hugh Dickins
2023-05-10  4:42   ` Hugh Dickins
2023-05-10  4:42   ` Hugh Dickins
2023-05-10  4:42   ` Hugh Dickins
2023-05-10 14:28   ` Matthew Wilcox
2023-05-10 14:28     ` Matthew Wilcox
2023-05-10 14:28     ` Matthew Wilcox
2023-05-10 14:28     ` Matthew Wilcox
2023-05-11  3:40     ` Hugh Dickins
2023-05-11  3:40       ` Hugh Dickins
2023-05-11  3:40       ` Hugh Dickins
2023-05-11  3:40       ` Hugh Dickins
2023-05-10  4:43 ` [PATCH 02/23] arm64: allow pte_offset_map() " Hugh Dickins
2023-05-10  4:43   ` Hugh Dickins
2023-05-10  4:43   ` Hugh Dickins
2023-05-10  4:43   ` Hugh Dickins
2023-05-25 16:37   ` Catalin Marinas
2023-05-25 16:37     ` Catalin Marinas
2023-05-25 16:37     ` Catalin Marinas
2023-05-25 16:37     ` Catalin Marinas
2023-05-25 16:37     ` Catalin Marinas
2023-05-10  4:45 ` [PATCH 03/23] arm64/hugetlb: pte_alloc_huge() pte_offset_huge() Hugh Dickins
2023-05-10  4:45   ` Hugh Dickins
2023-05-10  4:45   ` Hugh Dickins
2023-05-10  4:45   ` Hugh Dickins
2023-05-25 16:37   ` Catalin Marinas
2023-05-25 16:37     ` Catalin Marinas
2023-05-25 16:37     ` Catalin Marinas
2023-05-25 16:37     ` Catalin Marinas
2023-05-25 16:37     ` Catalin Marinas
2023-05-10  4:47 ` [PATCH 04/23] ia64/hugetlb: " Hugh Dickins
2023-05-10  4:47   ` Hugh Dickins
2023-05-10  4:47   ` Hugh Dickins
2023-05-10  4:47   ` Hugh Dickins
2023-05-10  4:48 ` [PATCH 05/23] m68k: allow pte_offset_map[_lock]() to fail Hugh Dickins
2023-05-10  4:48   ` Hugh Dickins
2023-05-10  4:48   ` Hugh Dickins
2023-05-10  4:48   ` Hugh Dickins
2023-05-10  7:13   ` Geert Uytterhoeven
2023-05-10  7:13     ` Geert Uytterhoeven
2023-05-10  7:13     ` Geert Uytterhoeven
2023-05-10  7:13     ` Geert Uytterhoeven
2023-05-11  2:57     ` Hugh Dickins
2023-05-11  2:57       ` Hugh Dickins
2023-05-11  2:57       ` Hugh Dickins
2023-05-11  2:57       ` Hugh Dickins
2023-05-11  6:53       ` Geert Uytterhoeven
2023-05-11  6:53         ` Geert Uytterhoeven
2023-05-11  6:53         ` Geert Uytterhoeven
2023-05-11  6:53         ` Geert Uytterhoeven
2023-05-10  4:49 ` [PATCH 06/23] microblaze: allow pte_offset_map() " Hugh Dickins
2023-05-10  4:49   ` Hugh Dickins
2023-05-10  4:49   ` Hugh Dickins
2023-05-10  4:49   ` Hugh Dickins
2023-05-10  4:51 ` [PATCH 07/23] mips: update_mmu_cache() can replace __update_tlb() Hugh Dickins
2023-05-10  4:51   ` Hugh Dickins
2023-05-10  4:51   ` Hugh Dickins
2023-05-10  4:51   ` Hugh Dickins
2023-05-10  4:52 ` [PATCH 08/23] parisc: add pte_unmap() to balance get_ptep() Hugh Dickins
2023-05-10  4:52   ` Hugh Dickins
2023-05-10  4:52   ` Hugh Dickins
2023-05-10  4:52   ` Hugh Dickins
2023-05-13 21:35   ` Helge Deller
2023-05-13 21:35     ` Helge Deller
2023-05-13 21:35     ` Helge Deller
2023-05-13 21:35     ` Helge Deller
2023-05-14 18:20     ` Hugh Dickins
2023-05-14 18:20       ` Hugh Dickins
2023-05-14 18:20       ` Hugh Dickins
2023-05-14 18:20       ` Hugh Dickins
2023-05-10  4:54 ` [PATCH 09/23] parisc: unmap_uncached_pte() use pte_offset_kernel() Hugh Dickins
2023-05-10  4:54   ` Hugh Dickins
2023-05-10  4:54   ` Hugh Dickins
2023-05-10  4:54   ` Hugh Dickins
2023-05-10  4:55 ` [PATCH 10/23] parisc/hugetlb: pte_alloc_huge() pte_offset_huge() Hugh Dickins
2023-05-10  4:55   ` Hugh Dickins
2023-05-10  4:55   ` Hugh Dickins
2023-05-10  4:55   ` Hugh Dickins
2023-05-10  4:56 ` [PATCH 11/23] powerpc: kvmppc_unmap_free_pmd() pte_offset_kernel() Hugh Dickins
2023-05-10  4:56   ` Hugh Dickins
2023-05-10  4:56   ` Hugh Dickins
2023-05-10  4:56   ` Hugh Dickins
2023-05-10  4:57 ` [PATCH 12/23] powerpc: allow pte_offset_map[_lock]() to fail Hugh Dickins
2023-05-10  4:57   ` Hugh Dickins
2023-05-10  4:57   ` Hugh Dickins
2023-05-10  4:57   ` Hugh Dickins
2023-05-10  4:58 ` [PATCH 13/23] powerpc/hugetlb: pte_alloc_huge() Hugh Dickins
2023-05-10  4:58   ` Hugh Dickins
2023-05-10  4:58   ` Hugh Dickins
2023-05-10  4:58   ` Hugh Dickins
2023-05-10  4:59 ` [PATCH 14/23] riscv/hugetlb: pte_alloc_huge() pte_offset_huge() Hugh Dickins
2023-05-10  4:59   ` Hugh Dickins
2023-05-10  4:59   ` Hugh Dickins
2023-05-10  4:59   ` Hugh Dickins
2023-05-10  8:01   ` Alexandre Ghiti
2023-05-10  8:01     ` Alexandre Ghiti
2023-05-10  8:01     ` Alexandre Ghiti
2023-05-10  8:01     ` Alexandre Ghiti
2023-05-10 14:01   ` Palmer Dabbelt
2023-05-10 14:01     ` Palmer Dabbelt
2023-05-10 14:01     ` Palmer Dabbelt
2023-05-10 14:01     ` Palmer Dabbelt
2023-05-10  5:01 ` [PATCH 15/23] s390: allow pte_offset_map_lock() to fail Hugh Dickins
2023-05-10  5:01   ` Hugh Dickins
2023-05-10  5:01   ` Hugh Dickins
2023-05-10  5:01   ` Hugh Dickins
2023-05-17 10:35   ` Claudio Imbrenda
2023-05-17 10:35     ` Claudio Imbrenda
2023-05-17 10:35     ` Claudio Imbrenda
2023-05-17 10:35     ` Claudio Imbrenda
2023-05-17 10:35     ` Claudio Imbrenda
2023-05-17 21:50     ` Hugh Dickins
2023-05-17 21:50       ` Hugh Dickins
2023-05-17 21:50       ` Hugh Dickins
2023-05-17 21:50       ` Hugh Dickins
2023-05-17 21:50       ` Hugh Dickins
2023-05-23 12:00       ` Claudio Imbrenda
2023-05-23 12:00         ` Claudio Imbrenda
2023-05-23 12:00         ` Claudio Imbrenda
2023-05-23 12:00         ` Claudio Imbrenda
2023-05-23 12:00         ` Claudio Imbrenda
2023-05-24  1:49         ` Hugh Dickins
2023-05-24  1:49           ` Hugh Dickins
2023-05-24  1:49           ` Hugh Dickins
2023-05-24  1:49           ` Hugh Dickins
2023-05-24  1:49           ` Hugh Dickins
2023-05-25  7:23           ` Claudio Imbrenda
2023-05-25  7:23             ` Claudio Imbrenda
2023-05-25  7:23             ` Claudio Imbrenda
2023-05-25  7:23             ` Claudio Imbrenda
2023-05-25  7:23             ` Claudio Imbrenda
2023-05-10  5:02 ` Hugh Dickins [this message]
2023-05-10  5:02   ` [PATCH 16/23] s390: gmap use pte_unmap_unlock() not spin_unlock() Hugh Dickins
2023-05-10  5:02   ` Hugh Dickins
2023-05-10  5:02   ` Hugh Dickins
2023-05-17 11:28   ` Alexander Gordeev
2023-05-17 11:28     ` Alexander Gordeev
2023-05-17 11:28     ` Alexander Gordeev
2023-05-17 11:28     ` Alexander Gordeev
2023-05-17 11:28     ` Alexander Gordeev
2023-05-10  5:03 ` [PATCH 17/23] sh/hugetlb: pte_alloc_huge() pte_offset_huge() Hugh Dickins
2023-05-10  5:03   ` Hugh Dickins
2023-05-10  5:03   ` Hugh Dickins
2023-05-10  5:03   ` Hugh Dickins
2023-05-10  5:04 ` [PATCH 18/23] sparc/hugetlb: " Hugh Dickins
2023-05-10  5:04   ` Hugh Dickins
2023-05-10  5:04   ` Hugh Dickins
2023-05-10  5:04   ` Hugh Dickins
2023-05-10  5:05 ` [PATCH 19/23] sparc: allow pte_offset_map() to fail Hugh Dickins
2023-05-10  5:05   ` Hugh Dickins
2023-05-10  5:05   ` Hugh Dickins
2023-05-10  5:05   ` Hugh Dickins
2023-05-10  5:07 ` [PATCH 20/23] sparc: iounit and iommu use pte_offset_kernel() Hugh Dickins
2023-05-10  5:07   ` Hugh Dickins
2023-05-10  5:07   ` Hugh Dickins
2023-05-10  5:07   ` Hugh Dickins
2023-05-10  5:08 ` [PATCH 21/23] x86: Allow get_locked_pte() to fail Hugh Dickins
2023-05-10  5:08   ` Hugh Dickins
2023-05-10  5:08   ` Hugh Dickins
2023-05-10  5:08   ` Hugh Dickins
2023-05-10  8:18   ` Peter Zijlstra
2023-05-10  8:18     ` Peter Zijlstra
2023-05-10  8:18     ` Peter Zijlstra
2023-05-10  8:18     ` Peter Zijlstra
2023-05-11  3:16     ` Hugh Dickins
2023-05-11  3:16       ` Hugh Dickins
2023-05-11  3:16       ` Hugh Dickins
2023-05-11  3:16       ` Hugh Dickins
2023-05-11  7:29       ` Peter Zijlstra
2023-05-11  7:29         ` Peter Zijlstra
2023-05-11  7:29         ` Peter Zijlstra
2023-05-11  7:29         ` Peter Zijlstra
2023-05-10  5:09 ` [PATCH 22/23] x86: sme_populate_pgd() use pte_offset_kernel() Hugh Dickins
2023-05-10  5:09   ` Hugh Dickins
2023-05-10  5:09   ` Hugh Dickins
2023-05-10  5:09   ` Hugh Dickins
2023-05-10  5:11 ` [PATCH 23/23] xtensa: add pte_unmap() to balance pte_offset_map() Hugh Dickins
2023-05-10  5:11   ` Hugh Dickins
2023-05-10  5:11   ` Hugh Dickins
2023-05-10  5:11   ` Hugh Dickins
2023-05-10  6:07 ` [PATCH 00/23] arch: allow pte_offset_map[_lock]() to fail Matthew Wilcox
2023-05-10  6:07   ` Matthew Wilcox
2023-05-10  6:07   ` Matthew Wilcox
2023-05-10  6:07   ` Matthew Wilcox
2023-05-11  4:35   ` Hugh Dickins
2023-05-11  4:35     ` Hugh Dickins
2023-05-11  4:35     ` Hugh Dickins
2023-05-11  4:35     ` Hugh Dickins
2023-05-11 14:02     ` Matthew Wilcox
2023-05-11 14:02       ` Matthew Wilcox
2023-05-11 14:02       ` Matthew Wilcox
2023-05-11 14:02       ` Matthew Wilcox
2023-05-11 22:37       ` Hugh Dickins
2023-05-11 22:37         ` Hugh Dickins
2023-05-11 22:37         ` Hugh Dickins
2023-05-11 22:37         ` Hugh Dickins
2023-05-12  3:38       ` Mike Rapoport
2023-05-12  3:38         ` Mike Rapoport
2023-05-12  3:38         ` Mike Rapoport
2023-05-12  3:38         ` Mike Rapoport
2023-05-16 10:41       ` Peter Zijlstra
2023-05-16 10:41         ` Peter Zijlstra
2023-05-16 10:41         ` Peter Zijlstra
2023-05-16 10:41         ` Peter Zijlstra
2023-05-16 10:41         ` Peter Zijlstra

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=5579873-d7b-65e-5de0-a2ba8a144e7@google.com \
    --to=hughd@google.com \
    --cc=akpm@linux-foundation.org \
    --cc=alexghiti@rivosinc.com \
    --cc=aneesh.kumar@linux.ibm.com \
    --cc=borntraeger@linux.ibm.com \
    --cc=catalin.marinas@arm.com \
    --cc=chris@zankel.net \
    --cc=dave.anglin@bell.net \
    --cc=davem@davemloft.net \
    --cc=david@redhat.com \
    --cc=deller@gmx.de \
    --cc=geert@linux-m68k.org \
    --cc=gerg@linux-m68k.org \
    --cc=glaubitz@physik.fu-berlin.de \
    --cc=hca@linux.ibm.com \
    --cc=imbrenda@linux.ibm.com \
    --cc=jcmvbkbc@gmail.com \
    --cc=kirill.shutemov@linux.intel.com \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-ia64@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-m68k@lists.linux-m68k.org \
    --cc=linux-mips@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-parisc@vger.kernel.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=linux-s390@vger.kernel.org \
    --cc=linux-sh@vger.kernel.org \
    --cc=linux@armlinux.org.uk \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=mike.kravetz@oracle.com \
    --cc=monstr@monstr.eu \
    --cc=mpe@ellerman.id.au \
    --cc=palmer@dabbelt.com \
    --cc=rppt@kernel.org \
    --cc=sparclinux@vger.kernel.org \
    --cc=surenb@google.com \
    --cc=tsbogend@alpha.franken.de \
    --cc=will@kernel.org \
    --cc=willy@infradead.org \
    --cc=x86@kernel.org \
    --cc=zhengqi.arch@bytedance.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.