All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Matthew Wilcox <willy@infradead.org>
Cc: linux-mm@kvack.org, linux-arch@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, linux-csky@vger.kernel.org,
	linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev,
	linux-m68k@lists.linux-m68k.org, linux-mips@vger.kernel.org,
	linux-openrisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org,
	linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
	linux-um@lists.infradead.org, xen-devel@lists.xenproject.org,
	kvm@vger.kernel.org,
	"Vishal Moola (Oracle)" <vishal.moola@gmail.com>,
	David Hildenbrand <david@redhat.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>
Subject: [PATCH v2 03/34] s390: Use pt_frag_refcount for pagetables
Date: Mon,  1 May 2023 12:27:58 -0700	[thread overview]
Message-ID: <20230501192829.17086-4-vishal.moola@gmail.com> (raw)
In-Reply-To: <20230501192829.17086-1-vishal.moola@gmail.com>

s390 currently uses _refcount to identify fragmented page tables.
The page table struct already has a member pt_frag_refcount used by
powerpc, so have s390 use that instead of the _refcount field as well.
This improves the safety for _refcount and the page table tracking.

This also allows us to simplify the tracking since we can once again use
the lower byte of pt_frag_refcount instead of the upper byte of _refcount.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 arch/s390/mm/pgalloc.c | 38 +++++++++++++++-----------------------
 1 file changed, 15 insertions(+), 23 deletions(-)

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 66ab68db9842..6b99932abc66 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -182,20 +182,17 @@ void page_table_free_pgste(struct page *page)
  * As follows from the above, no unallocated or fully allocated parent
  * pages are contained in mm_context_t::pgtable_list.
  *
- * The upper byte (bits 24-31) of the parent page _refcount is used
+ * The lower byte (bits 0-7) of the parent page pt_frag_refcount is used
  * for tracking contained 2KB-pgtables and has the following format:
  *
  *   PP  AA
- * 01234567    upper byte (bits 24-31) of struct page::_refcount
+ * 01234567    lower byte (bits 0-7) of struct page::pt_frag_refcount
  *   ||  ||
  *   ||  |+--- upper 2KB-pgtable is allocated
  *   ||  +---- lower 2KB-pgtable is allocated
  *   |+------- upper 2KB-pgtable is pending for removal
  *   +-------- lower 2KB-pgtable is pending for removal
  *
- * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
- * using _refcount is possible).
- *
  * When 2KB-pgtable is allocated the corresponding AA bit is set to 1.
  * The parent page is either:
  *   - added to mm_context_t::pgtable_list in case the second half of the
@@ -243,11 +240,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
-			mask = atomic_read(&page->_refcount) >> 24;
+			mask = atomic_read(&page->pt_frag_refcount);
 			/*
 			 * The pending removal bits must also be checked.
 			 * Failure to do so might lead to an impossible
-			 * value of (i.e 0x13 or 0x23) written to _refcount.
+			 * value of (i.e 0x13 or 0x23) written to
+			 * pt_frag_refcount.
 			 * Such values violate the assumption that pending and
 			 * allocation bits are mutually exclusive, and the rest
 			 * of the code unrails as result. That could lead to
@@ -259,8 +257,8 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				bit = mask & 1;		/* =1 -> second 2K */
 				if (bit)
 					table += PTRS_PER_PTE;
-				atomic_xor_bits(&page->_refcount,
-							0x01U << (bit + 24));
+				atomic_xor_bits(&page->pt_frag_refcount,
+							0x01U << bit);
 				list_del(&page->lru);
 			}
 		}
@@ -281,12 +279,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_virt(page);
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
-		atomic_xor_bits(&page->_refcount, 0x03U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
-		atomic_xor_bits(&page->_refcount, 0x01U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x01U);
 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
@@ -323,22 +321,19 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		 * will happen outside of the critical section from this
 		 * function or from __tlb_remove_table()
 		 */
-		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 		if (mask & 0x03U)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
 		spin_unlock_bh(&mm->context.lock);
-		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x10U << bit);
 		if (mask != 0x00U)
 			return;
 		half = 0x01U << bit;
 	} else {
 		half = 0x03U;
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 	}
 
 	page_table_release_check(page, table, half, mask);
@@ -368,8 +363,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	 * outside of the critical section from __tlb_remove_table() or from
 	 * page_table_free()
 	 */
-	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-	mask >>= 24;
+	mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 	if (mask & 0x03U)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
@@ -391,14 +385,12 @@ void __tlb_remove_table(void *_table)
 		return;
 	case 0x01U:	/* lower 2K of a 4K page table */
 	case 0x02U:	/* higher 2K of a 4K page table */
-		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, mask << 4);
 		if (mask != 0x00U)
 			return;
 		break;
 	case 0x03U:	/* 4K page table with pgstes */
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		break;
 	}
 
-- 
2.39.2


_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv

WARNING: multiple messages have this Message-ID (diff)
From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Matthew Wilcox <willy@infradead.org>
Cc: linux-mm@kvack.org, linux-arch@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, linux-csky@vger.kernel.org,
	linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev,
	linux-m68k@lists.linux-m68k.org, linux-mips@vger.kernel.org,
	linux-openrisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org,
	linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
	linux-um@lists.infradead.org, xen-devel@lists.xenproject.org,
	kvm@vger.kernel.org,
	"Vishal Moola (Oracle)" <vishal.moola@gmail.com>,
	David Hildenbrand <david@redhat.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>
Subject: [PATCH v2 03/34] s390: Use pt_frag_refcount for pagetables
Date: Mon,  1 May 2023 12:27:58 -0700	[thread overview]
Message-ID: <20230501192829.17086-4-vishal.moola@gmail.com> (raw)
In-Reply-To: <20230501192829.17086-1-vishal.moola@gmail.com>

s390 currently uses _refcount to identify fragmented page tables.
The page table struct already has a member pt_frag_refcount used by
powerpc, so have s390 use that instead of the _refcount field as well.
This improves the safety for _refcount and the page table tracking.

This also allows us to simplify the tracking since we can once again use
the lower byte of pt_frag_refcount instead of the upper byte of _refcount.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 arch/s390/mm/pgalloc.c | 38 +++++++++++++++-----------------------
 1 file changed, 15 insertions(+), 23 deletions(-)

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 66ab68db9842..6b99932abc66 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -182,20 +182,17 @@ void page_table_free_pgste(struct page *page)
  * As follows from the above, no unallocated or fully allocated parent
  * pages are contained in mm_context_t::pgtable_list.
  *
- * The upper byte (bits 24-31) of the parent page _refcount is used
+ * The lower byte (bits 0-7) of the parent page pt_frag_refcount is used
  * for tracking contained 2KB-pgtables and has the following format:
  *
  *   PP  AA
- * 01234567    upper byte (bits 24-31) of struct page::_refcount
+ * 01234567    lower byte (bits 0-7) of struct page::pt_frag_refcount
  *   ||  ||
  *   ||  |+--- upper 2KB-pgtable is allocated
  *   ||  +---- lower 2KB-pgtable is allocated
  *   |+------- upper 2KB-pgtable is pending for removal
  *   +-------- lower 2KB-pgtable is pending for removal
  *
- * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
- * using _refcount is possible).
- *
  * When 2KB-pgtable is allocated the corresponding AA bit is set to 1.
  * The parent page is either:
  *   - added to mm_context_t::pgtable_list in case the second half of the
@@ -243,11 +240,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
-			mask = atomic_read(&page->_refcount) >> 24;
+			mask = atomic_read(&page->pt_frag_refcount);
 			/*
 			 * The pending removal bits must also be checked.
 			 * Failure to do so might lead to an impossible
-			 * value of (i.e 0x13 or 0x23) written to _refcount.
+			 * value of (i.e 0x13 or 0x23) written to
+			 * pt_frag_refcount.
 			 * Such values violate the assumption that pending and
 			 * allocation bits are mutually exclusive, and the rest
 			 * of the code unrails as result. That could lead to
@@ -259,8 +257,8 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				bit = mask & 1;		/* =1 -> second 2K */
 				if (bit)
 					table += PTRS_PER_PTE;
-				atomic_xor_bits(&page->_refcount,
-							0x01U << (bit + 24));
+				atomic_xor_bits(&page->pt_frag_refcount,
+							0x01U << bit);
 				list_del(&page->lru);
 			}
 		}
@@ -281,12 +279,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_virt(page);
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
-		atomic_xor_bits(&page->_refcount, 0x03U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
-		atomic_xor_bits(&page->_refcount, 0x01U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x01U);
 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
@@ -323,22 +321,19 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		 * will happen outside of the critical section from this
 		 * function or from __tlb_remove_table()
 		 */
-		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 		if (mask & 0x03U)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
 		spin_unlock_bh(&mm->context.lock);
-		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x10U << bit);
 		if (mask != 0x00U)
 			return;
 		half = 0x01U << bit;
 	} else {
 		half = 0x03U;
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 	}
 
 	page_table_release_check(page, table, half, mask);
@@ -368,8 +363,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	 * outside of the critical section from __tlb_remove_table() or from
 	 * page_table_free()
 	 */
-	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-	mask >>= 24;
+	mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 	if (mask & 0x03U)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
@@ -391,14 +385,12 @@ void __tlb_remove_table(void *_table)
 		return;
 	case 0x01U:	/* lower 2K of a 4K page table */
 	case 0x02U:	/* higher 2K of a 4K page table */
-		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, mask << 4);
 		if (mask != 0x00U)
 			return;
 		break;
 	case 0x03U:	/* 4K page table with pgstes */
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		break;
 	}
 
-- 
2.39.2


WARNING: multiple messages have this Message-ID (diff)
From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Matthew Wilcox <willy@infradead.org>
Cc: linux-mm@kvack.org, linux-arch@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, linux-csky@vger.kernel.org,
	linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev,
	linux-m68k@lists.linux-m68k.org, linux-mips@vger.kernel.org,
	linux-openrisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org,
	linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
	linux-um@lists.infradead.org, xen-devel@lists.xenproject.org,
	kvm@vger.kernel.org,
	"Vishal Moola (Oracle)" <vishal.moola@gmail.com>,
	David Hildenbrand <david@redhat.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>
Subject: [PATCH v2 03/34] s390: Use pt_frag_refcount for pagetables
Date: Mon,  1 May 2023 12:27:58 -0700	[thread overview]
Message-ID: <20230501192829.17086-4-vishal.moola@gmail.com> (raw)
In-Reply-To: <20230501192829.17086-1-vishal.moola@gmail.com>

s390 currently uses _refcount to identify fragmented page tables.
The page table struct already has a member pt_frag_refcount used by
powerpc, so have s390 use that instead of the _refcount field as well.
This improves the safety for _refcount and the page table tracking.

This also allows us to simplify the tracking since we can once again use
the lower byte of pt_frag_refcount instead of the upper byte of _refcount.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 arch/s390/mm/pgalloc.c | 38 +++++++++++++++-----------------------
 1 file changed, 15 insertions(+), 23 deletions(-)

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 66ab68db9842..6b99932abc66 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -182,20 +182,17 @@ void page_table_free_pgste(struct page *page)
  * As follows from the above, no unallocated or fully allocated parent
  * pages are contained in mm_context_t::pgtable_list.
  *
- * The upper byte (bits 24-31) of the parent page _refcount is used
+ * The lower byte (bits 0-7) of the parent page pt_frag_refcount is used
  * for tracking contained 2KB-pgtables and has the following format:
  *
  *   PP  AA
- * 01234567    upper byte (bits 24-31) of struct page::_refcount
+ * 01234567    lower byte (bits 0-7) of struct page::pt_frag_refcount
  *   ||  ||
  *   ||  |+--- upper 2KB-pgtable is allocated
  *   ||  +---- lower 2KB-pgtable is allocated
  *   |+------- upper 2KB-pgtable is pending for removal
  *   +-------- lower 2KB-pgtable is pending for removal
  *
- * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
- * using _refcount is possible).
- *
  * When 2KB-pgtable is allocated the corresponding AA bit is set to 1.
  * The parent page is either:
  *   - added to mm_context_t::pgtable_list in case the second half of the
@@ -243,11 +240,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
-			mask = atomic_read(&page->_refcount) >> 24;
+			mask = atomic_read(&page->pt_frag_refcount);
 			/*
 			 * The pending removal bits must also be checked.
 			 * Failure to do so might lead to an impossible
-			 * value of (i.e 0x13 or 0x23) written to _refcount.
+			 * value of (i.e 0x13 or 0x23) written to
+			 * pt_frag_refcount.
 			 * Such values violate the assumption that pending and
 			 * allocation bits are mutually exclusive, and the rest
 			 * of the code unrails as result. That could lead to
@@ -259,8 +257,8 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				bit = mask & 1;		/* =1 -> second 2K */
 				if (bit)
 					table += PTRS_PER_PTE;
-				atomic_xor_bits(&page->_refcount,
-							0x01U << (bit + 24));
+				atomic_xor_bits(&page->pt_frag_refcount,
+							0x01U << bit);
 				list_del(&page->lru);
 			}
 		}
@@ -281,12 +279,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_virt(page);
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
-		atomic_xor_bits(&page->_refcount, 0x03U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
-		atomic_xor_bits(&page->_refcount, 0x01U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x01U);
 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
@@ -323,22 +321,19 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		 * will happen outside of the critical section from this
 		 * function or from __tlb_remove_table()
 		 */
-		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 		if (mask & 0x03U)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
 		spin_unlock_bh(&mm->context.lock);
-		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x10U << bit);
 		if (mask != 0x00U)
 			return;
 		half = 0x01U << bit;
 	} else {
 		half = 0x03U;
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 	}
 
 	page_table_release_check(page, table, half, mask);
@@ -368,8 +363,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	 * outside of the critical section from __tlb_remove_table() or from
 	 * page_table_free()
 	 */
-	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-	mask >>= 24;
+	mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 	if (mask & 0x03U)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
@@ -391,14 +385,12 @@ void __tlb_remove_table(void *_table)
 		return;
 	case 0x01U:	/* lower 2K of a 4K page table */
 	case 0x02U:	/* higher 2K of a 4K page table */
-		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, mask << 4);
 		if (mask != 0x00U)
 			return;
 		break;
 	case 0x03U:	/* 4K page table with pgstes */
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		break;
 	}
 
-- 
2.39.2


_______________________________________________
linux-um mailing list
linux-um@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-um

WARNING: multiple messages have this Message-ID (diff)
From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Matthew Wilcox <willy@infradead.org>
Cc: linux-mm@kvack.org, linux-arch@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, linux-csky@vger.kernel.org,
	linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev,
	linux-m68k@lists.linux-m68k.org, linux-mips@vger.kernel.org,
	linux-openrisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org,
	linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
	linux-um@lists.infradead.org, xen-devel@lists.xenproject.org,
	kvm@vger.kernel.org,
	"Vishal Moola (Oracle)" <vishal.moola@gmail.com>,
	David Hildenbrand <david@redhat.com>,
	Claudio Imbrenda <imbrenda@linux.ibm.com>
Subject: [PATCH v2 03/34] s390: Use pt_frag_refcount for pagetables
Date: Mon,  1 May 2023 12:27:58 -0700	[thread overview]
Message-ID: <20230501192829.17086-4-vishal.moola@gmail.com> (raw)
In-Reply-To: <20230501192829.17086-1-vishal.moola@gmail.com>

s390 currently uses _refcount to identify fragmented page tables.
The page table struct already has a member pt_frag_refcount used by
powerpc, so have s390 use that instead of the _refcount field as well.
This improves the safety for _refcount and the page table tracking.

This also allows us to simplify the tracking since we can once again use
the lower byte of pt_frag_refcount instead of the upper byte of _refcount.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 arch/s390/mm/pgalloc.c | 38 +++++++++++++++-----------------------
 1 file changed, 15 insertions(+), 23 deletions(-)

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 66ab68db9842..6b99932abc66 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -182,20 +182,17 @@ void page_table_free_pgste(struct page *page)
  * As follows from the above, no unallocated or fully allocated parent
  * pages are contained in mm_context_t::pgtable_list.
  *
- * The upper byte (bits 24-31) of the parent page _refcount is used
+ * The lower byte (bits 0-7) of the parent page pt_frag_refcount is used
  * for tracking contained 2KB-pgtables and has the following format:
  *
  *   PP  AA
- * 01234567    upper byte (bits 24-31) of struct page::_refcount
+ * 01234567    lower byte (bits 0-7) of struct page::pt_frag_refcount
  *   ||  ||
  *   ||  |+--- upper 2KB-pgtable is allocated
  *   ||  +---- lower 2KB-pgtable is allocated
  *   |+------- upper 2KB-pgtable is pending for removal
  *   +-------- lower 2KB-pgtable is pending for removal
  *
- * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
- * using _refcount is possible).
- *
  * When 2KB-pgtable is allocated the corresponding AA bit is set to 1.
  * The parent page is either:
  *   - added to mm_context_t::pgtable_list in case the second half of the
@@ -243,11 +240,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
-			mask = atomic_read(&page->_refcount) >> 24;
+			mask = atomic_read(&page->pt_frag_refcount);
 			/*
 			 * The pending removal bits must also be checked.
 			 * Failure to do so might lead to an impossible
-			 * value of (i.e 0x13 or 0x23) written to _refcount.
+			 * value of (i.e 0x13 or 0x23) written to
+			 * pt_frag_refcount.
 			 * Such values violate the assumption that pending and
 			 * allocation bits are mutually exclusive, and the rest
 			 * of the code unrails as result. That could lead to
@@ -259,8 +257,8 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				bit = mask & 1;		/* =1 -> second 2K */
 				if (bit)
 					table += PTRS_PER_PTE;
-				atomic_xor_bits(&page->_refcount,
-							0x01U << (bit + 24));
+				atomic_xor_bits(&page->pt_frag_refcount,
+							0x01U << bit);
 				list_del(&page->lru);
 			}
 		}
@@ -281,12 +279,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_virt(page);
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
-		atomic_xor_bits(&page->_refcount, 0x03U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
-		atomic_xor_bits(&page->_refcount, 0x01U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x01U);
 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
@@ -323,22 +321,19 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		 * will happen outside of the critical section from this
 		 * function or from __tlb_remove_table()
 		 */
-		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 		if (mask & 0x03U)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
 		spin_unlock_bh(&mm->context.lock);
-		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x10U << bit);
 		if (mask != 0x00U)
 			return;
 		half = 0x01U << bit;
 	} else {
 		half = 0x03U;
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 	}
 
 	page_table_release_check(page, table, half, mask);
@@ -368,8 +363,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	 * outside of the critical section from __tlb_remove_table() or from
 	 * page_table_free()
 	 */
-	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-	mask >>= 24;
+	mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 	if (mask & 0x03U)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
@@ -391,14 +385,12 @@ void __tlb_remove_table(void *_table)
 		return;
 	case 0x01U:	/* lower 2K of a 4K page table */
 	case 0x02U:	/* higher 2K of a 4K page table */
-		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, mask << 4);
 		if (mask != 0x00U)
 			return;
 		break;
 	case 0x03U:	/* 4K page table with pgstes */
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		break;
 	}
 
-- 
2.39.2


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

WARNING: multiple messages have this Message-ID (diff)
From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Matthew Wilcox <willy@infradead.org>
Cc: linux-arch@vger.kernel.org, linux-s390@vger.kernel.org,
	Claudio Imbrenda <imbrenda@linux.ibm.com>,
	kvm@vger.kernel.org, linux-openrisc@vger.kernel.org,
	linux-hexagon@vger.kernel.org, linux-sh@vger.kernel.org,
	linux-um@lists.infradead.org, linux-mips@vger.kernel.org,
	linux-csky@vger.kernel.org,
	"Vishal Moola \(Oracle\)" <vishal.moola@gmail.com>,
	linux-mm@kvack.org, linux-m68k@lists.linux-m68k.org,
	loongarch@lists.linux.dev, sparclinux@vger.kernel.org,
	xen-devel@lists.xenproject.org, linux-riscv@lists.infradead.org,
	David Hildenbrand <david@redhat.com>,
	linuxppc-dev@lists.ozlabs.org,
	linux-arm-kernel@lists.infradead.org
Subject: [PATCH v2 03/34] s390: Use pt_frag_refcount for pagetables
Date: Mon,  1 May 2023 12:27:58 -0700	[thread overview]
Message-ID: <20230501192829.17086-4-vishal.moola@gmail.com> (raw)
In-Reply-To: <20230501192829.17086-1-vishal.moola@gmail.com>

s390 currently uses _refcount to identify fragmented page tables.
The page table struct already has a member pt_frag_refcount used by
powerpc, so have s390 use that instead of the _refcount field as well.
This improves the safety for _refcount and the page table tracking.

This also allows us to simplify the tracking since we can once again use
the lower byte of pt_frag_refcount instead of the upper byte of _refcount.

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 arch/s390/mm/pgalloc.c | 38 +++++++++++++++-----------------------
 1 file changed, 15 insertions(+), 23 deletions(-)

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 66ab68db9842..6b99932abc66 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -182,20 +182,17 @@ void page_table_free_pgste(struct page *page)
  * As follows from the above, no unallocated or fully allocated parent
  * pages are contained in mm_context_t::pgtable_list.
  *
- * The upper byte (bits 24-31) of the parent page _refcount is used
+ * The lower byte (bits 0-7) of the parent page pt_frag_refcount is used
  * for tracking contained 2KB-pgtables and has the following format:
  *
  *   PP  AA
- * 01234567    upper byte (bits 24-31) of struct page::_refcount
+ * 01234567    lower byte (bits 0-7) of struct page::pt_frag_refcount
  *   ||  ||
  *   ||  |+--- upper 2KB-pgtable is allocated
  *   ||  +---- lower 2KB-pgtable is allocated
  *   |+------- upper 2KB-pgtable is pending for removal
  *   +-------- lower 2KB-pgtable is pending for removal
  *
- * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
- * using _refcount is possible).
- *
  * When 2KB-pgtable is allocated the corresponding AA bit is set to 1.
  * The parent page is either:
  *   - added to mm_context_t::pgtable_list in case the second half of the
@@ -243,11 +240,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
-			mask = atomic_read(&page->_refcount) >> 24;
+			mask = atomic_read(&page->pt_frag_refcount);
 			/*
 			 * The pending removal bits must also be checked.
 			 * Failure to do so might lead to an impossible
-			 * value of (i.e 0x13 or 0x23) written to _refcount.
+			 * value of (i.e 0x13 or 0x23) written to
+			 * pt_frag_refcount.
 			 * Such values violate the assumption that pending and
 			 * allocation bits are mutually exclusive, and the rest
 			 * of the code unrails as result. That could lead to
@@ -259,8 +257,8 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				bit = mask & 1;		/* =1 -> second 2K */
 				if (bit)
 					table += PTRS_PER_PTE;
-				atomic_xor_bits(&page->_refcount,
-							0x01U << (bit + 24));
+				atomic_xor_bits(&page->pt_frag_refcount,
+							0x01U << bit);
 				list_del(&page->lru);
 			}
 		}
@@ -281,12 +279,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_virt(page);
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
-		atomic_xor_bits(&page->_refcount, 0x03U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
-		atomic_xor_bits(&page->_refcount, 0x01U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x01U);
 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
@@ -323,22 +321,19 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		 * will happen outside of the critical section from this
 		 * function or from __tlb_remove_table()
 		 */
-		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 		if (mask & 0x03U)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
 		spin_unlock_bh(&mm->context.lock);
-		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x10U << bit);
 		if (mask != 0x00U)
 			return;
 		half = 0x01U << bit;
 	} else {
 		half = 0x03U;
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 	}
 
 	page_table_release_check(page, table, half, mask);
@@ -368,8 +363,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	 * outside of the critical section from __tlb_remove_table() or from
 	 * page_table_free()
 	 */
-	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-	mask >>= 24;
+	mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 	if (mask & 0x03U)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
@@ -391,14 +385,12 @@ void __tlb_remove_table(void *_table)
 		return;
 	case 0x01U:	/* lower 2K of a 4K page table */
 	case 0x02U:	/* higher 2K of a 4K page table */
-		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, mask << 4);
 		if (mask != 0x00U)
 			return;
 		break;
 	case 0x03U:	/* 4K page table with pgstes */
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		break;
 	}
 
-- 
2.39.2


  parent reply	other threads:[~2023-05-01 19:28 UTC|newest]

Thread overview: 295+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2023-05-01 19:27 [PATCH v2 00/34] Split ptdesc from struct page Vishal Moola (Oracle)
2023-05-01 19:27 ` Vishal Moola (Oracle)
2023-05-01 19:27 ` Vishal Moola (Oracle)
2023-05-01 19:27 ` Vishal Moola (Oracle)
2023-05-01 19:27 ` Vishal Moola (Oracle)
2023-05-01 19:27 ` Vishal Moola (Oracle)
2023-05-01 19:27 ` [PATCH v2 01/34] mm: Add PAGE_TYPE_OP folio functions Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-25  8:55   ` Mike Rapoport
2023-05-25  8:55     ` Mike Rapoport
2023-05-25  8:55     ` Mike Rapoport
2023-05-25  8:55     ` Mike Rapoport
2023-05-25  8:55     ` Mike Rapoport
2023-05-25 17:00     ` Vishal Moola
2023-05-25 17:00       ` Vishal Moola
2023-05-25 17:00       ` Vishal Moola
2023-05-25 17:00       ` Vishal Moola
2023-05-25 17:00       ` Vishal Moola
2023-05-25 17:00       ` Vishal Moola
2023-05-25 20:20       ` Mike Rapoport
2023-05-25 20:20         ` Mike Rapoport
2023-05-25 20:20         ` Mike Rapoport
2023-05-25 20:20         ` Mike Rapoport
2023-05-25 20:20         ` Mike Rapoport
2023-05-25 20:20         ` Mike Rapoport
2023-05-25 20:38         ` Vishal Moola
2023-05-25 20:38           ` Vishal Moola
2023-05-25 20:38           ` Vishal Moola
2023-05-25 20:38           ` Vishal Moola
2023-05-25 20:38           ` Vishal Moola
2023-05-25 20:57           ` Matthew Wilcox
2023-05-25 20:57             ` Matthew Wilcox
2023-05-25 20:57             ` Matthew Wilcox
2023-05-25 20:57             ` Matthew Wilcox
2023-05-25 20:57             ` Matthew Wilcox
2023-05-01 19:27 ` [PATCH v2 02/34] s390: Use _pt_s390_gaddr for gmap address tracking Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-25  8:58   ` Mike Rapoport
2023-05-25  8:58     ` Mike Rapoport
2023-05-25  8:58     ` Mike Rapoport
2023-05-25  8:58     ` Mike Rapoport
2023-05-25  8:58     ` Mike Rapoport
2023-05-25 17:12     ` Vishal Moola
2023-05-25 17:12       ` Vishal Moola
2023-05-25 17:12       ` Vishal Moola
2023-05-25 17:12       ` Vishal Moola
2023-05-25 17:12       ` Vishal Moola
2023-05-25 17:12       ` Vishal Moola
2023-05-01 19:27 ` Vishal Moola (Oracle) [this message]
2023-05-01 19:27   ` [PATCH v2 03/34] s390: Use pt_frag_refcount for pagetables Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27 ` [PATCH v2 04/34] pgtable: Create struct ptdesc Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:27   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 05/34] mm: add utility functions for ptdesc Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-25  9:09   ` Mike Rapoport
2023-05-25  9:09     ` Mike Rapoport
2023-05-25  9:09     ` Mike Rapoport
2023-05-25  9:09     ` Mike Rapoport
2023-05-25  9:09     ` Mike Rapoport
2023-05-25 18:04     ` Vishal Moola
2023-05-25 18:04       ` Vishal Moola
2023-05-25 18:04       ` Vishal Moola
2023-05-25 18:04       ` Vishal Moola
2023-05-25 18:04       ` Vishal Moola
2023-05-25 18:04       ` Vishal Moola
2023-05-25 20:25       ` Mike Rapoport
2023-05-25 20:25         ` Mike Rapoport
2023-05-25 20:25         ` Mike Rapoport
2023-05-25 20:25         ` Mike Rapoport
2023-05-25 20:25         ` Mike Rapoport
2023-05-25 20:25         ` Mike Rapoport
2023-05-25 20:53         ` Vishal Moola
2023-05-25 20:53           ` Vishal Moola
2023-05-25 20:53           ` Vishal Moola
2023-05-25 20:53           ` Vishal Moola
2023-05-25 20:53           ` Vishal Moola
2023-05-27 10:41           ` Mike Rapoport
2023-05-27 10:41             ` Mike Rapoport
2023-05-27 10:41             ` Mike Rapoport
2023-05-27 10:41             ` Mike Rapoport
2023-05-27 10:41             ` Mike Rapoport
2023-05-27 15:09             ` Matthew Wilcox
2023-05-27 15:09               ` Matthew Wilcox
2023-05-27 15:09               ` Matthew Wilcox
2023-05-27 15:09               ` Matthew Wilcox
2023-05-27 15:09               ` Matthew Wilcox
2023-05-28  5:47               ` Mike Rapoport
2023-05-28  5:47                 ` Mike Rapoport
2023-05-28  5:47                 ` Mike Rapoport
2023-05-28  5:47                 ` Mike Rapoport
2023-05-28  5:47                 ` Mike Rapoport
2023-05-01 19:28 ` [PATCH v2 06/34] mm: Convert pmd_pgtable_page() to pmd_ptdesc() Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 07/34] mm: Convert ptlock_alloc() to use ptdescs Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 08/34] mm: Convert ptlock_ptr() " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 09/34] mm: Convert pmd_ptlock_init() " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 10/34] mm: Convert ptlock_init() " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 11/34] mm: Convert pmd_ptlock_free() " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 12/34] mm: Convert ptlock_free() " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 13/34] mm: Create ptdesc equivalents for pgtable_{pte,pmd}_page_{ctor,dtor} Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-25  9:19   ` Mike Rapoport
2023-05-25  9:19     ` Mike Rapoport
2023-05-25  9:19     ` Mike Rapoport
2023-05-25  9:19     ` Mike Rapoport
2023-05-25  9:19     ` Mike Rapoport
2023-05-25 18:17     ` Vishal Moola
2023-05-25 18:17       ` Vishal Moola
2023-05-25 18:17       ` Vishal Moola
2023-05-25 18:17       ` Vishal Moola
2023-05-25 18:17       ` Vishal Moola
2023-05-25 18:17       ` Vishal Moola
2023-05-01 19:28 ` [PATCH v2 14/34] powerpc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 15/34] x86: " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 16/34] s390: Convert various gmap " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 17/34] s390: Convert various pgalloc " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 18/34] mm: Remove page table members from struct page Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 19/34] pgalloc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 20/34] arm: " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 21/34] arm64: " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-02  1:48   ` kernel test robot
2023-05-02  1:48     ` kernel test robot
2023-05-02  1:48     ` kernel test robot
2023-05-02  1:48     ` kernel test robot
2023-05-02  1:48     ` kernel test robot
2023-05-02  2:21   ` kernel test robot
2023-05-02  2:21     ` kernel test robot
2023-05-02  2:21     ` kernel test robot
2023-05-02  2:21     ` kernel test robot
2023-05-02  2:21     ` kernel test robot
2023-05-01 19:28 ` [PATCH v2 22/34] csky: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 23/34] hexagon: " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 24/34] loongarch: Convert various functions " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 25/34] m68k: " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 26/34] mips: " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 27/34] nios2: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 28/34] openrisc: " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 29/34] riscv: Convert alloc_{pmd, pte}_late() " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 20:59   ` Palmer Dabbelt
2023-05-01 20:59     ` Palmer Dabbelt
2023-05-01 20:59     ` Palmer Dabbelt
2023-05-01 20:59     ` Palmer Dabbelt
2023-05-01 20:59     ` Palmer Dabbelt
2023-05-01 20:59     ` Palmer Dabbelt
2023-05-01 19:28 ` [PATCH v2 30/34] sh: Convert pte_free_tlb() " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-06 11:35   ` John Paul Adrian Glaubitz
2023-05-06 11:35     ` John Paul Adrian Glaubitz
2023-05-06 11:35     ` John Paul Adrian Glaubitz
2023-05-06 11:35     ` John Paul Adrian Glaubitz
2023-05-06 11:35     ` John Paul Adrian Glaubitz
2023-05-15 19:11     ` Vishal Moola
2023-05-15 19:11       ` Vishal Moola
2023-05-15 19:11       ` Vishal Moola
2023-05-15 19:11       ` Vishal Moola
2023-05-15 19:11       ` Vishal Moola
2023-05-15 19:11       ` Vishal Moola
2023-05-01 19:28 ` [PATCH v2 31/34] sparc64: Convert various functions " Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 32/34] sparc: Convert pgtable_pte_page_{ctor, dtor}() to ptdesc equivalents Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 33/34] um: Convert {pmd, pte}_free_tlb() to use ptdescs Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 34/34] mm: Remove pgtable_{pmd, pte}_page_{ctor, dtor}() wrappers Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-01 19:28   ` Vishal Moola (Oracle)
2023-05-18 12:12 ` [PATCH v2 00/34] Split ptdesc from struct page Jason Gunthorpe
2023-05-18 12:12   ` Jason Gunthorpe
2023-05-18 12:12   ` Jason Gunthorpe
2023-05-18 12:12   ` Jason Gunthorpe
2023-05-18 12:12   ` Jason Gunthorpe
2023-05-18 12:12   ` Jason Gunthorpe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20230501192829.17086-4-vishal.moola@gmail.com \
    --to=vishal.moola@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=david@redhat.com \
    --cc=imbrenda@linux.ibm.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-arch@vger.kernel.org \
    --cc=linux-arm-kernel@lists.infradead.org \
    --cc=linux-csky@vger.kernel.org \
    --cc=linux-hexagon@vger.kernel.org \
    --cc=linux-m68k@lists.linux-m68k.org \
    --cc=linux-mips@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-openrisc@vger.kernel.org \
    --cc=linux-riscv@lists.infradead.org \
    --cc=linux-s390@vger.kernel.org \
    --cc=linux-sh@vger.kernel.org \
    --cc=linux-um@lists.infradead.org \
    --cc=linuxppc-dev@lists.ozlabs.org \
    --cc=loongarch@lists.linux.dev \
    --cc=sparclinux@vger.kernel.org \
    --cc=willy@infradead.org \
    --cc=xen-devel@lists.xenproject.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.