From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Matthew Wilcox <willy@infradead.org>
Cc: linux-mm@kvack.org, linux-arch@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org, linux-csky@vger.kernel.org,
	linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev,
	linux-m68k@lists.linux-m68k.org, linux-mips@vger.kernel.org,
	linux-openrisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
	linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org,
	linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
	linux-um@lists.infradead.org, xen-devel@lists.xenproject.org,
	kvm@vger.kernel.org,
	"Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Subject: [PATCH 02/33] s390: Use pt_frag_refcount for pagetables
Date: Mon, 17 Apr 2023 13:50:17 -0700	[thread overview]
Message-ID: <20230417205048.15870-3-vishal.moola@gmail.com> (raw)
In-Reply-To: <20230417205048.15870-1-vishal.moola@gmail.com>

s390 currently uses _refcount to identify fragmented page tables.
struct page already has a member, pt_frag_refcount, used by powerpc,
so have s390 use that field instead of _refcount as well. This improves
the safety of both _refcount and the page table tracking.

This also lets us simplify the tracking, since we can use the lower byte
of pt_frag_refcount directly instead of the upper byte of _refcount.
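
As a minimal user-space illustration of that simplification (plain C11,
not taken from the kernel; the variable names are only illustrative):
the old scheme kept the state in bits 24-31 of _refcount and had to
shift on every access, while the new one uses bits 0-7 of
pt_frag_refcount directly.

#include <assert.h>

int main(void)
{
	unsigned int bit = 1;				/* second 2K fragment */

	/* old encoding: state lives in bits 24-31 of _refcount */
	unsigned int old_word = 0x01U << (bit + 24);
	unsigned int old_mask = old_word >> 24;		/* extra shift on every read */

	/* new encoding: state lives in bits 0-7 of pt_frag_refcount */
	unsigned int new_mask = 0x01U << bit;

	assert(old_mask == new_mask);			/* same tracking information */
	return 0;
}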

Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
 arch/s390/mm/pgalloc.c | 38 +++++++++++++++-----------------------
 1 file changed, 15 insertions(+), 23 deletions(-)

diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 66ab68db9842..6b99932abc66 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -182,20 +182,17 @@ void page_table_free_pgste(struct page *page)
  * As follows from the above, no unallocated or fully allocated parent
  * pages are contained in mm_context_t::pgtable_list.
  *
- * The upper byte (bits 24-31) of the parent page _refcount is used
+ * The lower byte (bits 0-7) of the parent page pt_frag_refcount is used
  * for tracking contained 2KB-pgtables and has the following format:
  *
  *   PP  AA
- * 01234567    upper byte (bits 24-31) of struct page::_refcount
+ * 01234567    lower byte (bits 0-7) of struct page::pt_frag_refcount
  *   ||  ||
  *   ||  |+--- upper 2KB-pgtable is allocated
  *   ||  +---- lower 2KB-pgtable is allocated
  *   |+------- upper 2KB-pgtable is pending for removal
  *   +-------- lower 2KB-pgtable is pending for removal
  *
- * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why
- * using _refcount is possible).
- *
  * When 2KB-pgtable is allocated the corresponding AA bit is set to 1.
  * The parent page is either:
  *   - added to mm_context_t::pgtable_list in case the second half of the
@@ -243,11 +240,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
-			mask = atomic_read(&page->_refcount) >> 24;
+			mask = atomic_read(&page->pt_frag_refcount);
 			/*
 			 * The pending removal bits must also be checked.
 			 * Failure to do so might lead to an impossible
-			 * value of (i.e 0x13 or 0x23) written to _refcount.
+			 * value of (i.e 0x13 or 0x23) written to
+			 * pt_frag_refcount.
 			 * Such values violate the assumption that pending and
 			 * allocation bits are mutually exclusive, and the rest
 			 * of the code unrails as result. That could lead to
@@ -259,8 +257,8 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				bit = mask & 1;		/* =1 -> second 2K */
 				if (bit)
 					table += PTRS_PER_PTE;
-				atomic_xor_bits(&page->_refcount,
-							0x01U << (bit + 24));
+				atomic_xor_bits(&page->pt_frag_refcount,
+							0x01U << bit);
 				list_del(&page->lru);
 			}
 		}
@@ -281,12 +279,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	table = (unsigned long *) page_to_virt(page);
 	if (mm_alloc_pgste(mm)) {
 		/* Return 4K page table with PGSTEs */
-		atomic_xor_bits(&page->_refcount, 0x03U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
 		memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
 	} else {
 		/* Return the first 2K fragment of the page */
-		atomic_xor_bits(&page->_refcount, 0x01U << 24);
+		atomic_xor_bits(&page->pt_frag_refcount, 0x01U);
 		memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
 		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
@@ -323,22 +321,19 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 		 * will happen outside of the critical section from this
 		 * function or from __tlb_remove_table()
 		 */
-		mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 		if (mask & 0x03U)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
 		spin_unlock_bh(&mm->context.lock);
-		mask = atomic_xor_bits(&page->_refcount, 0x10U << (bit + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x10U << bit);
 		if (mask != 0x00U)
 			return;
 		half = 0x01U << bit;
 	} else {
 		half = 0x03U;
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 	}
 
 	page_table_release_check(page, table, half, mask);
@@ -368,8 +363,7 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 	 * outside of the critical section from __tlb_remove_table() or from
 	 * page_table_free()
 	 */
-	mask = atomic_xor_bits(&page->_refcount, 0x11U << (bit + 24));
-	mask >>= 24;
+	mask = atomic_xor_bits(&page->pt_frag_refcount, 0x11U << bit);
 	if (mask & 0x03U)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
@@ -391,14 +385,12 @@ void __tlb_remove_table(void *_table)
 		return;
 	case 0x01U:	/* lower 2K of a 4K page table */
 	case 0x02U:	/* higher 2K of a 4K page table */
-		mask = atomic_xor_bits(&page->_refcount, mask << (4 + 24));
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, mask << 4);
 		if (mask != 0x00U)
 			return;
 		break;
 	case 0x03U:	/* 4K page table with pgstes */
-		mask = atomic_xor_bits(&page->_refcount, 0x03U << 24);
-		mask >>= 24;
+		mask = atomic_xor_bits(&page->pt_frag_refcount, 0x03U);
 		break;
 	}
 
-- 
2.39.2
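
For reference, a minimal user-space model of the fragment-tracking
scheme the patch converts (assuming C11 atomics; xor_bits() only mimics
the behaviour of atomic_xor_bits(), and none of these names are kernel
APIs):

#include <assert.h>
#include <stdatomic.h>

/*
 * One byte of state, as in the comment above:
 *   0x01 / 0x02 - lower / upper 2K fragment is allocated (AA bits)
 *   0x10 / 0x20 - lower / upper 2K fragment is pending removal (PP bits)
 */
static unsigned int xor_bits(atomic_uint *v, unsigned int bits)
{
	/* toggle the bits and return the new value, like atomic_xor_bits() */
	return atomic_fetch_xor(v, bits) ^ bits;
}

int main(void)
{
	atomic_uint pt_frag_refcount = 0;
	unsigned int mask;

	/* page_table_alloc(): hand out the lower 2K fragment */
	mask = xor_bits(&pt_frag_refcount, 0x01U << 0);
	assert(mask == 0x01U);

	/* a second allocation takes the upper fragment (bit = mask & 1 = 1) */
	mask = xor_bits(&pt_frag_refcount, 0x01U << 1);
	assert(mask == 0x03U);			/* both halves allocated */

	/* page_table_free() of the lower half: clear its AA bit, set its PP bit */
	mask = xor_bits(&pt_frag_refcount, 0x11U << 0);
	assert((mask & 0x03U) == 0x02U);	/* upper half still in use */

	/* once the table can really go away, the PP bit is cleared again */
	mask = xor_bits(&pt_frag_refcount, 0x10U << 0);
	assert(mask == 0x02U);			/* only the upper half remains */

	return 0;
}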


Thread overview: 218+ messages

2023-04-17 20:50 [PATCH 00/33] Split ptdesc from struct page Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 01/33] s390: Use _pt_s390_gaddr for gmap address tracking Vishal Moola (Oracle)
2023-04-18 15:45   ` David Hildenbrand
2023-04-18 21:33     ` Vishal Moola
2023-04-19  7:54       ` David Hildenbrand
2023-04-20 23:32         ` Vishal Moola
2023-04-17 20:50 ` [PATCH 02/33] s390: Use pt_frag_refcount for pagetables Vishal Moola (Oracle) [this message]
2023-04-17 20:50 ` [PATCH 03/33] pgtable: Create struct ptdesc Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 04/33] mm: add utility functions for ptdesc Vishal Moola (Oracle)
2023-04-18  1:22   ` kernel test robot
2023-04-19 13:33   ` [PATCH 4/33] " Vernon Yang
2023-04-19 20:29     ` Vishal Moola
2023-04-17 20:50 ` [PATCH 05/33] mm: Convert pmd_pgtable_page() to pmd_ptdesc() Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 06/33] mm: Convert ptlock_alloc() to use ptdescs Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 07/33] mm: Convert ptlock_ptr() " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 08/33] mm: Convert pmd_ptlock_init() " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 09/33] mm: Convert ptlock_init() " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 10/33] mm: Convert pmd_ptlock_free() " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 11/33] mm: Convert ptlock_free() " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 12/33] mm: Create ptdesc equivalents for pgtable_{pte,pmd}_page_{ctor,dtor} Vishal Moola (Oracle)
2023-04-18  2:13   ` kernel test robot
2023-04-17 20:50 ` [PATCH 13/33] powerpc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 14/33] x86: " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 15/33] s390: Convert various gmap " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 16/33] s390: Convert various pgalloc " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 17/33] mm: Remove page table members from struct page Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 18/33] pgalloc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 19/33] arm: " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 20/33] arm64: " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 21/33] csky: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 22/33] hexagon: " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 23/33] loongarch: Convert various functions " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 24/33] m68k: " Vishal Moola (Oracle)
2023-04-17 22:18   ` kernel test robot
2023-04-17 20:50 ` [PATCH 25/33] mips: " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 26/33] nios2: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 27/33] openrisc: " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 28/33] riscv: Convert alloc_{pmd, pte}_late() " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 29/33] sh: Convert pte_free_tlb() " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 30/33] sparc64: Convert various functions " Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 31/33] sparc: Convert pgtable_pte_page_{ctor, dtor}() to ptdesc equivalents Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 32/33] um: Convert {pmd, pte}_free_tlb() to use ptdescs Vishal Moola (Oracle)
2023-04-17 20:50 ` [PATCH 33/33] mm: Remove pgtable_{pmd, pte}_page_{ctor, dtor}() wrappers Vishal Moola (Oracle)
