From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: Andrew Morton <akpm@linux-foundation.org>,
Matthew Wilcox <willy@infradead.org>
Cc: linux-mm@kvack.org, linux-arch@vger.kernel.org,
linux-arm-kernel@lists.infradead.org, linux-csky@vger.kernel.org,
linux-hexagon@vger.kernel.org, loongarch@lists.linux.dev,
linux-m68k@lists.linux-m68k.org, linux-mips@vger.kernel.org,
linux-openrisc@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
linux-riscv@lists.infradead.org, linux-s390@vger.kernel.org,
linux-sh@vger.kernel.org, sparclinux@vger.kernel.org,
linux-um@lists.infradead.org, xen-devel@lists.xenproject.org,
kvm@vger.kernel.org,
"Vishal Moola (Oracle)" <vishal.moola@gmail.com>,
David Hildenbrand <david@redhat.com>,
Claudio Imbrenda <imbrenda@linux.ibm.com>
Subject: [PATCH v2 02/34] s390: Use _pt_s390_gaddr for gmap address tracking
Date: Mon, 1 May 2023 12:27:57 -0700 [thread overview]
Message-ID: <20230501192829.17086-3-vishal.moola@gmail.com> (raw)
In-Reply-To: <20230501192829.17086-1-vishal.moola@gmail.com>
s390 uses page->index to keep track of page tables for the guest address
space. In an attempt to consolidate the usage of page fields in s390,
replace _pt_pad_2 with _pt_s390_gaddr to replace page->index in gmap.
This will help with the splitting of struct ptdesc from struct page, as
well as allow s390 to use _pt_frag_refcount for fragmented page table
tracking.
Since page->_pt_s390_gaddr aliases with mapping, ensure it is set to NULL
before freeing the pages as well.
This also reverts commit 7e25de77bc5ea ("s390/mm: use pmd_pgtable_page()
helper in __gmap_segment_gaddr()") which had s390 use
pmd_pgtable_page() to get a gmap page table, as pmd_pgtable_page()
should be used for more generic process page tables.
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
arch/s390/mm/gmap.c | 56 +++++++++++++++++++++++++++-------------
include/linux/mm_types.h | 2 +-
2 files changed, 39 insertions(+), 19 deletions(-)
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index dfe905c7bd8e..a9e8b1805894 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -70,7 +70,7 @@ static struct gmap *gmap_alloc(unsigned long limit)
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
goto out_free;
- page->index = 0;
+ page->_pt_s390_gaddr = 0;
list_add(&page->lru, &gmap->crst_list);
table = page_to_virt(page);
crst_table_init(table, etype);
@@ -187,16 +187,20 @@ static void gmap_free(struct gmap *gmap)
if (!(gmap_is_shadow(gmap) && gmap->removed))
gmap_flush_tlb(gmap);
/* Free all segment & region tables. */
- list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
+ list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
+ }
gmap_radix_tree_free(&gmap->guest_to_host);
gmap_radix_tree_free(&gmap->host_to_guest);
/* Free additional data for a shadow gmap */
if (gmap_is_shadow(gmap)) {
/* Free all page tables. */
- list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
+ list_for_each_entry_safe(page, next, &gmap->pt_list, lru) {
+ page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
+ }
gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
/* Release reference to the parent */
gmap_put(gmap->parent);
@@ -318,12 +322,14 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
list_add(&page->lru, &gmap->crst_list);
*table = __pa(new) | _REGION_ENTRY_LENGTH |
(*table & _REGION_ENTRY_TYPE_MASK);
- page->index = gaddr;
+ page->_pt_s390_gaddr = gaddr;
page = NULL;
}
spin_unlock(&gmap->guest_table_lock);
- if (page)
+ if (page) {
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
+ }
return 0;
}
@@ -336,12 +342,14 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
struct page *page;
- unsigned long offset;
+ unsigned long offset, mask;
offset = (unsigned long) entry / sizeof(unsigned long);
offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
- page = pmd_pgtable_page((pmd_t *) entry);
- return page->index + offset;
+ mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+ page = virt_to_page((void *)((unsigned long) entry & mask));
+
+ return page->_pt_s390_gaddr + offset;
}
/**
@@ -1351,6 +1359,7 @@ static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
/* Free page table */
page = phys_to_page(pgt);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
}
@@ -1379,6 +1388,7 @@ static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
/* Free page table */
page = phys_to_page(pgt);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
}
}
@@ -1409,6 +1419,7 @@ static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
/* Free segment table */
page = phys_to_page(sgt);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
@@ -1437,6 +1448,7 @@ static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
/* Free segment table */
page = phys_to_page(sgt);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1467,6 +1479,7 @@ static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
/* Free region 3 table */
page = phys_to_page(r3t);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
@@ -1495,6 +1508,7 @@ static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
/* Free region 3 table */
page = phys_to_page(r3t);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1525,6 +1539,7 @@ static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
/* Free region 2 table */
page = phys_to_page(r2t);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
@@ -1557,6 +1572,7 @@ static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
/* Free region 2 table */
page = phys_to_page(r2t);
list_del(&page->lru);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
}
}
@@ -1762,9 +1778,9 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
- page->index = r2t & _REGION_ENTRY_ORIGIN;
+ page->_pt_s390_gaddr = r2t & _REGION_ENTRY_ORIGIN;
if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
+ page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
s_r2t = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
@@ -1814,6 +1830,7 @@ int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
@@ -1846,9 +1863,9 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
- page->index = r3t & _REGION_ENTRY_ORIGIN;
+ page->_pt_s390_gaddr = r3t & _REGION_ENTRY_ORIGIN;
if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
+ page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
s_r3t = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
@@ -1898,6 +1915,7 @@ int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
@@ -1930,9 +1948,9 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER);
if (!page)
return -ENOMEM;
- page->index = sgt & _REGION_ENTRY_ORIGIN;
+ page->_pt_s390_gaddr = sgt & _REGION_ENTRY_ORIGIN;
if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
+ page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
s_sgt = page_to_phys(page);
/* Install shadow region second table */
spin_lock(&sg->guest_table_lock);
@@ -1982,6 +2000,7 @@ int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
+ page->_pt_s390_gaddr = 0;
__free_pages(page, CRST_ALLOC_ORDER);
return rc;
}
@@ -2014,9 +2033,9 @@ int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
/* Shadow page tables are full pages (pte+pgste) */
page = pfn_to_page(*table >> PAGE_SHIFT);
- *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
+ *pgt = page->_pt_s390_gaddr & ~GMAP_SHADOW_FAKE_TABLE;
*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
- *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
+ *fake = !!(page->_pt_s390_gaddr & GMAP_SHADOW_FAKE_TABLE);
rc = 0;
} else {
rc = -EAGAIN;
@@ -2054,9 +2073,9 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
page = page_table_alloc_pgste(sg->mm);
if (!page)
return -ENOMEM;
- page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
+ page->_pt_s390_gaddr = pgt & _SEGMENT_ENTRY_ORIGIN;
if (fake)
- page->index |= GMAP_SHADOW_FAKE_TABLE;
+ page->_pt_s390_gaddr |= GMAP_SHADOW_FAKE_TABLE;
s_pgt = page_to_phys(page);
/* Install shadow page table */
spin_lock(&sg->guest_table_lock);
@@ -2101,6 +2120,7 @@ int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
return rc;
out_free:
spin_unlock(&sg->guest_table_lock);
+ page->_pt_s390_gaddr = 0;
page_table_free_pgste(page);
return rc;
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 306a3d1a0fa6..6161fe1ae5b8 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -144,7 +144,7 @@ struct page {
struct { /* Page table pages */
unsigned long _pt_pad_1; /* compound_head */
pgtable_t pmd_huge_pte; /* protected by page->ptl */
- unsigned long _pt_pad_2; /* mapping */
+ unsigned long _pt_s390_gaddr; /* mapping */
union {
struct mm_struct *pt_mm; /* x86 pgds only */
atomic_t pt_frag_refcount; /* powerpc */
--
2.39.2
_______________________________________________
linux-riscv mailing list
linux-riscv@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-riscv
next prev parent reply other threads:[~2023-05-01 19:28 UTC|newest]
Thread overview: 57+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-05-01 19:27 [PATCH v2 00/34] Split ptdesc from struct page Vishal Moola (Oracle)
2023-05-01 19:27 ` [PATCH v2 01/34] mm: Add PAGE_TYPE_OP folio functions Vishal Moola (Oracle)
2023-05-25 8:55 ` Mike Rapoport
2023-05-25 17:00 ` Vishal Moola
2023-05-25 20:20 ` Mike Rapoport
2023-05-25 20:38 ` Vishal Moola
2023-05-25 20:57 ` Matthew Wilcox
2023-05-01 19:27 ` Vishal Moola (Oracle) [this message]
2023-05-25 8:58 ` [PATCH v2 02/34] s390: Use _pt_s390_gaddr for gmap address tracking Mike Rapoport
2023-05-25 17:12 ` Vishal Moola
2023-05-01 19:27 ` [PATCH v2 03/34] s390: Use pt_frag_refcount for pagetables Vishal Moola (Oracle)
2023-05-01 19:27 ` [PATCH v2 04/34] pgtable: Create struct ptdesc Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 05/34] mm: add utility functions for ptdesc Vishal Moola (Oracle)
2023-05-25 9:09 ` Mike Rapoport
2023-05-25 18:04 ` Vishal Moola
2023-05-25 20:25 ` Mike Rapoport
2023-05-25 20:53 ` Vishal Moola
2023-05-27 10:41 ` Mike Rapoport
2023-05-27 15:09 ` Matthew Wilcox
2023-05-28 5:47 ` Mike Rapoport
2023-05-01 19:28 ` [PATCH v2 06/34] mm: Convert pmd_pgtable_page() to pmd_ptdesc() Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 07/34] mm: Convert ptlock_alloc() to use ptdescs Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 08/34] mm: Convert ptlock_ptr() " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 09/34] mm: Convert pmd_ptlock_init() " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 10/34] mm: Convert ptlock_init() " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 11/34] mm: Convert pmd_ptlock_free() " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 12/34] mm: Convert ptlock_free() " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 13/34] mm: Create ptdesc equivalents for pgtable_{pte,pmd}_page_{ctor,dtor} Vishal Moola (Oracle)
2023-05-25 9:19 ` Mike Rapoport
2023-05-25 18:17 ` Vishal Moola
2023-05-01 19:28 ` [PATCH v2 14/34] powerpc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 15/34] x86: " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 16/34] s390: Convert various gmap " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 17/34] s390: Convert various pgalloc " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 18/34] mm: Remove page table members from struct page Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 19/34] pgalloc: Convert various functions to use ptdescs Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 20/34] arm: " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 21/34] arm64: " Vishal Moola (Oracle)
2023-05-02 1:48 ` kernel test robot
2023-05-02 2:21 ` kernel test robot
2023-05-01 19:28 ` [PATCH v2 22/34] csky: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 23/34] hexagon: " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 24/34] loongarch: Convert various functions " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 25/34] m68k: " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 26/34] mips: " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 27/34] nios2: Convert __pte_free_tlb() " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 28/34] openrisc: " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 29/34] riscv: Convert alloc_{pmd, pte}_late() " Vishal Moola (Oracle)
2023-05-01 20:59 ` Palmer Dabbelt
2023-05-01 19:28 ` [PATCH v2 30/34] sh: Convert pte_free_tlb() " Vishal Moola (Oracle)
2023-05-06 11:35 ` John Paul Adrian Glaubitz
2023-05-15 19:11 ` Vishal Moola
2023-05-01 19:28 ` [PATCH v2 31/34] sparc64: Convert various functions " Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 32/34] sparc: Convert pgtable_pte_page_{ctor, dtor}() to ptdesc equivalents Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 33/34] um: Convert {pmd, pte}_free_tlb() to use ptdescs Vishal Moola (Oracle)
2023-05-01 19:28 ` [PATCH v2 34/34] mm: Remove pgtable_{pmd, pte}_page_{ctor, dtor}() wrappers Vishal Moola (Oracle)
2023-05-18 12:12 ` [PATCH v2 00/34] Split ptdesc from struct page Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230501192829.17086-3-vishal.moola@gmail.com \
--to=vishal.moola@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=david@redhat.com \
--cc=imbrenda@linux.ibm.com \
--cc=kvm@vger.kernel.org \
--cc=linux-arch@vger.kernel.org \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-csky@vger.kernel.org \
--cc=linux-hexagon@vger.kernel.org \
--cc=linux-m68k@lists.linux-m68k.org \
--cc=linux-mips@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-openrisc@vger.kernel.org \
--cc=linux-riscv@lists.infradead.org \
--cc=linux-s390@vger.kernel.org \
--cc=linux-sh@vger.kernel.org \
--cc=linux-um@lists.infradead.org \
--cc=linuxppc-dev@lists.ozlabs.org \
--cc=loongarch@lists.linux.dev \
--cc=sparclinux@vger.kernel.org \
--cc=willy@infradead.org \
--cc=xen-devel@lists.xenproject.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).