From mboxrd@z Thu Jan 1 00:00:00 1970 From: Christophe Leroy Date: Sun, 16 Feb 2020 10:41:07 +0000 Subject: Re: [PATCH v2 07/13] powerpc: add support for folded p4d page tables Message-Id: List-Id: References: <20200216081843.28670-1-rppt@kernel.org> <20200216081843.28670-8-rppt@kernel.org> In-Reply-To: <20200216081843.28670-8-rppt@kernel.org> MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit To: Mike Rapoport , linux-kernel@vger.kernel.org Cc: Rich Felker , linux-ia64@vger.kernel.org, Geert Uytterhoeven , linux-sh@vger.kernel.org, Benjamin Herrenschmidt , linux-mm@kvack.org, Paul Mackerras , linux-hexagon@vger.kernel.org, Will Deacon , kvmarm@lists.cs.columbia.edu, Jonas Bonn , linux-arch@vger.kernel.org, Brian Cain , Marc Zyngier , Russell King , Ley Foon Tan , Mike Rapoport , Catalin Marinas , uclinux-h8-devel@lists.sourceforge.jp, Fenghua Yu , Arnd Bergmann , kvm-ppc@vger.kernel.org, Stefan Kristiansson , openrisc@lists.librecores.org, Stafford Horne , Guan Xuetao , linux-arm-kernel@lists.infradead.org, Tony Luck , Yoshinori Sato , Michael Ellerman , nios2-dev@lists.rocketboards.org, Andrew Morton , linuxppc-dev@lists.ozlabs.org Le 16/02/2020 à 09:18, Mike Rapoport a écrit : > From: Mike Rapoport > > Implement primitives necessary for the 4th level folding, add walks of p4d > level where appropriate and replace 5level-fixup.h with pgtable-nop4d.h. I don't think it is worth adding all this additionnals walks of p4d, this patch could be limited to changes like: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); The additionnal walks should be added through another patch the day powerpc need them. See below for more comments. 
> > Signed-off-by: Mike Rapoport > Tested-by: Christophe Leroy # 8xx and 83xx > --- > arch/powerpc/include/asm/book3s/32/pgtable.h | 1 - > arch/powerpc/include/asm/book3s/64/hash.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgalloc.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgtable.h | 58 ++++++++++-------- > arch/powerpc/include/asm/book3s/64/radix.h | 6 +- > arch/powerpc/include/asm/nohash/32/pgtable.h | 1 - > arch/powerpc/include/asm/nohash/64/pgalloc.h | 2 +- > .../include/asm/nohash/64/pgtable-4k.h | 32 +++++----- > arch/powerpc/include/asm/nohash/64/pgtable.h | 6 +- > arch/powerpc/include/asm/pgtable.h | 8 +++ > arch/powerpc/kvm/book3s_64_mmu_radix.c | 59 ++++++++++++++++--- > arch/powerpc/lib/code-patching.c | 7 ++- > arch/powerpc/mm/book3s32/mmu.c | 2 +- > arch/powerpc/mm/book3s32/tlb.c | 4 +- > arch/powerpc/mm/book3s64/hash_pgtable.c | 4 +- > arch/powerpc/mm/book3s64/radix_pgtable.c | 19 ++++-- > arch/powerpc/mm/book3s64/subpage_prot.c | 6 +- > arch/powerpc/mm/hugetlbpage.c | 28 +++++---- > arch/powerpc/mm/kasan/kasan_init_32.c | 8 +-- > arch/powerpc/mm/mem.c | 4 +- > arch/powerpc/mm/nohash/40x.c | 4 +- > arch/powerpc/mm/nohash/book3e_pgtable.c | 15 +++-- > arch/powerpc/mm/pgtable.c | 25 +++++++- > arch/powerpc/mm/pgtable_32.c | 28 +++++---- > arch/powerpc/mm/pgtable_64.c | 10 ++-- > arch/powerpc/mm/ptdump/hashpagetable.c | 20 ++++++- > arch/powerpc/mm/ptdump/ptdump.c | 22 ++++++- > arch/powerpc/xmon/xmon.c | 17 +++++- > 28 files changed, 284 insertions(+), 120 deletions(-) > > diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h > index 5b39c11e884a..39ec11371be0 100644 > --- a/arch/powerpc/include/asm/book3s/32/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h > @@ -2,7 +2,6 @@ > #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H > #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H > > -#define __ARCH_USE_5LEVEL_HACK > #include > > #include > diff --git a/arch/powerpc/include/asm/book3s/64/hash.h 
b/arch/powerpc/include/asm/book3s/64/hash.h > index 2781ebf6add4..876d1528c2cf 100644 > --- a/arch/powerpc/include/asm/book3s/64/hash.h > +++ b/arch/powerpc/include/asm/book3s/64/hash.h > @@ -134,9 +134,9 @@ static inline int get_region_id(unsigned long ea) > > #define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS) > #define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS) > -static inline int hash__pgd_bad(pgd_t pgd) > +static inline int hash__p4d_bad(p4d_t p4d) > { > - return (pgd_val(pgd) = 0); > + return (p4d_val(p4d) = 0); > } > #ifdef CONFIG_STRICT_KERNEL_RWX > extern void hash__mark_rodata_ro(void); > diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h > index a41e91bd0580..69c5b051734f 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h > +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h > @@ -85,9 +85,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) > kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); > } > > -static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) > +static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud) > { > - *pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > + *pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > } > > static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) > diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h > index 201a69e6a355..ddddbafff0ab 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h > @@ -2,7 +2,7 @@ > #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > > -#include > +#include > > #ifndef __ASSEMBLY__ > #include > @@ -251,7 +251,7 @@ extern unsigned long __pmd_frag_size_shift; > /* Bits to mask out from a PUD to get to the PMD page */ > #define PUD_MASKED_BITS 0xc0000000000000ffUL > /* Bits to mask out 
from a PGD to get to the PUD page */ > -#define PGD_MASKED_BITS 0xc0000000000000ffUL > +#define P4D_MASKED_BITS 0xc0000000000000ffUL > > /* > * Used as an indicator for rcu callback functions > @@ -949,54 +949,60 @@ static inline bool pud_access_permitted(pud_t pud, bool write) > return pte_access_permitted(pud_pte(pud), write); > } > > -#define pgd_write(pgd) pte_write(pgd_pte(pgd)) > +#define __p4d_raw(x) ((p4d_t) { __pgd_raw(x) }) > +static inline __be64 p4d_raw(p4d_t x) > +{ > + return pgd_raw(x.pgd); > +} > + Shouldn't this be defined in asm/pgtable-be-types.h, just like other __pxx_raw() ? > +#define p4d_write(p4d) pte_write(p4d_pte(p4d)) > > -static inline void pgd_clear(pgd_t *pgdp) > +static inline void p4d_clear(p4d_t *p4dp) > { > - *pgdp = __pgd(0); > + *p4dp = __p4d(0); > } > > -static inline int pgd_none(pgd_t pgd) > +static inline int p4d_none(p4d_t p4d) > { > - return !pgd_raw(pgd); > + return !p4d_raw(p4d); > } > > -static inline int pgd_present(pgd_t pgd) > +static inline int p4d_present(p4d_t p4d) > { > - return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT)); > } > > -static inline pte_t pgd_pte(pgd_t pgd) > +static inline pte_t p4d_pte(p4d_t p4d) > { > - return __pte_raw(pgd_raw(pgd)); > + return __pte_raw(p4d_raw(p4d)); > } > > -static inline pgd_t pte_pgd(pte_t pte) > +static inline p4d_t pte_p4d(pte_t pte) > { > - return __pgd_raw(pte_raw(pte)); > + return __p4d_raw(pte_raw(pte)); > } > > -static inline int pgd_bad(pgd_t pgd) > +static inline int p4d_bad(p4d_t p4d) > { > if (radix_enabled()) > - return radix__pgd_bad(pgd); > - return hash__pgd_bad(pgd); > + return radix__p4d_bad(p4d); > + return hash__p4d_bad(p4d); > } > > -#define pgd_access_permitted pgd_access_permitted > -static inline bool pgd_access_permitted(pgd_t pgd, bool write) > +#define p4d_access_permitted p4d_access_permitted > +static inline bool p4d_access_permitted(p4d_t p4d, bool write) > { > - return 
pte_access_permitted(pgd_pte(pgd), write); > + return pte_access_permitted(p4d_pte(p4d), write); > } > > -extern struct page *pgd_page(pgd_t pgd); > +extern struct page *p4d_page(p4d_t p4d); > > /* Pointers in the page table tree are physical addresses */ > #define __pgtable_ptr_val(ptr) __pa(ptr) > > #define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS) > #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) > -#define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) > +#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS) > > #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) > #define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) > @@ -1010,8 +1016,8 @@ extern struct page *pgd_page(pgd_t pgd); > > #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) > > -#define pud_offset(pgdp, addr) \ > - (((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr)) > +#define pud_offset(p4dp, addr) \ > + (((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr)) > #define pmd_offset(pudp,addr) \ > (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr)) > #define pte_offset_kernel(dir,addr) \ > @@ -1368,6 +1374,12 @@ static inline bool pud_is_leaf(pud_t pud) > return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); > } > > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE)); > +} > + > #define pgd_is_leaf pgd_is_leaf > #define pgd_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) [...] 
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h > index 8cc543ed114c..0a05fddd7881 100644 > --- a/arch/powerpc/include/asm/pgtable.h > +++ b/arch/powerpc/include/asm/pgtable.h > @@ -139,6 +139,14 @@ static inline bool pud_is_leaf(pud_t pud) > } > #endif > > +#ifndef p4d_is_leaf > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return false; > +} > +#endif > + > #ifndef pgd_is_leaf > #define pgd_is_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) > diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c > index 803940d79b73..5aacfa0b27ef 100644 > --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c > +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c > @@ -494,17 +494,39 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, > pud_free(kvm->mm, pud); > } > > +static void kvmppc_unmap_free_p4d(struct kvm *kvm, p4d_t *p4d, > + unsigned int lpid) > +{ > + unsigned long iu; > + p4d_t *p = p4d; > + > + for (iu = 0; iu < PTRS_PER_P4D; ++iu, ++p) { > + if (!p4d_present(*p)) > + continue; > + if (p4d_is_leaf(*p)) { > + p4d_clear(p); > + } else { > + pud_t *pud; > + > + pud = pud_offset(p, 0); > + kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d_clear(p); > + } > + } > + p4d_free(kvm->mm, p4d); > +} > + > void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) > { > unsigned long ig; > > for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { > - pud_t *pud; > + p4d_t *p4d; > > if (!pgd_present(*pgd)) > continue; > - pud = pud_offset(pgd, 0); > - kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d = p4d_offset(pgd, 0); > + kvmppc_unmap_free_p4d(kvm, p4d, lpid); > pgd_clear(pgd); > } > } > @@ -566,6 +588,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > unsigned long *rmapp, struct rmap_nested **n_rmap) > { > pgd_t *pgd; > + p4d_t *p4d, *new_p4d = NULL; > pud_t *pud, *new_pud = NULL; > pmd_t *pmd, *new_pmd = NULL; > pte_t 
*ptep, *new_ptep = NULL; > @@ -573,9 +596,15 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > > /* Traverse the guest's 2nd-level tree, allocate new levels needed */ > pgd = pgtable + pgd_index(gpa); > - pud = NULL; > + p4d = NULL; > if (pgd_present(*pgd)) > - pud = pud_offset(pgd, gpa); > + p4d = p4d_offset(pgd, gpa); > + else > + new_p4d = p4d_alloc_one(kvm->mm, gpa); > + > + pud = NULL; > + if (p4d_present(*p4d)) > + pud = pud_offset(p4d, gpa); Is it worth adding all this new code ? My understanding is that the series objective is to get rid of __ARCH_HAS_5LEVEL_HACK, to to add support for 5 levels to an architecture that not need it (at least for now). If we want to add support for 5 levels, it can be done later in another patch. Here I think your change could be limited to: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); > else > new_pud = pud_alloc_one(kvm->mm, gpa); > > @@ -597,12 +626,18 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > /* Now traverse again under the lock and change the tree */ > ret = -ENOMEM; > if (pgd_none(*pgd)) { > + if (!new_p4d) > + goto out_unlock; > + pgd_populate(kvm->mm, pgd, new_p4d); > + new_p4d = NULL; > + } > + if (p4d_none(*p4d)) { > if (!new_pud) > goto out_unlock; > - pgd_populate(kvm->mm, pgd, new_pud); > + p4d_populate(kvm->mm, p4d, new_pud); > new_pud = NULL; > } > - pud = pud_offset(pgd, gpa); > + pud = pud_offset(p4d, gpa); > if (pud_is_leaf(*pud)) { > unsigned long hgpa = gpa & PUD_MASK; > > @@ -1220,6 +1255,7 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > pgd_t *pgt; > struct kvm_nested_guest *nested; > pgd_t pgd, *pgdp; > + p4d_t p4d, *p4dp; > pud_t pud, *pudp; > pmd_t pmd, *pmdp; > pte_t *ptep; > @@ -1298,7 +1334,14 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > continue; > } > > - pudp = pud_offset(&pgd, gpa); > + p4dp = p4d_offset(&pgd, gpa); > + p4d = READ_ONCE(*p4dp); > + if 
(!(p4d_val(p4d) & _PAGE_PRESENT)) { > + gpa = (gpa & P4D_MASK) + P4D_SIZE; > + continue; > + } > + > + pudp = pud_offset(&p4d, gpa); Same, here you are forcing a useless read with READ_ONCE(). Your change could be limited to - pudp = pud_offset(&pgd, gpa); + pudp = pud_offset(p4d_offset(&pgd, gpa), gpa); This comment applies to many other places. > pud = READ_ONCE(*pudp); > if (!(pud_val(pud) & _PAGE_PRESENT)) { > gpa = (gpa & PUD_MASK) + PUD_SIZE; > diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c > index 3345f039a876..7a59f6863cec 100644 > --- a/arch/powerpc/lib/code-patching.c > +++ b/arch/powerpc/lib/code-patching.c > @@ -107,13 +107,18 @@ static inline int unmap_patch_area(unsigned long addr) > pte_t *ptep; > pmd_t *pmdp; > pud_t *pudp; > + p4d_t *p4dp; > pgd_t *pgdp; > > pgdp = pgd_offset_k(addr); > if (unlikely(!pgdp)) > return -EINVAL; > > - pudp = pud_offset(pgdp, addr); > + p4dp = p4d_offset(pgdp, addr); > + if (unlikely(!p4dp)) > + return -EINVAL; > + > + pudp = pud_offset(p4dp, addr); > if (unlikely(!pudp)) > return -EINVAL; > > diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c > index 0a1c65a2c565..b2fc3e71165c 100644 > --- a/arch/powerpc/mm/book3s32/mmu.c > +++ b/arch/powerpc/mm/book3s32/mmu.c > @@ -312,7 +312,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea) > > if (!Hash) > return; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, ea), ea), ea), ea); If we continue like this, in ten years this line is going to be many kilometers long. I think the above would be worth a generic helper. 
> if (!pmd_none(*pmd)) > add_hash_page(mm->context.id, ea, pmd_val(*pmd)); > } > diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c > index 2fcd321040ff..175bc33b41b7 100644 > --- a/arch/powerpc/mm/book3s32/tlb.c > +++ b/arch/powerpc/mm/book3s32/tlb.c > @@ -87,7 +87,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start, > if (start >= end) > return; > end = (end - 1) | ~PAGE_MASK; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, start), start), start), start); > for (;;) { > pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; > if (pmd_end > end) > @@ -145,7 +145,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) > return; > } > mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr), vmaddr); > if (!pmd_none(*pmd)) > flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); > } > diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c > index 64733b9cb20a..9cd15937e88a 100644 > --- a/arch/powerpc/mm/book3s64/hash_pgtable.c > +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c > @@ -148,6 +148,7 @@ void hash__vmemmap_remove_mapping(unsigned long start, > int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > { > pgd_t *pgdp; > + p4d_t *p4dp; > pud_t *pudp; > pmd_t *pmdp; > pte_t *ptep; > @@ -155,7 +156,8 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE); > if (slab_is_available()) { > pgdp = pgd_offset_k(ea); > - pudp = pud_alloc(&init_mm, pgdp, ea); > + p4dp = p4d_offset(pgdp, ea); > + pudp = pud_alloc(&init_mm, p4dp, ea); Could be a single line, without a new var. 
- pudp = pud_alloc(&init_mm, pgdp, ea); + pudp = pud_alloc(&init_mm, p4d_offset(pgdp, ea), ea); Same kind of comments as already done apply to the rest. Christophe From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-8.4 required=3.0 tests=DKIM_SIGNED,DKIM_VALID, DKIM_VALID_AU,HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_PATCH,MAILING_LIST_MULTI, SIGNED_OFF_BY,SPF_HELO_NONE,SPF_PASS,USER_AGENT_SANE_1 autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 5AB70C76199 for ; Sun, 16 Feb 2020 10:41:15 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 0A7F320718 for ; Sun, 16 Feb 2020 10:41:15 +0000 (UTC) Authentication-Results: mail.kernel.org; dkim=pass (1024-bit key) header.d=c-s.fr header.i=@c-s.fr header.b="v+gbcEDk" Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1727958AbgBPKlO (ORCPT ); Sun, 16 Feb 2020 05:41:14 -0500 Received: from pegase1.c-s.fr ([93.17.236.30]:28261 "EHLO pegase1.c-s.fr" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1726774AbgBPKlN (ORCPT ); Sun, 16 Feb 2020 05:41:13 -0500 Received: from localhost (mailhub1-int [192.168.12.234]) by localhost (Postfix) with ESMTP id 48L3Yv0sRkz9tyM7; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Authentication-Results: localhost; dkim=pass reason="1024-bit key; insecure key" header.d=c-s.fr header.i=@c-s.fr header.b=v+gbcEDk; dkim-adsp=pass; dkim-atps=neutral X-Virus-Scanned: Debian amavisd-new at c-s.fr Received: from pegase1.c-s.fr ([192.168.12.234]) by localhost (pegase1.c-s.fr [192.168.12.234]) (amavisd-new, port 10024) with ESMTP id WyvC_D0c5SGS; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Received: from messagerie.si.c-s.fr (messagerie.si.c-s.fr [192.168.25.192]) by 
pegase1.c-s.fr (Postfix) with ESMTP id 48L3Yt6byhz9tyM6; Sun, 16 Feb 2020 11:41:06 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=c-s.fr; s=mail; t=1581849666; bh=uYX8/YhQnPSB2yIUz2Iiabdy4hBMITqWSMKEeNzcsUE=; h=Subject:To:Cc:References:From:Date:In-Reply-To:From; b=v+gbcEDki2kN7vHEJxzn4fWBIL7Q/3I+0FBhUByo9drALwmhlhMysLr6CcMG4Tb/X Z4bTovlfUg5KRdTCIWxMkP3mPM9tSoSf43EJfHHltAPtJWCtCrqKOA8Gx1u5xXKGgL NDEaCjEHzsYa4iDa+yCT8tNN28WzZnaXEMbkJvBY= Received: from localhost (localhost [127.0.0.1]) by messagerie.si.c-s.fr (Postfix) with ESMTP id D90528B784; Sun, 16 Feb 2020 11:41:09 +0100 (CET) X-Virus-Scanned: amavisd-new at c-s.fr Received: from messagerie.si.c-s.fr ([127.0.0.1]) by localhost (messagerie.si.c-s.fr [127.0.0.1]) (amavisd-new, port 10023) with ESMTP id VGVzW0Ckh2uU; Sun, 16 Feb 2020 11:41:09 +0100 (CET) Received: from [192.168.4.90] (unknown [192.168.4.90]) by messagerie.si.c-s.fr (Postfix) with ESMTP id B4E908B755; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Subject: Re: [PATCH v2 07/13] powerpc: add support for folded p4d page tables To: Mike Rapoport , linux-kernel@vger.kernel.org Cc: Andrew Morton , Arnd Bergmann , Benjamin Herrenschmidt , Brian Cain , Catalin Marinas , Fenghua Yu , Geert Uytterhoeven , Guan Xuetao , James Morse , Jonas Bonn , Julien Thierry , Ley Foon Tan , Marc Zyngier , Michael Ellerman , Paul Mackerras , Rich Felker , Russell King , Stafford Horne , Stefan Kristiansson , Suzuki K Poulose , Tony Luck , Will Deacon , Yoshinori Sato , kvmarm@lists.cs.columbia.edu, kvm-ppc@vger.kernel.org, linux-arch@vger.kernel.org, linux-arm-kernel@lists.infradead.org, linux-hexagon@vger.kernel.org, linux-ia64@vger.kernel.org, linux-mm@kvack.org, linuxppc-dev@lists.ozlabs.org, linux-sh@vger.kernel.org, nios2-dev@lists.rocketboards.org, openrisc@lists.librecores.org, uclinux-h8-devel@lists.sourceforge.jp, Mike Rapoport References: <20200216081843.28670-1-rppt@kernel.org> <20200216081843.28670-8-rppt@kernel.org> From: Christophe Leroy 
Message-ID: Date: Sun, 16 Feb 2020 11:41:07 +0100 User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:68.0) Gecko/20100101 Thunderbird/68.5.0 MIME-Version: 1.0 In-Reply-To: <20200216081843.28670-8-rppt@kernel.org> Content-Type: text/plain; charset=utf-8; format=flowed Content-Language: fr Content-Transfer-Encoding: 8bit Sender: linux-kernel-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Le 16/02/2020 à 09:18, Mike Rapoport a écrit : > From: Mike Rapoport > > Implement primitives necessary for the 4th level folding, add walks of p4d > level where appropriate and replace 5level-fixup.h with pgtable-nop4d.h. I don't think it is worth adding all this additionnals walks of p4d, this patch could be limited to changes like: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); The additionnal walks should be added through another patch the day powerpc need them. See below for more comments. > > Signed-off-by: Mike Rapoport > Tested-by: Christophe Leroy # 8xx and 83xx > --- > arch/powerpc/include/asm/book3s/32/pgtable.h | 1 - > arch/powerpc/include/asm/book3s/64/hash.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgalloc.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgtable.h | 58 ++++++++++-------- > arch/powerpc/include/asm/book3s/64/radix.h | 6 +- > arch/powerpc/include/asm/nohash/32/pgtable.h | 1 - > arch/powerpc/include/asm/nohash/64/pgalloc.h | 2 +- > .../include/asm/nohash/64/pgtable-4k.h | 32 +++++----- > arch/powerpc/include/asm/nohash/64/pgtable.h | 6 +- > arch/powerpc/include/asm/pgtable.h | 8 +++ > arch/powerpc/kvm/book3s_64_mmu_radix.c | 59 ++++++++++++++++--- > arch/powerpc/lib/code-patching.c | 7 ++- > arch/powerpc/mm/book3s32/mmu.c | 2 +- > arch/powerpc/mm/book3s32/tlb.c | 4 +- > arch/powerpc/mm/book3s64/hash_pgtable.c | 4 +- > arch/powerpc/mm/book3s64/radix_pgtable.c | 19 ++++-- > arch/powerpc/mm/book3s64/subpage_prot.c | 6 +- > arch/powerpc/mm/hugetlbpage.c | 28 +++++---- > 
arch/powerpc/mm/kasan/kasan_init_32.c | 8 +-- > arch/powerpc/mm/mem.c | 4 +- > arch/powerpc/mm/nohash/40x.c | 4 +- > arch/powerpc/mm/nohash/book3e_pgtable.c | 15 +++-- > arch/powerpc/mm/pgtable.c | 25 +++++++- > arch/powerpc/mm/pgtable_32.c | 28 +++++---- > arch/powerpc/mm/pgtable_64.c | 10 ++-- > arch/powerpc/mm/ptdump/hashpagetable.c | 20 ++++++- > arch/powerpc/mm/ptdump/ptdump.c | 22 ++++++- > arch/powerpc/xmon/xmon.c | 17 +++++- > 28 files changed, 284 insertions(+), 120 deletions(-) > > diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h > index 5b39c11e884a..39ec11371be0 100644 > --- a/arch/powerpc/include/asm/book3s/32/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h > @@ -2,7 +2,6 @@ > #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H > #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H > > -#define __ARCH_USE_5LEVEL_HACK > #include > > #include > diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h > index 2781ebf6add4..876d1528c2cf 100644 > --- a/arch/powerpc/include/asm/book3s/64/hash.h > +++ b/arch/powerpc/include/asm/book3s/64/hash.h > @@ -134,9 +134,9 @@ static inline int get_region_id(unsigned long ea) > > #define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS) > #define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS) > -static inline int hash__pgd_bad(pgd_t pgd) > +static inline int hash__p4d_bad(p4d_t p4d) > { > - return (pgd_val(pgd) == 0); > + return (p4d_val(p4d) == 0); > } > #ifdef CONFIG_STRICT_KERNEL_RWX > extern void hash__mark_rodata_ro(void); > diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h > index a41e91bd0580..69c5b051734f 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h > +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h > @@ -85,9 +85,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) > kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); > } > > -static 
inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) > +static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud) > { > - *pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > + *pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > } > > static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) > diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h > index 201a69e6a355..ddddbafff0ab 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h > @@ -2,7 +2,7 @@ > #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > > -#include > +#include > > #ifndef __ASSEMBLY__ > #include > @@ -251,7 +251,7 @@ extern unsigned long __pmd_frag_size_shift; > /* Bits to mask out from a PUD to get to the PMD page */ > #define PUD_MASKED_BITS 0xc0000000000000ffUL > /* Bits to mask out from a PGD to get to the PUD page */ > -#define PGD_MASKED_BITS 0xc0000000000000ffUL > +#define P4D_MASKED_BITS 0xc0000000000000ffUL > > /* > * Used as an indicator for rcu callback functions > @@ -949,54 +949,60 @@ static inline bool pud_access_permitted(pud_t pud, bool write) > return pte_access_permitted(pud_pte(pud), write); > } > > -#define pgd_write(pgd) pte_write(pgd_pte(pgd)) > +#define __p4d_raw(x) ((p4d_t) { __pgd_raw(x) }) > +static inline __be64 p4d_raw(p4d_t x) > +{ > + return pgd_raw(x.pgd); > +} > + Shouldn't this be defined in asm/pgtable-be-types.h, just like other __pxx_raw() ? 
> +#define p4d_write(p4d) pte_write(p4d_pte(p4d)) > > -static inline void pgd_clear(pgd_t *pgdp) > +static inline void p4d_clear(p4d_t *p4dp) > { > - *pgdp = __pgd(0); > + *p4dp = __p4d(0); > } > > -static inline int pgd_none(pgd_t pgd) > +static inline int p4d_none(p4d_t p4d) > { > - return !pgd_raw(pgd); > + return !p4d_raw(p4d); > } > > -static inline int pgd_present(pgd_t pgd) > +static inline int p4d_present(p4d_t p4d) > { > - return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT)); > } > > -static inline pte_t pgd_pte(pgd_t pgd) > +static inline pte_t p4d_pte(p4d_t p4d) > { > - return __pte_raw(pgd_raw(pgd)); > + return __pte_raw(p4d_raw(p4d)); > } > > -static inline pgd_t pte_pgd(pte_t pte) > +static inline p4d_t pte_p4d(pte_t pte) > { > - return __pgd_raw(pte_raw(pte)); > + return __p4d_raw(pte_raw(pte)); > } > > -static inline int pgd_bad(pgd_t pgd) > +static inline int p4d_bad(p4d_t p4d) > { > if (radix_enabled()) > - return radix__pgd_bad(pgd); > - return hash__pgd_bad(pgd); > + return radix__p4d_bad(p4d); > + return hash__p4d_bad(p4d); > } > > -#define pgd_access_permitted pgd_access_permitted > -static inline bool pgd_access_permitted(pgd_t pgd, bool write) > +#define p4d_access_permitted p4d_access_permitted > +static inline bool p4d_access_permitted(p4d_t p4d, bool write) > { > - return pte_access_permitted(pgd_pte(pgd), write); > + return pte_access_permitted(p4d_pte(p4d), write); > } > > -extern struct page *pgd_page(pgd_t pgd); > +extern struct page *p4d_page(p4d_t p4d); > > /* Pointers in the page table tree are physical addresses */ > #define __pgtable_ptr_val(ptr) __pa(ptr) > > #define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS) > #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) > -#define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) > +#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS) > > #define pgd_index(address) (((address) >> 
(PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) > #define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) > @@ -1010,8 +1016,8 @@ extern struct page *pgd_page(pgd_t pgd); > > #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) > > -#define pud_offset(pgdp, addr) \ > - (((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr)) > +#define pud_offset(p4dp, addr) \ > + (((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr)) > #define pmd_offset(pudp,addr) \ > (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr)) > #define pte_offset_kernel(dir,addr) \ > @@ -1368,6 +1374,12 @@ static inline bool pud_is_leaf(pud_t pud) > return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); > } > > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE)); > +} > + > #define pgd_is_leaf pgd_is_leaf > #define pgd_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) [...] > diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h > index 8cc543ed114c..0a05fddd7881 100644 > --- a/arch/powerpc/include/asm/pgtable.h > +++ b/arch/powerpc/include/asm/pgtable.h > @@ -139,6 +139,14 @@ static inline bool pud_is_leaf(pud_t pud) > } > #endif > > +#ifndef p4d_is_leaf > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return false; > +} > +#endif > + > #ifndef pgd_is_leaf > #define pgd_is_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) > diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c > index 803940d79b73..5aacfa0b27ef 100644 > --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c > +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c > @@ -494,17 +494,39 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, > pud_free(kvm->mm, pud); > } > > +static void kvmppc_unmap_free_p4d(struct kvm *kvm, p4d_t *p4d, > + unsigned int lpid) > +{ > + unsigned long iu; > + p4d_t *p = p4d; > + > + 
for (iu = 0; iu < PTRS_PER_P4D; ++iu, ++p) { > + if (!p4d_present(*p)) > + continue; > + if (p4d_is_leaf(*p)) { > + p4d_clear(p); > + } else { > + pud_t *pud; > + > + pud = pud_offset(p, 0); > + kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d_clear(p); > + } > + } > + p4d_free(kvm->mm, p4d); > +} > + > void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) > { > unsigned long ig; > > for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { > - pud_t *pud; > + p4d_t *p4d; > > if (!pgd_present(*pgd)) > continue; > - pud = pud_offset(pgd, 0); > - kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d = p4d_offset(pgd, 0); > + kvmppc_unmap_free_p4d(kvm, p4d, lpid); > pgd_clear(pgd); > } > } > @@ -566,6 +588,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > unsigned long *rmapp, struct rmap_nested **n_rmap) > { > pgd_t *pgd; > + p4d_t *p4d, *new_p4d = NULL; > pud_t *pud, *new_pud = NULL; > pmd_t *pmd, *new_pmd = NULL; > pte_t *ptep, *new_ptep = NULL; > @@ -573,9 +596,15 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > > /* Traverse the guest's 2nd-level tree, allocate new levels needed */ > pgd = pgtable + pgd_index(gpa); > - pud = NULL; > + p4d = NULL; > if (pgd_present(*pgd)) > - pud = pud_offset(pgd, gpa); > + p4d = p4d_offset(pgd, gpa); > + else > + new_p4d = p4d_alloc_one(kvm->mm, gpa); > + > + pud = NULL; > + if (p4d_present(*p4d)) > + pud = pud_offset(p4d, gpa); Is it worth adding all this new code ? My understanding is that the series objective is to get rid of __ARCH_HAS_5LEVEL_HACK, to to add support for 5 levels to an architecture that not need it (at least for now). If we want to add support for 5 levels, it can be done later in another patch. 
Here I think your change could be limited to: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); > else > new_pud = pud_alloc_one(kvm->mm, gpa); > > @@ -597,12 +626,18 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > /* Now traverse again under the lock and change the tree */ > ret = -ENOMEM; > if (pgd_none(*pgd)) { > + if (!new_p4d) > + goto out_unlock; > + pgd_populate(kvm->mm, pgd, new_p4d); > + new_p4d = NULL; > + } > + if (p4d_none(*p4d)) { > if (!new_pud) > goto out_unlock; > - pgd_populate(kvm->mm, pgd, new_pud); > + p4d_populate(kvm->mm, p4d, new_pud); > new_pud = NULL; > } > - pud = pud_offset(pgd, gpa); > + pud = pud_offset(p4d, gpa); > if (pud_is_leaf(*pud)) { > unsigned long hgpa = gpa & PUD_MASK; > > @@ -1220,6 +1255,7 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > pgd_t *pgt; > struct kvm_nested_guest *nested; > pgd_t pgd, *pgdp; > + p4d_t p4d, *p4dp; > pud_t pud, *pudp; > pmd_t pmd, *pmdp; > pte_t *ptep; > @@ -1298,7 +1334,14 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > continue; > } > > - pudp = pud_offset(&pgd, gpa); > + p4dp = p4d_offset(&pgd, gpa); > + p4d = READ_ONCE(*p4dp); > + if (!(p4d_val(p4d) & _PAGE_PRESENT)) { > + gpa = (gpa & P4D_MASK) + P4D_SIZE; > + continue; > + } > + > + pudp = pud_offset(&p4d, gpa); Same, here you are forcing a useless read with READ_ONCE(). Your change could be limited to - pudp = pud_offset(&pgd, gpa); + pudp = pud_offset(p4d_offset(&pgd, gpa), gpa); This comment applies to many other places. 
> pud = READ_ONCE(*pudp); > if (!(pud_val(pud) & _PAGE_PRESENT)) { > gpa = (gpa & PUD_MASK) + PUD_SIZE; > diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c > index 3345f039a876..7a59f6863cec 100644 > --- a/arch/powerpc/lib/code-patching.c > +++ b/arch/powerpc/lib/code-patching.c > @@ -107,13 +107,18 @@ static inline int unmap_patch_area(unsigned long addr) > pte_t *ptep; > pmd_t *pmdp; > pud_t *pudp; > + p4d_t *p4dp; > pgd_t *pgdp; > > pgdp = pgd_offset_k(addr); > if (unlikely(!pgdp)) > return -EINVAL; > > - pudp = pud_offset(pgdp, addr); > + p4dp = p4d_offset(pgdp, addr); > + if (unlikely(!p4dp)) > + return -EINVAL; > + > + pudp = pud_offset(p4dp, addr); > if (unlikely(!pudp)) > return -EINVAL; > > diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c > index 0a1c65a2c565..b2fc3e71165c 100644 > --- a/arch/powerpc/mm/book3s32/mmu.c > +++ b/arch/powerpc/mm/book3s32/mmu.c > @@ -312,7 +312,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea) > > if (!Hash) > return; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, ea), ea), ea), ea); If we continue like this, in ten years this like is going to be many kilometers long. I think the above would be worth a generic helper. 
> if (!pmd_none(*pmd)) > add_hash_page(mm->context.id, ea, pmd_val(*pmd)); > } > diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c > index 2fcd321040ff..175bc33b41b7 100644 > --- a/arch/powerpc/mm/book3s32/tlb.c > +++ b/arch/powerpc/mm/book3s32/tlb.c > @@ -87,7 +87,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start, > if (start >= end) > return; > end = (end - 1) | ~PAGE_MASK; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, start), start), start), start); > for (;;) { > pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; > if (pmd_end > end) > @@ -145,7 +145,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) > return; > } > mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr), vmaddr); > if (!pmd_none(*pmd)) > flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); > } > diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c > index 64733b9cb20a..9cd15937e88a 100644 > --- a/arch/powerpc/mm/book3s64/hash_pgtable.c > +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c > @@ -148,6 +148,7 @@ void hash__vmemmap_remove_mapping(unsigned long start, > int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > { > pgd_t *pgdp; > + p4d_t *p4dp; > pud_t *pudp; > pmd_t *pmdp; > pte_t *ptep; > @@ -155,7 +156,8 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE); > if (slab_is_available()) { > pgdp = pgd_offset_k(ea); > - pudp = pud_alloc(&init_mm, pgdp, ea); > + p4dp = p4d_offset(pgdp, ea); > + pudp = pud_alloc(&init_mm, p4dp, ea); Could be a single line, without a new var. 
- pudp = pud_alloc(&init_mm, pgdp, ea); + pudp = pud_alloc(&init_mm, p4d_offset(pgdp, ea), ea); Same kind of comments as already done apply to the rest. Christophe From mboxrd@z Thu Jan 1 00:00:00 1970 From: Christophe Leroy Subject: Re: [PATCH v2 07/13] powerpc: add support for folded p4d page tables Date: Sun, 16 Feb 2020 11:41:07 +0100 Message-ID: References: <20200216081843.28670-1-rppt@kernel.org> <20200216081843.28670-8-rppt@kernel.org> Mime-Version: 1.0 Content-Type: text/plain; charset=utf-8; format=flowed Content-Transfer-Encoding: 8bit Return-path: In-Reply-To: <20200216081843.28670-8-rppt@kernel.org> Content-Language: fr Sender: linux-hexagon-owner@vger.kernel.org To: Mike Rapoport , linux-kernel@vger.kernel.org Cc: Andrew Morton , Arnd Bergmann , Benjamin Herrenschmidt , Brian Cain , Catalin Marinas , Fenghua Yu , Geert Uytterhoeven , Guan Xuetao , James Morse , Jonas Bonn , Julien Thierry , Ley Foon Tan , Marc Zyngier , Michael Ellerman , Paul Mackerras , Rich Felker , Russell King , Stafford Horne , Stefan Kristiansson , Suzuki List-Id: linux-arch.vger.kernel.org Le 16/02/2020 à 09:18, Mike Rapoport a écrit : > From: Mike Rapoport > > Implement primitives necessary for the 4th level folding, add walks of p4d > level where appropriate and replace 5level-fixup.h with pgtable-nop4d.h. I don't think it is worth adding all this additionnals walks of p4d, this patch could be limited to changes like: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); The additionnal walks should be added through another patch the day powerpc need them. See below for more comments. 
> > Signed-off-by: Mike Rapoport > Tested-by: Christophe Leroy # 8xx and 83xx > --- > arch/powerpc/include/asm/book3s/32/pgtable.h | 1 - > arch/powerpc/include/asm/book3s/64/hash.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgalloc.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgtable.h | 58 ++++++++++-------- > arch/powerpc/include/asm/book3s/64/radix.h | 6 +- > arch/powerpc/include/asm/nohash/32/pgtable.h | 1 - > arch/powerpc/include/asm/nohash/64/pgalloc.h | 2 +- > .../include/asm/nohash/64/pgtable-4k.h | 32 +++++----- > arch/powerpc/include/asm/nohash/64/pgtable.h | 6 +- > arch/powerpc/include/asm/pgtable.h | 8 +++ > arch/powerpc/kvm/book3s_64_mmu_radix.c | 59 ++++++++++++++++--- > arch/powerpc/lib/code-patching.c | 7 ++- > arch/powerpc/mm/book3s32/mmu.c | 2 +- > arch/powerpc/mm/book3s32/tlb.c | 4 +- > arch/powerpc/mm/book3s64/hash_pgtable.c | 4 +- > arch/powerpc/mm/book3s64/radix_pgtable.c | 19 ++++-- > arch/powerpc/mm/book3s64/subpage_prot.c | 6 +- > arch/powerpc/mm/hugetlbpage.c | 28 +++++---- > arch/powerpc/mm/kasan/kasan_init_32.c | 8 +-- > arch/powerpc/mm/mem.c | 4 +- > arch/powerpc/mm/nohash/40x.c | 4 +- > arch/powerpc/mm/nohash/book3e_pgtable.c | 15 +++-- > arch/powerpc/mm/pgtable.c | 25 +++++++- > arch/powerpc/mm/pgtable_32.c | 28 +++++---- > arch/powerpc/mm/pgtable_64.c | 10 ++-- > arch/powerpc/mm/ptdump/hashpagetable.c | 20 ++++++- > arch/powerpc/mm/ptdump/ptdump.c | 22 ++++++- > arch/powerpc/xmon/xmon.c | 17 +++++- > 28 files changed, 284 insertions(+), 120 deletions(-) > > diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h > index 5b39c11e884a..39ec11371be0 100644 > --- a/arch/powerpc/include/asm/book3s/32/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h > @@ -2,7 +2,6 @@ > #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H > #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H > > -#define __ARCH_USE_5LEVEL_HACK > #include > > #include > diff --git a/arch/powerpc/include/asm/book3s/64/hash.h 
b/arch/powerpc/include/asm/book3s/64/hash.h > index 2781ebf6add4..876d1528c2cf 100644 > --- a/arch/powerpc/include/asm/book3s/64/hash.h > +++ b/arch/powerpc/include/asm/book3s/64/hash.h > @@ -134,9 +134,9 @@ static inline int get_region_id(unsigned long ea) > > #define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS) > #define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS) > -static inline int hash__pgd_bad(pgd_t pgd) > +static inline int hash__p4d_bad(p4d_t p4d) > { > - return (pgd_val(pgd) == 0); > + return (p4d_val(p4d) == 0); > } > #ifdef CONFIG_STRICT_KERNEL_RWX > extern void hash__mark_rodata_ro(void); > diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h > index a41e91bd0580..69c5b051734f 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h > +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h > @@ -85,9 +85,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) > kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); > } > > -static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) > +static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud) > { > - *pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > + *pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > } > > static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) > diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h > index 201a69e6a355..ddddbafff0ab 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h > @@ -2,7 +2,7 @@ > #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > > -#include > +#include > > #ifndef __ASSEMBLY__ > #include > @@ -251,7 +251,7 @@ extern unsigned long __pmd_frag_size_shift; > /* Bits to mask out from a PUD to get to the PMD page */ > #define PUD_MASKED_BITS 0xc0000000000000ffUL > /* Bits to mask out 
from a PGD to get to the PUD page */ > -#define PGD_MASKED_BITS 0xc0000000000000ffUL > +#define P4D_MASKED_BITS 0xc0000000000000ffUL > > /* > * Used as an indicator for rcu callback functions > @@ -949,54 +949,60 @@ static inline bool pud_access_permitted(pud_t pud, bool write) > return pte_access_permitted(pud_pte(pud), write); > } > > -#define pgd_write(pgd) pte_write(pgd_pte(pgd)) > +#define __p4d_raw(x) ((p4d_t) { __pgd_raw(x) }) > +static inline __be64 p4d_raw(p4d_t x) > +{ > + return pgd_raw(x.pgd); > +} > + Shouldn't this be defined in asm/pgtable-be-types.h, just like other __pxx_raw() ? > +#define p4d_write(p4d) pte_write(p4d_pte(p4d)) > > -static inline void pgd_clear(pgd_t *pgdp) > +static inline void p4d_clear(p4d_t *p4dp) > { > - *pgdp = __pgd(0); > + *p4dp = __p4d(0); > } > > -static inline int pgd_none(pgd_t pgd) > +static inline int p4d_none(p4d_t p4d) > { > - return !pgd_raw(pgd); > + return !p4d_raw(p4d); > } > > -static inline int pgd_present(pgd_t pgd) > +static inline int p4d_present(p4d_t p4d) > { > - return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT)); > } > > -static inline pte_t pgd_pte(pgd_t pgd) > +static inline pte_t p4d_pte(p4d_t p4d) > { > - return __pte_raw(pgd_raw(pgd)); > + return __pte_raw(p4d_raw(p4d)); > } > > -static inline pgd_t pte_pgd(pte_t pte) > +static inline p4d_t pte_p4d(pte_t pte) > { > - return __pgd_raw(pte_raw(pte)); > + return __p4d_raw(pte_raw(pte)); > } > > -static inline int pgd_bad(pgd_t pgd) > +static inline int p4d_bad(p4d_t p4d) > { > if (radix_enabled()) > - return radix__pgd_bad(pgd); > - return hash__pgd_bad(pgd); > + return radix__p4d_bad(p4d); > + return hash__p4d_bad(p4d); > } > > -#define pgd_access_permitted pgd_access_permitted > -static inline bool pgd_access_permitted(pgd_t pgd, bool write) > +#define p4d_access_permitted p4d_access_permitted > +static inline bool p4d_access_permitted(p4d_t p4d, bool write) > { > - return 
pte_access_permitted(pgd_pte(pgd), write); > + return pte_access_permitted(p4d_pte(p4d), write); > } > > -extern struct page *pgd_page(pgd_t pgd); > +extern struct page *p4d_page(p4d_t p4d); > > /* Pointers in the page table tree are physical addresses */ > #define __pgtable_ptr_val(ptr) __pa(ptr) > > #define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS) > #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) > -#define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) > +#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS) > > #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) > #define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) > @@ -1010,8 +1016,8 @@ extern struct page *pgd_page(pgd_t pgd); > > #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) > > -#define pud_offset(pgdp, addr) \ > - (((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr)) > +#define pud_offset(p4dp, addr) \ > + (((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr)) > #define pmd_offset(pudp,addr) \ > (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr)) > #define pte_offset_kernel(dir,addr) \ > @@ -1368,6 +1374,12 @@ static inline bool pud_is_leaf(pud_t pud) > return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); > } > > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE)); > +} > + > #define pgd_is_leaf pgd_is_leaf > #define pgd_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) [...] 
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h > index 8cc543ed114c..0a05fddd7881 100644 > --- a/arch/powerpc/include/asm/pgtable.h > +++ b/arch/powerpc/include/asm/pgtable.h > @@ -139,6 +139,14 @@ static inline bool pud_is_leaf(pud_t pud) > } > #endif > > +#ifndef p4d_is_leaf > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return false; > +} > +#endif > + > #ifndef pgd_is_leaf > #define pgd_is_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) > diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c > index 803940d79b73..5aacfa0b27ef 100644 > --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c > +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c > @@ -494,17 +494,39 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, > pud_free(kvm->mm, pud); > } > > +static void kvmppc_unmap_free_p4d(struct kvm *kvm, p4d_t *p4d, > + unsigned int lpid) > +{ > + unsigned long iu; > + p4d_t *p = p4d; > + > + for (iu = 0; iu < PTRS_PER_P4D; ++iu, ++p) { > + if (!p4d_present(*p)) > + continue; > + if (p4d_is_leaf(*p)) { > + p4d_clear(p); > + } else { > + pud_t *pud; > + > + pud = pud_offset(p, 0); > + kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d_clear(p); > + } > + } > + p4d_free(kvm->mm, p4d); > +} > + > void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) > { > unsigned long ig; > > for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { > - pud_t *pud; > + p4d_t *p4d; > > if (!pgd_present(*pgd)) > continue; > - pud = pud_offset(pgd, 0); > - kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d = p4d_offset(pgd, 0); > + kvmppc_unmap_free_p4d(kvm, p4d, lpid); > pgd_clear(pgd); > } > } > @@ -566,6 +588,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > unsigned long *rmapp, struct rmap_nested **n_rmap) > { > pgd_t *pgd; > + p4d_t *p4d, *new_p4d = NULL; > pud_t *pud, *new_pud = NULL; > pmd_t *pmd, *new_pmd = NULL; > pte_t 
*ptep, *new_ptep = NULL; > @@ -573,9 +596,15 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > > /* Traverse the guest's 2nd-level tree, allocate new levels needed */ > pgd = pgtable + pgd_index(gpa); > - pud = NULL; > + p4d = NULL; > if (pgd_present(*pgd)) > - pud = pud_offset(pgd, gpa); > + p4d = p4d_offset(pgd, gpa); > + else > + new_p4d = p4d_alloc_one(kvm->mm, gpa); > + > + pud = NULL; > + if (p4d_present(*p4d)) > + pud = pud_offset(p4d, gpa); Is it worth adding all this new code ? My understanding is that the series objective is to get rid of __ARCH_HAS_5LEVEL_HACK, to to add support for 5 levels to an architecture that not need it (at least for now). If we want to add support for 5 levels, it can be done later in another patch. Here I think your change could be limited to: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); > else > new_pud = pud_alloc_one(kvm->mm, gpa); > > @@ -597,12 +626,18 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > /* Now traverse again under the lock and change the tree */ > ret = -ENOMEM; > if (pgd_none(*pgd)) { > + if (!new_p4d) > + goto out_unlock; > + pgd_populate(kvm->mm, pgd, new_p4d); > + new_p4d = NULL; > + } > + if (p4d_none(*p4d)) { > if (!new_pud) > goto out_unlock; > - pgd_populate(kvm->mm, pgd, new_pud); > + p4d_populate(kvm->mm, p4d, new_pud); > new_pud = NULL; > } > - pud = pud_offset(pgd, gpa); > + pud = pud_offset(p4d, gpa); > if (pud_is_leaf(*pud)) { > unsigned long hgpa = gpa & PUD_MASK; > > @@ -1220,6 +1255,7 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > pgd_t *pgt; > struct kvm_nested_guest *nested; > pgd_t pgd, *pgdp; > + p4d_t p4d, *p4dp; > pud_t pud, *pudp; > pmd_t pmd, *pmdp; > pte_t *ptep; > @@ -1298,7 +1334,14 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > continue; > } > > - pudp = pud_offset(&pgd, gpa); > + p4dp = p4d_offset(&pgd, gpa); > + p4d = READ_ONCE(*p4dp); > + if 
(!(p4d_val(p4d) & _PAGE_PRESENT)) { > + gpa = (gpa & P4D_MASK) + P4D_SIZE; > + continue; > + } > + > + pudp = pud_offset(&p4d, gpa); Same, here you are forcing a useless read with READ_ONCE(). Your change could be limited to - pudp = pud_offset(&pgd, gpa); + pudp = pud_offset(p4d_offset(&pgd, gpa), gpa); This comment applies to many other places. > pud = READ_ONCE(*pudp); > if (!(pud_val(pud) & _PAGE_PRESENT)) { > gpa = (gpa & PUD_MASK) + PUD_SIZE; > diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c > index 3345f039a876..7a59f6863cec 100644 > --- a/arch/powerpc/lib/code-patching.c > +++ b/arch/powerpc/lib/code-patching.c > @@ -107,13 +107,18 @@ static inline int unmap_patch_area(unsigned long addr) > pte_t *ptep; > pmd_t *pmdp; > pud_t *pudp; > + p4d_t *p4dp; > pgd_t *pgdp; > > pgdp = pgd_offset_k(addr); > if (unlikely(!pgdp)) > return -EINVAL; > > - pudp = pud_offset(pgdp, addr); > + p4dp = p4d_offset(pgdp, addr); > + if (unlikely(!p4dp)) > + return -EINVAL; > + > + pudp = pud_offset(p4dp, addr); > if (unlikely(!pudp)) > return -EINVAL; > > diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c > index 0a1c65a2c565..b2fc3e71165c 100644 > --- a/arch/powerpc/mm/book3s32/mmu.c > +++ b/arch/powerpc/mm/book3s32/mmu.c > @@ -312,7 +312,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea) > > if (!Hash) > return; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, ea), ea), ea), ea); If we continue like this, in ten years this like is going to be many kilometers long. I think the above would be worth a generic helper. 
> if (!pmd_none(*pmd)) > add_hash_page(mm->context.id, ea, pmd_val(*pmd)); > } > diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c > index 2fcd321040ff..175bc33b41b7 100644 > --- a/arch/powerpc/mm/book3s32/tlb.c > +++ b/arch/powerpc/mm/book3s32/tlb.c > @@ -87,7 +87,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start, > if (start >= end) > return; > end = (end - 1) | ~PAGE_MASK; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, start), start), start), start); > for (;;) { > pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; > if (pmd_end > end) > @@ -145,7 +145,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) > return; > } > mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr), vmaddr); > if (!pmd_none(*pmd)) > flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); > } > diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c > index 64733b9cb20a..9cd15937e88a 100644 > --- a/arch/powerpc/mm/book3s64/hash_pgtable.c > +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c > @@ -148,6 +148,7 @@ void hash__vmemmap_remove_mapping(unsigned long start, > int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > { > pgd_t *pgdp; > + p4d_t *p4dp; > pud_t *pudp; > pmd_t *pmdp; > pte_t *ptep; > @@ -155,7 +156,8 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE); > if (slab_is_available()) { > pgdp = pgd_offset_k(ea); > - pudp = pud_alloc(&init_mm, pgdp, ea); > + p4dp = p4d_offset(pgdp, ea); > + pudp = pud_alloc(&init_mm, p4dp, ea); Could be a single line, without a new var. 
- pudp = pud_alloc(&init_mm, pgdp, ea); + pudp = pud_alloc(&init_mm, p4d_offset(pgdp, ea), ea); Same kind of comments as already done apply to the rest. Christophe From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-8.1 required=3.0 tests=DKIM_INVALID,DKIM_SIGNED, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_PATCH,MAILING_LIST_MULTI,SIGNED_OFF_BY, SPF_HELO_NONE,SPF_PASS,USER_AGENT_SANE_1 autolearn=unavailable autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id B74CAC2BA83 for ; Sun, 16 Feb 2020 10:42:49 +0000 (UTC) Received: from lists.ozlabs.org (lists.ozlabs.org [203.11.71.2]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 0528F20718 for ; Sun, 16 Feb 2020 10:42:48 +0000 (UTC) Authentication-Results: mail.kernel.org; dkim=fail reason="signature verification failed" (1024-bit key) header.d=c-s.fr header.i=@c-s.fr header.b="v+gbcEDk" DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 0528F20718 Authentication-Results: mail.kernel.org; dmarc=none (p=none dis=none) header.from=c-s.fr Authentication-Results: mail.kernel.org; spf=pass smtp.mailfrom=linuxppc-dev-bounces+linuxppc-dev=archiver.kernel.org@lists.ozlabs.org Received: from lists.ozlabs.org (lists.ozlabs.org [IPv6:2401:3900:2:1::3]) by lists.ozlabs.org (Postfix) with ESMTP id 48L3bp33Q2zDqfk for ; Sun, 16 Feb 2020 21:42:46 +1100 (AEDT) Authentication-Results: lists.ozlabs.org; spf=pass (sender SPF authorized) smtp.mailfrom=c-s.fr (client-ip=93.17.236.30; helo=pegase1.c-s.fr; envelope-from=christophe.leroy@c-s.fr; receiver=) Authentication-Results: lists.ozlabs.org; dmarc=none (p=none dis=none) header.from=c-s.fr Authentication-Results: lists.ozlabs.org; dkim=pass (1024-bit key; 
unprotected) header.d=c-s.fr header.i=@c-s.fr header.a=rsa-sha256 header.s=mail header.b=v+gbcEDk; dkim-atps=neutral Received: from pegase1.c-s.fr (pegase1.c-s.fr [93.17.236.30]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by lists.ozlabs.org (Postfix) with ESMTPS id 48L3Z54lznzDqdr for ; Sun, 16 Feb 2020 21:41:15 +1100 (AEDT) Received: from localhost (mailhub1-int [192.168.12.234]) by localhost (Postfix) with ESMTP id 48L3Yv0sRkz9tyM7; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Authentication-Results: localhost; dkim=pass reason="1024-bit key; insecure key" header.d=c-s.fr header.i=@c-s.fr header.b=v+gbcEDk; dkim-adsp=pass; dkim-atps=neutral X-Virus-Scanned: Debian amavisd-new at c-s.fr Received: from pegase1.c-s.fr ([192.168.12.234]) by localhost (pegase1.c-s.fr [192.168.12.234]) (amavisd-new, port 10024) with ESMTP id WyvC_D0c5SGS; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Received: from messagerie.si.c-s.fr (messagerie.si.c-s.fr [192.168.25.192]) by pegase1.c-s.fr (Postfix) with ESMTP id 48L3Yt6byhz9tyM6; Sun, 16 Feb 2020 11:41:06 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=c-s.fr; s=mail; t=1581849666; bh=uYX8/YhQnPSB2yIUz2Iiabdy4hBMITqWSMKEeNzcsUE=; h=Subject:To:Cc:References:From:Date:In-Reply-To:From; b=v+gbcEDki2kN7vHEJxzn4fWBIL7Q/3I+0FBhUByo9drALwmhlhMysLr6CcMG4Tb/X Z4bTovlfUg5KRdTCIWxMkP3mPM9tSoSf43EJfHHltAPtJWCtCrqKOA8Gx1u5xXKGgL NDEaCjEHzsYa4iDa+yCT8tNN28WzZnaXEMbkJvBY= Received: from localhost (localhost [127.0.0.1]) by messagerie.si.c-s.fr (Postfix) with ESMTP id D90528B784; Sun, 16 Feb 2020 11:41:09 +0100 (CET) X-Virus-Scanned: amavisd-new at c-s.fr Received: from messagerie.si.c-s.fr ([127.0.0.1]) by localhost (messagerie.si.c-s.fr [127.0.0.1]) (amavisd-new, port 10023) with ESMTP id VGVzW0Ckh2uU; Sun, 16 Feb 2020 11:41:09 +0100 (CET) Received: from [192.168.4.90] (unknown [192.168.4.90]) by messagerie.si.c-s.fr (Postfix) with ESMTP id B4E908B755; Sun, 16 Feb 2020 11:41:07 
+0100 (CET) Subject: Re: [PATCH v2 07/13] powerpc: add support for folded p4d page tables To: Mike Rapoport , linux-kernel@vger.kernel.org References: <20200216081843.28670-1-rppt@kernel.org> <20200216081843.28670-8-rppt@kernel.org> From: Christophe Leroy Message-ID: Date: Sun, 16 Feb 2020 11:41:07 +0100 User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:68.0) Gecko/20100101 Thunderbird/68.5.0 MIME-Version: 1.0 In-Reply-To: <20200216081843.28670-8-rppt@kernel.org> Content-Type: text/plain; charset=utf-8; format=flowed Content-Language: fr Content-Transfer-Encoding: 8bit X-BeenThere: linuxppc-dev@lists.ozlabs.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: Linux on PowerPC Developers Mail List List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: Rich Felker , linux-ia64@vger.kernel.org, Geert Uytterhoeven , linux-sh@vger.kernel.org, linux-mm@kvack.org, Paul Mackerras , linux-hexagon@vger.kernel.org, Will Deacon , kvmarm@lists.cs.columbia.edu, Jonas Bonn , linux-arch@vger.kernel.org, Brian Cain , Marc Zyngier , Russell King , Ley Foon Tan , Mike Rapoport , Catalin Marinas , Julien Thierry , uclinux-h8-devel@lists.sourceforge.jp, Fenghua Yu , Arnd Bergmann , Suzuki K Poulose , kvm-ppc@vger.kernel.org, Stefan Kristiansson , openrisc@lists.librecores.org, Stafford Horne , Guan Xuetao , linux-arm-kernel@lists.infradead.org, Tony Luck , Yoshinori Sato , James Morse , nios2-dev@lists.rocketboards.org, Andrew Morton , linuxppc-dev@lists.ozlabs.org Errors-To: linuxppc-dev-bounces+linuxppc-dev=archiver.kernel.org@lists.ozlabs.org Sender: "Linuxppc-dev" Le 16/02/2020 à 09:18, Mike Rapoport a écrit : > From: Mike Rapoport > > Implement primitives necessary for the 4th level folding, add walks of p4d > level where appropriate and replace 5level-fixup.h with pgtable-nop4d.h. 
I don't think it is worth adding all this additionnals walks of p4d, this patch could be limited to changes like: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); The additionnal walks should be added through another patch the day powerpc need them. See below for more comments. > > Signed-off-by: Mike Rapoport > Tested-by: Christophe Leroy # 8xx and 83xx > --- > arch/powerpc/include/asm/book3s/32/pgtable.h | 1 - > arch/powerpc/include/asm/book3s/64/hash.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgalloc.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgtable.h | 58 ++++++++++-------- > arch/powerpc/include/asm/book3s/64/radix.h | 6 +- > arch/powerpc/include/asm/nohash/32/pgtable.h | 1 - > arch/powerpc/include/asm/nohash/64/pgalloc.h | 2 +- > .../include/asm/nohash/64/pgtable-4k.h | 32 +++++----- > arch/powerpc/include/asm/nohash/64/pgtable.h | 6 +- > arch/powerpc/include/asm/pgtable.h | 8 +++ > arch/powerpc/kvm/book3s_64_mmu_radix.c | 59 ++++++++++++++++--- > arch/powerpc/lib/code-patching.c | 7 ++- > arch/powerpc/mm/book3s32/mmu.c | 2 +- > arch/powerpc/mm/book3s32/tlb.c | 4 +- > arch/powerpc/mm/book3s64/hash_pgtable.c | 4 +- > arch/powerpc/mm/book3s64/radix_pgtable.c | 19 ++++-- > arch/powerpc/mm/book3s64/subpage_prot.c | 6 +- > arch/powerpc/mm/hugetlbpage.c | 28 +++++---- > arch/powerpc/mm/kasan/kasan_init_32.c | 8 +-- > arch/powerpc/mm/mem.c | 4 +- > arch/powerpc/mm/nohash/40x.c | 4 +- > arch/powerpc/mm/nohash/book3e_pgtable.c | 15 +++-- > arch/powerpc/mm/pgtable.c | 25 +++++++- > arch/powerpc/mm/pgtable_32.c | 28 +++++---- > arch/powerpc/mm/pgtable_64.c | 10 ++-- > arch/powerpc/mm/ptdump/hashpagetable.c | 20 ++++++- > arch/powerpc/mm/ptdump/ptdump.c | 22 ++++++- > arch/powerpc/xmon/xmon.c | 17 +++++- > 28 files changed, 284 insertions(+), 120 deletions(-) > > diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h > index 5b39c11e884a..39ec11371be0 100644 > --- 
a/arch/powerpc/include/asm/book3s/32/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h > @@ -2,7 +2,6 @@ > #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H > #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H > > -#define __ARCH_USE_5LEVEL_HACK > #include > > #include > diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h > index 2781ebf6add4..876d1528c2cf 100644 > --- a/arch/powerpc/include/asm/book3s/64/hash.h > +++ b/arch/powerpc/include/asm/book3s/64/hash.h > @@ -134,9 +134,9 @@ static inline int get_region_id(unsigned long ea) > > #define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS) > #define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS) > -static inline int hash__pgd_bad(pgd_t pgd) > +static inline int hash__p4d_bad(p4d_t p4d) > { > - return (pgd_val(pgd) == 0); > + return (p4d_val(p4d) == 0); > } > #ifdef CONFIG_STRICT_KERNEL_RWX > extern void hash__mark_rodata_ro(void); > diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h > index a41e91bd0580..69c5b051734f 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h > +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h > @@ -85,9 +85,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) > kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); > } > > -static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud) > +static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud) > { > - *pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > + *pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > } > > static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) > diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h > index 201a69e6a355..ddddbafff0ab 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h > @@ -2,7 +2,7 @@ > #ifndef 
_ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > > -#include > +#include > > #ifndef __ASSEMBLY__ > #include > @@ -251,7 +251,7 @@ extern unsigned long __pmd_frag_size_shift; > /* Bits to mask out from a PUD to get to the PMD page */ > #define PUD_MASKED_BITS 0xc0000000000000ffUL > /* Bits to mask out from a PGD to get to the PUD page */ > -#define PGD_MASKED_BITS 0xc0000000000000ffUL > +#define P4D_MASKED_BITS 0xc0000000000000ffUL > > /* > * Used as an indicator for rcu callback functions > @@ -949,54 +949,60 @@ static inline bool pud_access_permitted(pud_t pud, bool write) > return pte_access_permitted(pud_pte(pud), write); > } > > -#define pgd_write(pgd) pte_write(pgd_pte(pgd)) > +#define __p4d_raw(x) ((p4d_t) { __pgd_raw(x) }) > +static inline __be64 p4d_raw(p4d_t x) > +{ > + return pgd_raw(x.pgd); > +} > + Shouldn't this be defined in asm/pgtable-be-types.h, just like other __pxx_raw() ? > +#define p4d_write(p4d) pte_write(p4d_pte(p4d)) > > -static inline void pgd_clear(pgd_t *pgdp) > +static inline void p4d_clear(p4d_t *p4dp) > { > - *pgdp = __pgd(0); > + *p4dp = __p4d(0); > } > > -static inline int pgd_none(pgd_t pgd) > +static inline int p4d_none(p4d_t p4d) > { > - return !pgd_raw(pgd); > + return !p4d_raw(p4d); > } > > -static inline int pgd_present(pgd_t pgd) > +static inline int p4d_present(p4d_t p4d) > { > - return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT)); > } > > -static inline pte_t pgd_pte(pgd_t pgd) > +static inline pte_t p4d_pte(p4d_t p4d) > { > - return __pte_raw(pgd_raw(pgd)); > + return __pte_raw(p4d_raw(p4d)); > } > > -static inline pgd_t pte_pgd(pte_t pte) > +static inline p4d_t pte_p4d(pte_t pte) > { > - return __pgd_raw(pte_raw(pte)); > + return __p4d_raw(pte_raw(pte)); > } > > -static inline int pgd_bad(pgd_t pgd) > +static inline int p4d_bad(p4d_t p4d) > { > if (radix_enabled()) > - return radix__pgd_bad(pgd); > - return hash__pgd_bad(pgd); > + 
return radix__p4d_bad(p4d); > + return hash__p4d_bad(p4d); > } > > -#define pgd_access_permitted pgd_access_permitted > -static inline bool pgd_access_permitted(pgd_t pgd, bool write) > +#define p4d_access_permitted p4d_access_permitted > +static inline bool p4d_access_permitted(p4d_t p4d, bool write) > { > - return pte_access_permitted(pgd_pte(pgd), write); > + return pte_access_permitted(p4d_pte(p4d), write); > } > > -extern struct page *pgd_page(pgd_t pgd); > +extern struct page *p4d_page(p4d_t p4d); > > /* Pointers in the page table tree are physical addresses */ > #define __pgtable_ptr_val(ptr) __pa(ptr) > > #define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS) > #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) > -#define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) > +#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS) > > #define pgd_index(address) (((address) >> (PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) > #define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) > @@ -1010,8 +1016,8 @@ extern struct page *pgd_page(pgd_t pgd); > > #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) > > -#define pud_offset(pgdp, addr) \ > - (((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr)) > +#define pud_offset(p4dp, addr) \ > + (((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr)) > #define pmd_offset(pudp,addr) \ > (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr)) > #define pte_offset_kernel(dir,addr) \ > @@ -1368,6 +1374,12 @@ static inline bool pud_is_leaf(pud_t pud) > return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); > } > > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE)); > +} > + > #define pgd_is_leaf pgd_is_leaf > #define pgd_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) [...] 
> diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h > index 8cc543ed114c..0a05fddd7881 100644 > --- a/arch/powerpc/include/asm/pgtable.h > +++ b/arch/powerpc/include/asm/pgtable.h > @@ -139,6 +139,14 @@ static inline bool pud_is_leaf(pud_t pud) > } > #endif > > +#ifndef p4d_is_leaf > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return false; > +} > +#endif > + > #ifndef pgd_is_leaf > #define pgd_is_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) > diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c > index 803940d79b73..5aacfa0b27ef 100644 > --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c > +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c > @@ -494,17 +494,39 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, > pud_free(kvm->mm, pud); > } > > +static void kvmppc_unmap_free_p4d(struct kvm *kvm, p4d_t *p4d, > + unsigned int lpid) > +{ > + unsigned long iu; > + p4d_t *p = p4d; > + > + for (iu = 0; iu < PTRS_PER_P4D; ++iu, ++p) { > + if (!p4d_present(*p)) > + continue; > + if (p4d_is_leaf(*p)) { > + p4d_clear(p); > + } else { > + pud_t *pud; > + > + pud = pud_offset(p, 0); > + kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d_clear(p); > + } > + } > + p4d_free(kvm->mm, p4d); > +} > + > void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) > { > unsigned long ig; > > for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { > - pud_t *pud; > + p4d_t *p4d; > > if (!pgd_present(*pgd)) > continue; > - pud = pud_offset(pgd, 0); > - kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d = p4d_offset(pgd, 0); > + kvmppc_unmap_free_p4d(kvm, p4d, lpid); > pgd_clear(pgd); > } > } > @@ -566,6 +588,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > unsigned long *rmapp, struct rmap_nested **n_rmap) > { > pgd_t *pgd; > + p4d_t *p4d, *new_p4d = NULL; > pud_t *pud, *new_pud = NULL; > pmd_t *pmd, *new_pmd = NULL; > pte_t 
*ptep, *new_ptep = NULL; > @@ -573,9 +596,15 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > > /* Traverse the guest's 2nd-level tree, allocate new levels needed */ > pgd = pgtable + pgd_index(gpa); > - pud = NULL; > + p4d = NULL; > if (pgd_present(*pgd)) > - pud = pud_offset(pgd, gpa); > + p4d = p4d_offset(pgd, gpa); > + else > + new_p4d = p4d_alloc_one(kvm->mm, gpa); > + > + pud = NULL; > + if (p4d_present(*p4d)) > + pud = pud_offset(p4d, gpa); Is it worth adding all this new code ? My understanding is that the series' objective is to get rid of __ARCH_USE_5LEVEL_HACK, not to add support for 5 levels to an architecture that does not need it (at least for now). If we want to add support for 5 levels, it can be done later in another patch. Here I think your change could be limited to: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); > else > new_pud = pud_alloc_one(kvm->mm, gpa); > > @@ -597,12 +626,18 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > /* Now traverse again under the lock and change the tree */ > ret = -ENOMEM; > if (pgd_none(*pgd)) { > + if (!new_p4d) > + goto out_unlock; > + pgd_populate(kvm->mm, pgd, new_p4d); > + new_p4d = NULL; > + } > + if (p4d_none(*p4d)) { > if (!new_pud) > goto out_unlock; > - pgd_populate(kvm->mm, pgd, new_pud); > + p4d_populate(kvm->mm, p4d, new_pud); > new_pud = NULL; > } > - pud = pud_offset(pgd, gpa); > + pud = pud_offset(p4d, gpa); > if (pud_is_leaf(*pud)) { > unsigned long hgpa = gpa & PUD_MASK; > > @@ -1220,6 +1255,7 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > pgd_t *pgt; > struct kvm_nested_guest *nested; > pgd_t pgd, *pgdp; > + p4d_t p4d, *p4dp; > pud_t pud, *pudp; > pmd_t pmd, *pmdp; > pte_t *ptep; > @@ -1298,7 +1334,14 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > continue; > } > > - pudp = pud_offset(&pgd, gpa); > + p4dp = p4d_offset(&pgd, gpa); > + p4d = READ_ONCE(*p4dp); > + if 
(!(p4d_val(p4d) & _PAGE_PRESENT)) { > + gpa = (gpa & P4D_MASK) + P4D_SIZE; > + continue; > + } > + > + pudp = pud_offset(&p4d, gpa); Same, here you are forcing a useless read with READ_ONCE(). Your change could be limited to - pudp = pud_offset(&pgd, gpa); + pudp = pud_offset(p4d_offset(&pgd, gpa), gpa); This comment applies to many other places. > pud = READ_ONCE(*pudp); > if (!(pud_val(pud) & _PAGE_PRESENT)) { > gpa = (gpa & PUD_MASK) + PUD_SIZE; > diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c > index 3345f039a876..7a59f6863cec 100644 > --- a/arch/powerpc/lib/code-patching.c > +++ b/arch/powerpc/lib/code-patching.c > @@ -107,13 +107,18 @@ static inline int unmap_patch_area(unsigned long addr) > pte_t *ptep; > pmd_t *pmdp; > pud_t *pudp; > + p4d_t *p4dp; > pgd_t *pgdp; > > pgdp = pgd_offset_k(addr); > if (unlikely(!pgdp)) > return -EINVAL; > > - pudp = pud_offset(pgdp, addr); > + p4dp = p4d_offset(pgdp, addr); > + if (unlikely(!p4dp)) > + return -EINVAL; > + > + pudp = pud_offset(p4dp, addr); > if (unlikely(!pudp)) > return -EINVAL; > > diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c > index 0a1c65a2c565..b2fc3e71165c 100644 > --- a/arch/powerpc/mm/book3s32/mmu.c > +++ b/arch/powerpc/mm/book3s32/mmu.c > @@ -312,7 +312,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea) > > if (!Hash) > return; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, ea), ea), ea), ea); If we continue like this, in ten years this line is going to be many kilometers long. I think the above would be worth a generic helper. 
> if (!pmd_none(*pmd)) > add_hash_page(mm->context.id, ea, pmd_val(*pmd)); > } > diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c > index 2fcd321040ff..175bc33b41b7 100644 > --- a/arch/powerpc/mm/book3s32/tlb.c > +++ b/arch/powerpc/mm/book3s32/tlb.c > @@ -87,7 +87,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start, > if (start >= end) > return; > end = (end - 1) | ~PAGE_MASK; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, start), start), start), start); > for (;;) { > pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; > if (pmd_end > end) > @@ -145,7 +145,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) > return; > } > mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr), vmaddr); > if (!pmd_none(*pmd)) > flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); > } > diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c > index 64733b9cb20a..9cd15937e88a 100644 > --- a/arch/powerpc/mm/book3s64/hash_pgtable.c > +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c > @@ -148,6 +148,7 @@ void hash__vmemmap_remove_mapping(unsigned long start, > int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > { > pgd_t *pgdp; > + p4d_t *p4dp; > pud_t *pudp; > pmd_t *pmdp; > pte_t *ptep; > @@ -155,7 +156,8 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE); > if (slab_is_available()) { > pgdp = pgd_offset_k(ea); > - pudp = pud_alloc(&init_mm, pgdp, ea); > + p4dp = p4d_offset(pgdp, ea); > + pudp = pud_alloc(&init_mm, p4dp, ea); Could be a single line, without a new var. 
- pudp = pud_alloc(&init_mm, pgdp, ea); + pudp = pud_alloc(&init_mm, p4d_offset(pgdp, ea), ea); Same kind of comments as already done apply to the rest. Christophe From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-8.1 required=3.0 tests=DKIM_INVALID,DKIM_SIGNED, HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_PATCH,MAILING_LIST_MULTI,SIGNED_OFF_BY, SPF_HELO_NONE,SPF_PASS,USER_AGENT_SANE_1 autolearn=ham autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id 5ABAAC7619B for ; Sun, 16 Feb 2020 10:41:18 +0000 (UTC) Received: from mm01.cs.columbia.edu (mm01.cs.columbia.edu [128.59.11.253]) by mail.kernel.org (Postfix) with ESMTP id D43F722522 for ; Sun, 16 Feb 2020 10:41:17 +0000 (UTC) Authentication-Results: mail.kernel.org; dkim=fail reason="signature verification failed" (1024-bit key) header.d=c-s.fr header.i=@c-s.fr header.b="v+gbcEDk" DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org D43F722522 Authentication-Results: mail.kernel.org; dmarc=none (p=none dis=none) header.from=c-s.fr Authentication-Results: mail.kernel.org; spf=pass smtp.mailfrom=kvmarm-bounces@lists.cs.columbia.edu Received: from localhost (localhost [127.0.0.1]) by mm01.cs.columbia.edu (Postfix) with ESMTP id 373B54AEE0; Sun, 16 Feb 2020 05:41:17 -0500 (EST) X-Virus-Scanned: at lists.cs.columbia.edu Authentication-Results: mm01.cs.columbia.edu (amavisd-new); dkim=softfail (fail, message has been altered) header.i=@c-s.fr Received: from mm01.cs.columbia.edu ([127.0.0.1]) by localhost (mm01.cs.columbia.edu [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id rGiYgPQt0whE; Sun, 16 Feb 2020 05:41:15 -0500 (EST) Received: from mm01.cs.columbia.edu (localhost [127.0.0.1]) by mm01.cs.columbia.edu (Postfix) with ESMTP id 3521E4AEE3; Sun, 16 Feb 2020 05:41:15 -0500 (EST) 
Received: from localhost (localhost [127.0.0.1]) by mm01.cs.columbia.edu (Postfix) with ESMTP id 40C144AEC6 for ; Sun, 16 Feb 2020 05:41:14 -0500 (EST) X-Virus-Scanned: at lists.cs.columbia.edu Received: from mm01.cs.columbia.edu ([127.0.0.1]) by localhost (mm01.cs.columbia.edu [127.0.0.1]) (amavisd-new, port 10024) with ESMTP id 1pqQMtqqRMPc for ; Sun, 16 Feb 2020 05:41:12 -0500 (EST) Received: from pegase1.c-s.fr (pegase1.c-s.fr [93.17.236.30]) by mm01.cs.columbia.edu (Postfix) with ESMTPS id 077154AEBF for ; Sun, 16 Feb 2020 05:41:12 -0500 (EST) Received: from localhost (mailhub1-int [192.168.12.234]) by localhost (Postfix) with ESMTP id 48L3Yv0sRkz9tyM7; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Authentication-Results: localhost; dkim=pass reason="1024-bit key; insecure key" header.d=c-s.fr header.i=@c-s.fr header.b=v+gbcEDk; dkim-adsp=pass; dkim-atps=neutral X-Virus-Scanned: Debian amavisd-new at c-s.fr Received: from pegase1.c-s.fr ([192.168.12.234]) by localhost (pegase1.c-s.fr [192.168.12.234]) (amavisd-new, port 10024) with ESMTP id WyvC_D0c5SGS; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Received: from messagerie.si.c-s.fr (messagerie.si.c-s.fr [192.168.25.192]) by pegase1.c-s.fr (Postfix) with ESMTP id 48L3Yt6byhz9tyM6; Sun, 16 Feb 2020 11:41:06 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=c-s.fr; s=mail; t=1581849666; bh=uYX8/YhQnPSB2yIUz2Iiabdy4hBMITqWSMKEeNzcsUE=; h=Subject:To:Cc:References:From:Date:In-Reply-To:From; b=v+gbcEDki2kN7vHEJxzn4fWBIL7Q/3I+0FBhUByo9drALwmhlhMysLr6CcMG4Tb/X Z4bTovlfUg5KRdTCIWxMkP3mPM9tSoSf43EJfHHltAPtJWCtCrqKOA8Gx1u5xXKGgL NDEaCjEHzsYa4iDa+yCT8tNN28WzZnaXEMbkJvBY= Received: from localhost (localhost [127.0.0.1]) by messagerie.si.c-s.fr (Postfix) with ESMTP id D90528B784; Sun, 16 Feb 2020 11:41:09 +0100 (CET) X-Virus-Scanned: amavisd-new at c-s.fr Received: from messagerie.si.c-s.fr ([127.0.0.1]) by localhost (messagerie.si.c-s.fr [127.0.0.1]) (amavisd-new, port 10023) with ESMTP id VGVzW0Ckh2uU; Sun, 16 Feb 
2020 11:41:09 +0100 (CET) Received: from [192.168.4.90] (unknown [192.168.4.90]) by messagerie.si.c-s.fr (Postfix) with ESMTP id B4E908B755; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Subject: Re: [PATCH v2 07/13] powerpc: add support for folded p4d page tables To: Mike Rapoport , linux-kernel@vger.kernel.org References: <20200216081843.28670-1-rppt@kernel.org> <20200216081843.28670-8-rppt@kernel.org> From: Christophe Leroy Message-ID: Date: Sun, 16 Feb 2020 11:41:07 +0100 User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:68.0) Gecko/20100101 Thunderbird/68.5.0 MIME-Version: 1.0 In-Reply-To: <20200216081843.28670-8-rppt@kernel.org> Content-Language: fr Cc: Rich Felker , linux-ia64@vger.kernel.org, Geert Uytterhoeven , linux-sh@vger.kernel.org, Benjamin Herrenschmidt , linux-mm@kvack.org, Paul Mackerras , linux-hexagon@vger.kernel.org, Will Deacon , kvmarm@lists.cs.columbia.edu, Jonas Bonn , linux-arch@vger.kernel.org, Brian Cain , Marc Zyngier , Russell King , Ley Foon Tan , Mike Rapoport , Catalin Marinas , uclinux-h8-devel@lists.sourceforge.jp, Fenghua Yu , Arnd Bergmann , kvm-ppc@vger.kernel.org, Stefan Kristiansson , openrisc@lists.librecores.org, Stafford Horne , Guan Xuetao , linux-arm-kernel@lists.infradead.org, Tony Luck , Yoshinori Sato , Michael Ellerman , nios2-dev@lists.rocketboards.org, Andrew Morton , linuxppc-dev@lists.ozlabs.org X-BeenThere: kvmarm@lists.cs.columbia.edu X-Mailman-Version: 2.1.14 Precedence: list List-Id: Where KVM/ARM decisions are made List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Content-Transfer-Encoding: base64 Content-Type: text/plain; charset="utf-8"; Format="flowed" Errors-To: kvmarm-bounces@lists.cs.columbia.edu Sender: kvmarm-bounces@lists.cs.columbia.edu CgpMZSAxNi8wMi8yMDIwIMOgIDA5OjE4LCBNaWtlIFJhcG9wb3J0IGEgw6ljcml0wqA6Cj4gRnJv bTogTWlrZSBSYXBvcG9ydCA8cnBwdEBsaW51eC5pYm0uY29tPgo+IAo+IEltcGxlbWVudCBwcmlt aXRpdmVzIG5lY2Vzc2FyeSBmb3IgdGhlIDR0aCBsZXZlbCBmb2xkaW5nLCBhZGQgd2Fsa3Mgb2Yg 
cDRkCj4gbGV2ZWwgd2hlcmUgYXBwcm9wcmlhdGUgYW5kIHJlcGxhY2UgNWxldmVsLWZpeHVwLmgg d2l0aCBwZ3RhYmxlLW5vcDRkLmguCgpJIGRvbid0IHRoaW5rIGl0IGlzIHdvcnRoIGFkZGluZyBh bGwgdGhpcyBhZGRpdGlvbm5hbHMgd2Fsa3Mgb2YgcDRkLCAKdGhpcyBwYXRjaCBjb3VsZCBiZSBs aW1pdGVkIHRvIGNoYW5nZXMgbGlrZToKCi0JCXB1ZCA9IHB1ZF9vZmZzZXQocGdkLCBncGEpOwor CQlwdWQgPSBwdWRfb2Zmc2V0KHA0ZF9vZmZzZXQocGdkLCBncGEpLCBncGEpOwoKVGhlIGFkZGl0 aW9ubmFsIHdhbGtzIHNob3VsZCBiZSBhZGRlZCB0aHJvdWdoIGFub3RoZXIgcGF0Y2ggdGhlIGRh eSAKcG93ZXJwYyBuZWVkIHRoZW0uCgpTZWUgYmVsb3cgZm9yIG1vcmUgY29tbWVudHMuCgo+IAo+ IFNpZ25lZC1vZmYtYnk6IE1pa2UgUmFwb3BvcnQgPHJwcHRAbGludXguaWJtLmNvbT4KPiBUZXN0 ZWQtYnk6IENocmlzdG9waGUgTGVyb3kgPGNocmlzdG9waGUubGVyb3lAYy1zLmZyPiAjIDh4eCBh bmQgODN4eAo+IC0tLQo+ICAgYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy8zMi9wZ3Rh YmxlLmggIHwgIDEgLQo+ICAgYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy82NC9oYXNo LmggICAgIHwgIDQgKy0KPiAgIGFyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3MvNjQvcGdh bGxvYy5oICB8ICA0ICstCj4gICBhcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzY0L3Bn dGFibGUuaCAgfCA1OCArKysrKysrKysrLS0tLS0tLS0KPiAgIGFyY2gvcG93ZXJwYy9pbmNsdWRl L2FzbS9ib29rM3MvNjQvcmFkaXguaCAgICB8ICA2ICstCj4gICBhcmNoL3Bvd2VycGMvaW5jbHVk ZS9hc20vbm9oYXNoLzMyL3BndGFibGUuaCAgfCAgMSAtCj4gICBhcmNoL3Bvd2VycGMvaW5jbHVk ZS9hc20vbm9oYXNoLzY0L3BnYWxsb2MuaCAgfCAgMiArLQo+ICAgLi4uL2luY2x1ZGUvYXNtL25v aGFzaC82NC9wZ3RhYmxlLTRrLmggICAgICAgIHwgMzIgKysrKystLS0tLQo+ICAgYXJjaC9wb3dl cnBjL2luY2x1ZGUvYXNtL25vaGFzaC82NC9wZ3RhYmxlLmggIHwgIDYgKy0KPiAgIGFyY2gvcG93 ZXJwYy9pbmNsdWRlL2FzbS9wZ3RhYmxlLmggICAgICAgICAgICB8ICA4ICsrKwo+ICAgYXJjaC9w b3dlcnBjL2t2bS9ib29rM3NfNjRfbW11X3JhZGl4LmMgICAgICAgIHwgNTkgKysrKysrKysrKysr KysrKy0tLQo+ICAgYXJjaC9wb3dlcnBjL2xpYi9jb2RlLXBhdGNoaW5nLmMgICAgICAgICAgICAg IHwgIDcgKystCj4gICBhcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMgICAgICAgICAgICAg ICAgfCAgMiArLQo+ICAgYXJjaC9wb3dlcnBjL21tL2Jvb2szczMyL3RsYi5jICAgICAgICAgICAg ICAgIHwgIDQgKy0KPiAgIGFyY2gvcG93ZXJwYy9tbS9ib29rM3M2NC9oYXNoX3BndGFibGUuYyAg 
ICAgICB8ICA0ICstCj4gICBhcmNoL3Bvd2VycGMvbW0vYm9vazNzNjQvcmFkaXhfcGd0YWJsZS5j ICAgICAgfCAxOSArKysrLS0KPiAgIGFyY2gvcG93ZXJwYy9tbS9ib29rM3M2NC9zdWJwYWdlX3By b3QuYyAgICAgICB8ICA2ICstCj4gICBhcmNoL3Bvd2VycGMvbW0vaHVnZXRsYnBhZ2UuYyAgICAg ICAgICAgICAgICAgfCAyOCArKysrKy0tLS0KPiAgIGFyY2gvcG93ZXJwYy9tbS9rYXNhbi9rYXNh bl9pbml0XzMyLmMgICAgICAgICB8ICA4ICstLQo+ICAgYXJjaC9wb3dlcnBjL21tL21lbS5jICAg ICAgICAgICAgICAgICAgICAgICAgIHwgIDQgKy0KPiAgIGFyY2gvcG93ZXJwYy9tbS9ub2hhc2gv NDB4LmMgICAgICAgICAgICAgICAgICB8ICA0ICstCj4gICBhcmNoL3Bvd2VycGMvbW0vbm9oYXNo L2Jvb2szZV9wZ3RhYmxlLmMgICAgICAgfCAxNSArKystLQo+ICAgYXJjaC9wb3dlcnBjL21tL3Bn dGFibGUuYyAgICAgICAgICAgICAgICAgICAgIHwgMjUgKysrKysrKy0KPiAgIGFyY2gvcG93ZXJw Yy9tbS9wZ3RhYmxlXzMyLmMgICAgICAgICAgICAgICAgICB8IDI4ICsrKysrLS0tLQo+ICAgYXJj aC9wb3dlcnBjL21tL3BndGFibGVfNjQuYyAgICAgICAgICAgICAgICAgIHwgMTAgKystLQo+ICAg YXJjaC9wb3dlcnBjL21tL3B0ZHVtcC9oYXNocGFnZXRhYmxlLmMgICAgICAgIHwgMjAgKysrKysr LQo+ICAgYXJjaC9wb3dlcnBjL21tL3B0ZHVtcC9wdGR1bXAuYyAgICAgICAgICAgICAgIHwgMjIg KysrKysrLQo+ICAgYXJjaC9wb3dlcnBjL3htb24veG1vbi5jICAgICAgICAgICAgICAgICAgICAg IHwgMTcgKysrKystCj4gICAyOCBmaWxlcyBjaGFuZ2VkLCAyODQgaW5zZXJ0aW9ucygrKSwgMTIw IGRlbGV0aW9ucygtKQo+IAo+IGRpZmYgLS1naXQgYS9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20v Ym9vazNzLzMyL3BndGFibGUuaCBiL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3MvMzIv cGd0YWJsZS5oCj4gaW5kZXggNWIzOWMxMWU4ODRhLi4zOWVjMTEzNzFiZTAgMTAwNjQ0Cj4gLS0t IGEvYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy8zMi9wZ3RhYmxlLmgKPiArKysgYi9h cmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzMyL3BndGFibGUuaAo+IEBAIC0yLDcgKzIs NiBAQAo+ICAgI2lmbmRlZiBfQVNNX1BPV0VSUENfQk9PSzNTXzMyX1BHVEFCTEVfSAo+ICAgI2Rl ZmluZSBfQVNNX1BPV0VSUENfQk9PSzNTXzMyX1BHVEFCTEVfSAo+ICAgCj4gLSNkZWZpbmUgX19B UkNIX1VTRV81TEVWRUxfSEFDSwo+ICAgI2luY2x1ZGUgPGFzbS1nZW5lcmljL3BndGFibGUtbm9w bWQuaD4KPiAgIAo+ICAgI2luY2x1ZGUgPGFzbS9ib29rM3MvMzIvaGFzaC5oPgo+IGRpZmYgLS1n aXQgYS9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzY0L2hhc2guaCBiL2FyY2gvcG93 
ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3MvNjQvaGFzaC5oCj4gaW5kZXggMjc4MWViZjZhZGQ0Li44 NzZkMTUyOGMyY2YgMTAwNjQ0Cj4gLS0tIGEvYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2sz cy82NC9oYXNoLmgKPiArKysgYi9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzY0L2hh c2guaAo+IEBAIC0xMzQsOSArMTM0LDkgQEAgc3RhdGljIGlubGluZSBpbnQgZ2V0X3JlZ2lvbl9p ZCh1bnNpZ25lZCBsb25nIGVhKQo+ICAgCj4gICAjZGVmaW5lCWhhc2hfX3BtZF9iYWQocG1kKQkJ KHBtZF92YWwocG1kKSAmIEhfUE1EX0JBRF9CSVRTKQo+ICAgI2RlZmluZQloYXNoX19wdWRfYmFk KHB1ZCkJCShwdWRfdmFsKHB1ZCkgJiBIX1BVRF9CQURfQklUUykKPiAtc3RhdGljIGlubGluZSBp bnQgaGFzaF9fcGdkX2JhZChwZ2RfdCBwZ2QpCj4gK3N0YXRpYyBpbmxpbmUgaW50IGhhc2hfX3A0 ZF9iYWQocDRkX3QgcDRkKQo+ICAgewo+IC0JcmV0dXJuIChwZ2RfdmFsKHBnZCkgPT0gMCk7Cj4g KwlyZXR1cm4gKHA0ZF92YWwocDRkKSA9PSAwKTsKPiAgIH0KPiAgICNpZmRlZiBDT05GSUdfU1RS SUNUX0tFUk5FTF9SV1gKPiAgIGV4dGVybiB2b2lkIGhhc2hfX21hcmtfcm9kYXRhX3JvKHZvaWQp Owo+IGRpZmYgLS1naXQgYS9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzY0L3BnYWxs b2MuaCBiL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3MvNjQvcGdhbGxvYy5oCj4gaW5k ZXggYTQxZTkxYmQwNTgwLi42OWM1YjA1MTczNGYgMTAwNjQ0Cj4gLS0tIGEvYXJjaC9wb3dlcnBj L2luY2x1ZGUvYXNtL2Jvb2szcy82NC9wZ2FsbG9jLmgKPiArKysgYi9hcmNoL3Bvd2VycGMvaW5j bHVkZS9hc20vYm9vazNzLzY0L3BnYWxsb2MuaAo+IEBAIC04NSw5ICs4NSw5IEBAIHN0YXRpYyBp bmxpbmUgdm9pZCBwZ2RfZnJlZShzdHJ1Y3QgbW1fc3RydWN0ICptbSwgcGdkX3QgKnBnZCkKPiAg IAlrbWVtX2NhY2hlX2ZyZWUoUEdUX0NBQ0hFKFBHRF9JTkRFWF9TSVpFKSwgcGdkKTsKPiAgIH0K PiAgIAo+IC1zdGF0aWMgaW5saW5lIHZvaWQgcGdkX3BvcHVsYXRlKHN0cnVjdCBtbV9zdHJ1Y3Qg Km1tLCBwZ2RfdCAqcGdkLCBwdWRfdCAqcHVkKQo+ICtzdGF0aWMgaW5saW5lIHZvaWQgcDRkX3Bv cHVsYXRlKHN0cnVjdCBtbV9zdHJ1Y3QgKm1tLCBwNGRfdCAqcGdkLCBwdWRfdCAqcHVkKQo+ICAg ewo+IC0JKnBnZCA9ICBfX3BnZChfX3BndGFibGVfcHRyX3ZhbChwdWQpIHwgUEdEX1ZBTF9CSVRT KTsKPiArCSpwZ2QgPSAgX19wNGQoX19wZ3RhYmxlX3B0cl92YWwocHVkKSB8IFBHRF9WQUxfQklU Uyk7Cj4gICB9Cj4gICAKPiAgIHN0YXRpYyBpbmxpbmUgcHVkX3QgKnB1ZF9hbGxvY19vbmUoc3Ry dWN0IG1tX3N0cnVjdCAqbW0sIHVuc2lnbmVkIGxvbmcgYWRkcikKPiBkaWZmIC0tZ2l0IGEvYXJj 
aC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy82NC9wZ3RhYmxlLmggYi9hcmNoL3Bvd2VycGMv aW5jbHVkZS9hc20vYm9vazNzLzY0L3BndGFibGUuaAo+IGluZGV4IDIwMWE2OWU2YTM1NS4uZGRk ZGJhZmZmMGFiIDEwMDY0NAo+IC0tLSBhL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3Mv NjQvcGd0YWJsZS5oCj4gKysrIGIvYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy82NC9w Z3RhYmxlLmgKPiBAQCAtMiw3ICsyLDcgQEAKPiAgICNpZm5kZWYgX0FTTV9QT1dFUlBDX0JPT0sz U182NF9QR1RBQkxFX0hfCj4gICAjZGVmaW5lIF9BU01fUE9XRVJQQ19CT09LM1NfNjRfUEdUQUJM RV9IXwo+ICAgCj4gLSNpbmNsdWRlIDxhc20tZ2VuZXJpYy81bGV2ZWwtZml4dXAuaD4KPiArI2lu Y2x1ZGUgPGFzbS1nZW5lcmljL3BndGFibGUtbm9wNGQuaD4KPiAgIAo+ICAgI2lmbmRlZiBfX0FT U0VNQkxZX18KPiAgICNpbmNsdWRlIDxsaW51eC9tbWRlYnVnLmg+Cj4gQEAgLTI1MSw3ICsyNTEs NyBAQCBleHRlcm4gdW5zaWduZWQgbG9uZyBfX3BtZF9mcmFnX3NpemVfc2hpZnQ7Cj4gICAvKiBC aXRzIHRvIG1hc2sgb3V0IGZyb20gYSBQVUQgdG8gZ2V0IHRvIHRoZSBQTUQgcGFnZSAqLwo+ICAg I2RlZmluZSBQVURfTUFTS0VEX0JJVFMJCTB4YzAwMDAwMDAwMDAwMDBmZlVMCj4gICAvKiBCaXRz IHRvIG1hc2sgb3V0IGZyb20gYSBQR0QgdG8gZ2V0IHRvIHRoZSBQVUQgcGFnZSAqLwo+IC0jZGVm aW5lIFBHRF9NQVNLRURfQklUUwkJMHhjMDAwMDAwMDAwMDAwMGZmVUwKPiArI2RlZmluZSBQNERf TUFTS0VEX0JJVFMJCTB4YzAwMDAwMDAwMDAwMDBmZlVMCj4gICAKPiAgIC8qCj4gICAgKiBVc2Vk IGFzIGFuIGluZGljYXRvciBmb3IgcmN1IGNhbGxiYWNrIGZ1bmN0aW9ucwo+IEBAIC05NDksNTQg Kzk0OSw2MCBAQCBzdGF0aWMgaW5saW5lIGJvb2wgcHVkX2FjY2Vzc19wZXJtaXR0ZWQocHVkX3Qg cHVkLCBib29sIHdyaXRlKQo+ICAgCXJldHVybiBwdGVfYWNjZXNzX3Blcm1pdHRlZChwdWRfcHRl KHB1ZCksIHdyaXRlKTsKPiAgIH0KPiAgIAo+IC0jZGVmaW5lIHBnZF93cml0ZShwZ2QpCQlwdGVf d3JpdGUocGdkX3B0ZShwZ2QpKQo+ICsjZGVmaW5lIF9fcDRkX3Jhdyh4KQkoKHA0ZF90KSB7IF9f cGdkX3Jhdyh4KSB9KQo+ICtzdGF0aWMgaW5saW5lIF9fYmU2NCBwNGRfcmF3KHA0ZF90IHgpCj4g K3sKPiArCXJldHVybiBwZ2RfcmF3KHgucGdkKTsKPiArfQo+ICsKClNob3VsZG4ndCB0aGlzIGJl IGRlZmluZWQgaW4gYXNtL3BndGFibGUtYmUtdHlwZXMuaCwganVzdCBsaWtlIG90aGVyIApfX3B4 eF9yYXcoKSA/Cgo+ICsjZGVmaW5lIHA0ZF93cml0ZShwNGQpCQlwdGVfd3JpdGUocDRkX3B0ZShw NGQpKQo+ICAgCj4gLXN0YXRpYyBpbmxpbmUgdm9pZCBwZ2RfY2xlYXIocGdkX3QgKnBnZHApCj4g 
K3N0YXRpYyBpbmxpbmUgdm9pZCBwNGRfY2xlYXIocDRkX3QgKnA0ZHApCj4gICB7Cj4gLQkqcGdk cCA9IF9fcGdkKDApOwo+ICsJKnA0ZHAgPSBfX3A0ZCgwKTsKPiAgIH0KPiAgIAo+IC1zdGF0aWMg aW5saW5lIGludCBwZ2Rfbm9uZShwZ2RfdCBwZ2QpCj4gK3N0YXRpYyBpbmxpbmUgaW50IHA0ZF9u b25lKHA0ZF90IHA0ZCkKPiAgIHsKPiAtCXJldHVybiAhcGdkX3JhdyhwZ2QpOwo+ICsJcmV0dXJu ICFwNGRfcmF3KHA0ZCk7Cj4gICB9Cj4gICAKPiAtc3RhdGljIGlubGluZSBpbnQgcGdkX3ByZXNl bnQocGdkX3QgcGdkKQo+ICtzdGF0aWMgaW5saW5lIGludCBwNGRfcHJlc2VudChwNGRfdCBwNGQp Cj4gICB7Cj4gLQlyZXR1cm4gISEocGdkX3JhdyhwZ2QpICYgY3B1X3RvX2JlNjQoX1BBR0VfUFJF U0VOVCkpOwo+ICsJcmV0dXJuICEhKHA0ZF9yYXcocDRkKSAmIGNwdV90b19iZTY0KF9QQUdFX1BS RVNFTlQpKTsKPiAgIH0KPiAgIAo+IC1zdGF0aWMgaW5saW5lIHB0ZV90IHBnZF9wdGUocGdkX3Qg cGdkKQo+ICtzdGF0aWMgaW5saW5lIHB0ZV90IHA0ZF9wdGUocDRkX3QgcDRkKQo+ICAgewo+IC0J cmV0dXJuIF9fcHRlX3JhdyhwZ2RfcmF3KHBnZCkpOwo+ICsJcmV0dXJuIF9fcHRlX3JhdyhwNGRf cmF3KHA0ZCkpOwo+ICAgfQo+ICAgCj4gLXN0YXRpYyBpbmxpbmUgcGdkX3QgcHRlX3BnZChwdGVf dCBwdGUpCj4gK3N0YXRpYyBpbmxpbmUgcDRkX3QgcHRlX3A0ZChwdGVfdCBwdGUpCj4gICB7Cj4g LQlyZXR1cm4gX19wZ2RfcmF3KHB0ZV9yYXcocHRlKSk7Cj4gKwlyZXR1cm4gX19wNGRfcmF3KHB0 ZV9yYXcocHRlKSk7Cj4gICB9Cj4gICAKPiAtc3RhdGljIGlubGluZSBpbnQgcGdkX2JhZChwZ2Rf dCBwZ2QpCj4gK3N0YXRpYyBpbmxpbmUgaW50IHA0ZF9iYWQocDRkX3QgcDRkKQo+ICAgewo+ICAg CWlmIChyYWRpeF9lbmFibGVkKCkpCj4gLQkJcmV0dXJuIHJhZGl4X19wZ2RfYmFkKHBnZCk7Cj4g LQlyZXR1cm4gaGFzaF9fcGdkX2JhZChwZ2QpOwo+ICsJCXJldHVybiByYWRpeF9fcDRkX2JhZChw NGQpOwo+ICsJcmV0dXJuIGhhc2hfX3A0ZF9iYWQocDRkKTsKPiAgIH0KPiAgIAo+IC0jZGVmaW5l IHBnZF9hY2Nlc3NfcGVybWl0dGVkIHBnZF9hY2Nlc3NfcGVybWl0dGVkCj4gLXN0YXRpYyBpbmxp bmUgYm9vbCBwZ2RfYWNjZXNzX3Blcm1pdHRlZChwZ2RfdCBwZ2QsIGJvb2wgd3JpdGUpCj4gKyNk ZWZpbmUgcDRkX2FjY2Vzc19wZXJtaXR0ZWQgcDRkX2FjY2Vzc19wZXJtaXR0ZWQKPiArc3RhdGlj IGlubGluZSBib29sIHA0ZF9hY2Nlc3NfcGVybWl0dGVkKHA0ZF90IHA0ZCwgYm9vbCB3cml0ZSkK PiAgIHsKPiAtCXJldHVybiBwdGVfYWNjZXNzX3Blcm1pdHRlZChwZ2RfcHRlKHBnZCksIHdyaXRl KTsKPiArCXJldHVybiBwdGVfYWNjZXNzX3Blcm1pdHRlZChwNGRfcHRlKHA0ZCksIHdyaXRlKTsK 
PiAgIH0KPiAgIAo+IC1leHRlcm4gc3RydWN0IHBhZ2UgKnBnZF9wYWdlKHBnZF90IHBnZCk7Cj4g K2V4dGVybiBzdHJ1Y3QgcGFnZSAqcDRkX3BhZ2UocDRkX3QgcDRkKTsKPiAgIAo+ICAgLyogUG9p bnRlcnMgaW4gdGhlIHBhZ2UgdGFibGUgdHJlZSBhcmUgcGh5c2ljYWwgYWRkcmVzc2VzICovCj4g ICAjZGVmaW5lIF9fcGd0YWJsZV9wdHJfdmFsKHB0cikJX19wYShwdHIpCj4gICAKPiAgICNkZWZp bmUgcG1kX3BhZ2VfdmFkZHIocG1kKQlfX3ZhKHBtZF92YWwocG1kKSAmIH5QTURfTUFTS0VEX0JJ VFMpCj4gICAjZGVmaW5lIHB1ZF9wYWdlX3ZhZGRyKHB1ZCkJX192YShwdWRfdmFsKHB1ZCkgJiB+ UFVEX01BU0tFRF9CSVRTKQo+IC0jZGVmaW5lIHBnZF9wYWdlX3ZhZGRyKHBnZCkJX192YShwZ2Rf dmFsKHBnZCkgJiB+UEdEX01BU0tFRF9CSVRTKQo+ICsjZGVmaW5lIHA0ZF9wYWdlX3ZhZGRyKHA0 ZCkJX192YShwNGRfdmFsKHA0ZCkgJiB+UDREX01BU0tFRF9CSVRTKQo+ICAgCj4gICAjZGVmaW5l IHBnZF9pbmRleChhZGRyZXNzKSAoKChhZGRyZXNzKSA+PiAoUEdESVJfU0hJRlQpKSAmIChQVFJT X1BFUl9QR0QgLSAxKSkKPiAgICNkZWZpbmUgcHVkX2luZGV4KGFkZHJlc3MpICgoKGFkZHJlc3Mp ID4+IChQVURfU0hJRlQpKSAmIChQVFJTX1BFUl9QVUQgLSAxKSkKPiBAQCAtMTAxMCw4ICsxMDE2 LDggQEAgZXh0ZXJuIHN0cnVjdCBwYWdlICpwZ2RfcGFnZShwZ2RfdCBwZ2QpOwo+ICAgCj4gICAj ZGVmaW5lIHBnZF9vZmZzZXQobW0sIGFkZHJlc3MpCSAoKG1tKS0+cGdkICsgcGdkX2luZGV4KGFk ZHJlc3MpKQo+ICAgCj4gLSNkZWZpbmUgcHVkX29mZnNldChwZ2RwLCBhZGRyKQlcCj4gLQkoKChw dWRfdCAqKSBwZ2RfcGFnZV92YWRkcigqKHBnZHApKSkgKyBwdWRfaW5kZXgoYWRkcikpCj4gKyNk ZWZpbmUgcHVkX29mZnNldChwNGRwLCBhZGRyKQlcCj4gKwkoKChwdWRfdCAqKSBwNGRfcGFnZV92 YWRkcigqKHA0ZHApKSkgKyBwdWRfaW5kZXgoYWRkcikpCj4gICAjZGVmaW5lIHBtZF9vZmZzZXQo cHVkcCxhZGRyKSBcCj4gICAJKCgocG1kX3QgKikgcHVkX3BhZ2VfdmFkZHIoKihwdWRwKSkpICsg cG1kX2luZGV4KGFkZHIpKQo+ICAgI2RlZmluZSBwdGVfb2Zmc2V0X2tlcm5lbChkaXIsYWRkcikg XAo+IEBAIC0xMzY4LDYgKzEzNzQsMTIgQEAgc3RhdGljIGlubGluZSBib29sIHB1ZF9pc19sZWFm KHB1ZF90IHB1ZCkKPiAgIAlyZXR1cm4gISEocHVkX3JhdyhwdWQpICYgY3B1X3RvX2JlNjQoX1BB R0VfUFRFKSk7Cj4gICB9Cj4gICAKPiArI2RlZmluZSBwNGRfaXNfbGVhZiBwNGRfaXNfbGVhZgo+ ICtzdGF0aWMgaW5saW5lIGJvb2wgcDRkX2lzX2xlYWYocDRkX3QgcDRkKQo+ICt7Cj4gKwlyZXR1 cm4gISEocDRkX3JhdyhwNGQpICYgY3B1X3RvX2JlNjQoX1BBR0VfUFRFKSk7Cj4gK30KPiArCj4g 
ICAjZGVmaW5lIHBnZF9pc19sZWFmIHBnZF9pc19sZWFmCj4gICAjZGVmaW5lIHBnZF9sZWFmIHBn ZF9pc19sZWFmCj4gICBzdGF0aWMgaW5saW5lIGJvb2wgcGdkX2lzX2xlYWYocGdkX3QgcGdkKQoK Wy4uLl0KCj4gZGlmZiAtLWdpdCBhL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9wZ3RhYmxlLmgg Yi9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vcGd0YWJsZS5oCj4gaW5kZXggOGNjNTQzZWQxMTRj Li4wYTA1ZmRkZDc4ODEgMTAwNjQ0Cj4gLS0tIGEvYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL3Bn dGFibGUuaAo+ICsrKyBiL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9wZ3RhYmxlLmgKPiBAQCAt MTM5LDYgKzEzOSwxNCBAQCBzdGF0aWMgaW5saW5lIGJvb2wgcHVkX2lzX2xlYWYocHVkX3QgcHVk KQo+ICAgfQo+ICAgI2VuZGlmCj4gICAKPiArI2lmbmRlZiBwNGRfaXNfbGVhZgo+ICsjZGVmaW5l IHA0ZF9pc19sZWFmIHA0ZF9pc19sZWFmCj4gK3N0YXRpYyBpbmxpbmUgYm9vbCBwNGRfaXNfbGVh ZihwNGRfdCBwNGQpCj4gK3sKPiArCXJldHVybiBmYWxzZTsKPiArfQo+ICsjZW5kaWYKPiArCj4g ICAjaWZuZGVmIHBnZF9pc19sZWFmCj4gICAjZGVmaW5lIHBnZF9pc19sZWFmIHBnZF9pc19sZWFm Cj4gICBzdGF0aWMgaW5saW5lIGJvb2wgcGdkX2lzX2xlYWYocGdkX3QgcGdkKQo+IGRpZmYgLS1n aXQgYS9hcmNoL3Bvd2VycGMva3ZtL2Jvb2szc182NF9tbXVfcmFkaXguYyBiL2FyY2gvcG93ZXJw Yy9rdm0vYm9vazNzXzY0X21tdV9yYWRpeC5jCj4gaW5kZXggODAzOTQwZDc5YjczLi41YWFjZmEw YjI3ZWYgMTAwNjQ0Cj4gLS0tIGEvYXJjaC9wb3dlcnBjL2t2bS9ib29rM3NfNjRfbW11X3JhZGl4 LmMKPiArKysgYi9hcmNoL3Bvd2VycGMva3ZtL2Jvb2szc182NF9tbXVfcmFkaXguYwo+IEBAIC00 OTQsMTcgKzQ5NCwzOSBAQCBzdGF0aWMgdm9pZCBrdm1wcGNfdW5tYXBfZnJlZV9wdWQoc3RydWN0 IGt2bSAqa3ZtLCBwdWRfdCAqcHVkLAo+ICAgCXB1ZF9mcmVlKGt2bS0+bW0sIHB1ZCk7Cj4gICB9 Cj4gICAKPiArc3RhdGljIHZvaWQga3ZtcHBjX3VubWFwX2ZyZWVfcDRkKHN0cnVjdCBrdm0gKmt2 bSwgcDRkX3QgKnA0ZCwKPiArCQkJCSAgdW5zaWduZWQgaW50IGxwaWQpCj4gK3sKPiArCXVuc2ln bmVkIGxvbmcgaXU7Cj4gKwlwNGRfdCAqcCA9IHA0ZDsKPiArCj4gKwlmb3IgKGl1ID0gMDsgaXUg PCBQVFJTX1BFUl9QNEQ7ICsraXUsICsrcCkgewo+ICsJCWlmICghcDRkX3ByZXNlbnQoKnApKQo+ ICsJCQljb250aW51ZTsKPiArCQlpZiAocDRkX2lzX2xlYWYoKnApKSB7Cj4gKwkJCXA0ZF9jbGVh cihwKTsKPiArCQl9IGVsc2Ugewo+ICsJCQlwdWRfdCAqcHVkOwo+ICsKPiArCQkJcHVkID0gcHVk X29mZnNldChwLCAwKTsKPiArCQkJa3ZtcHBjX3VubWFwX2ZyZWVfcHVkKGt2bSwgcHVkLCBscGlk 
KTsKPiArCQkJcDRkX2NsZWFyKHApOwo+ICsJCX0KPiArCX0KPiArCXA0ZF9mcmVlKGt2bS0+bW0s IHA0ZCk7Cj4gK30KPiArCj4gICB2b2lkIGt2bXBwY19mcmVlX3BndGFibGVfcmFkaXgoc3RydWN0 IGt2bSAqa3ZtLCBwZ2RfdCAqcGdkLCB1bnNpZ25lZCBpbnQgbHBpZCkKPiAgIHsKPiAgIAl1bnNp Z25lZCBsb25nIGlnOwo+ICAgCj4gICAJZm9yIChpZyA9IDA7IGlnIDwgUFRSU19QRVJfUEdEOyAr K2lnLCArK3BnZCkgewo+IC0JCXB1ZF90ICpwdWQ7Cj4gKwkJcDRkX3QgKnA0ZDsKPiAgIAo+ICAg CQlpZiAoIXBnZF9wcmVzZW50KCpwZ2QpKQo+ICAgCQkJY29udGludWU7Cj4gLQkJcHVkID0gcHVk X29mZnNldChwZ2QsIDApOwo+IC0JCWt2bXBwY191bm1hcF9mcmVlX3B1ZChrdm0sIHB1ZCwgbHBp ZCk7Cj4gKwkJcDRkID0gcDRkX29mZnNldChwZ2QsIDApOwo+ICsJCWt2bXBwY191bm1hcF9mcmVl X3A0ZChrdm0sIHA0ZCwgbHBpZCk7Cj4gICAJCXBnZF9jbGVhcihwZ2QpOwo+ICAgCX0KPiAgIH0K PiBAQCAtNTY2LDYgKzU4OCw3IEBAIGludCBrdm1wcGNfY3JlYXRlX3B0ZShzdHJ1Y3Qga3ZtICpr dm0sIHBnZF90ICpwZ3RhYmxlLCBwdGVfdCBwdGUsCj4gICAJCSAgICAgIHVuc2lnbmVkIGxvbmcg KnJtYXBwLCBzdHJ1Y3Qgcm1hcF9uZXN0ZWQgKipuX3JtYXApCj4gICB7Cj4gICAJcGdkX3QgKnBn ZDsKPiArCXA0ZF90ICpwNGQsICpuZXdfcDRkID0gTlVMTDsKPiAgIAlwdWRfdCAqcHVkLCAqbmV3 X3B1ZCA9IE5VTEw7Cj4gICAJcG1kX3QgKnBtZCwgKm5ld19wbWQgPSBOVUxMOwo+ICAgCXB0ZV90 ICpwdGVwLCAqbmV3X3B0ZXAgPSBOVUxMOwo+IEBAIC01NzMsOSArNTk2LDE1IEBAIGludCBrdm1w cGNfY3JlYXRlX3B0ZShzdHJ1Y3Qga3ZtICprdm0sIHBnZF90ICpwZ3RhYmxlLCBwdGVfdCBwdGUs Cj4gICAKPiAgIAkvKiBUcmF2ZXJzZSB0aGUgZ3Vlc3QncyAybmQtbGV2ZWwgdHJlZSwgYWxsb2Nh dGUgbmV3IGxldmVscyBuZWVkZWQgKi8KPiAgIAlwZ2QgPSBwZ3RhYmxlICsgcGdkX2luZGV4KGdw YSk7Cj4gLQlwdWQgPSBOVUxMOwo+ICsJcDRkID0gTlVMTDsKPiAgIAlpZiAocGdkX3ByZXNlbnQo KnBnZCkpCj4gLQkJcHVkID0gcHVkX29mZnNldChwZ2QsIGdwYSk7Cj4gKwkJcDRkID0gcDRkX29m ZnNldChwZ2QsIGdwYSk7Cj4gKwllbHNlCj4gKwkJbmV3X3A0ZCA9IHA0ZF9hbGxvY19vbmUoa3Zt LT5tbSwgZ3BhKTsKPiArCj4gKwlwdWQgPSBOVUxMOwo+ICsJaWYgKHA0ZF9wcmVzZW50KCpwNGQp KQo+ICsJCXB1ZCA9IHB1ZF9vZmZzZXQocDRkLCBncGEpOwoKSXMgaXQgd29ydGggYWRkaW5nIGFs bCB0aGlzIG5ldyBjb2RlID8KCk15IHVuZGVyc3RhbmRpbmcgaXMgdGhhdCB0aGUgc2VyaWVzIG9i amVjdGl2ZSBpcyB0byBnZXQgcmlkIG9mIApfX0FSQ0hfSEFTXzVMRVZFTF9IQUNLLCB0byB0byBh 
ZGQgc3VwcG9ydCBmb3IgNSBsZXZlbHMgdG8gYW4gCmFyY2hpdGVjdHVyZSB0aGF0IG5vdCBuZWVk IGl0IChhdCBsZWFzdCBmb3Igbm93KS4KSWYgd2Ugd2FudCB0byBhZGQgc3VwcG9ydCBmb3IgNSBs ZXZlbHMsIGl0IGNhbiBiZSBkb25lIGxhdGVyIGluIGFub3RoZXIgCnBhdGNoLgoKSGVyZSBJIHRo aW5rIHlvdXIgY2hhbmdlIGNvdWxkIGJlIGxpbWl0ZWQgdG86CgotCQlwdWQgPSBwdWRfb2Zmc2V0 KHBnZCwgZ3BhKTsKKwkJcHVkID0gcHVkX29mZnNldChwNGRfb2Zmc2V0KHBnZCwgZ3BhKSwgZ3Bh KTsKCgo+ICAgCWVsc2UKPiAgIAkJbmV3X3B1ZCA9IHB1ZF9hbGxvY19vbmUoa3ZtLT5tbSwgZ3Bh KTsKPiAgIAo+IEBAIC01OTcsMTIgKzYyNiwxOCBAQCBpbnQga3ZtcHBjX2NyZWF0ZV9wdGUoc3Ry dWN0IGt2bSAqa3ZtLCBwZ2RfdCAqcGd0YWJsZSwgcHRlX3QgcHRlLAo+ICAgCS8qIE5vdyB0cmF2 ZXJzZSBhZ2FpbiB1bmRlciB0aGUgbG9jayBhbmQgY2hhbmdlIHRoZSB0cmVlICovCj4gICAJcmV0 ID0gLUVOT01FTTsKPiAgIAlpZiAocGdkX25vbmUoKnBnZCkpIHsKPiArCQlpZiAoIW5ld19wNGQp Cj4gKwkJCWdvdG8gb3V0X3VubG9jazsKPiArCQlwZ2RfcG9wdWxhdGUoa3ZtLT5tbSwgcGdkLCBu ZXdfcDRkKTsKPiArCQluZXdfcDRkID0gTlVMTDsKPiArCX0KPiArCWlmIChwNGRfbm9uZSgqcDRk KSkgewo+ICAgCQlpZiAoIW5ld19wdWQpCj4gICAJCQlnb3RvIG91dF91bmxvY2s7Cj4gLQkJcGdk X3BvcHVsYXRlKGt2bS0+bW0sIHBnZCwgbmV3X3B1ZCk7Cj4gKwkJcDRkX3BvcHVsYXRlKGt2bS0+ bW0sIHA0ZCwgbmV3X3B1ZCk7Cj4gICAJCW5ld19wdWQgPSBOVUxMOwo+ICAgCX0KPiAtCXB1ZCA9 IHB1ZF9vZmZzZXQocGdkLCBncGEpOwo+ICsJcHVkID0gcHVkX29mZnNldChwNGQsIGdwYSk7Cj4g ICAJaWYgKHB1ZF9pc19sZWFmKCpwdWQpKSB7Cj4gICAJCXVuc2lnbmVkIGxvbmcgaGdwYSA9IGdw YSAmIFBVRF9NQVNLOwo+ICAgCj4gQEAgLTEyMjAsNiArMTI1NSw3IEBAIHN0YXRpYyBzc2l6ZV90 IGRlYnVnZnNfcmFkaXhfcmVhZChzdHJ1Y3QgZmlsZSAqZmlsZSwgY2hhciBfX3VzZXIgKmJ1ZiwK PiAgIAlwZ2RfdCAqcGd0Owo+ICAgCXN0cnVjdCBrdm1fbmVzdGVkX2d1ZXN0ICpuZXN0ZWQ7Cj4g ICAJcGdkX3QgcGdkLCAqcGdkcDsKPiArCXA0ZF90IHA0ZCwgKnA0ZHA7Cj4gICAJcHVkX3QgcHVk LCAqcHVkcDsKPiAgIAlwbWRfdCBwbWQsICpwbWRwOwo+ICAgCXB0ZV90ICpwdGVwOwo+IEBAIC0x Mjk4LDcgKzEzMzQsMTQgQEAgc3RhdGljIHNzaXplX3QgZGVidWdmc19yYWRpeF9yZWFkKHN0cnVj dCBmaWxlICpmaWxlLCBjaGFyIF9fdXNlciAqYnVmLAo+ICAgCQkJY29udGludWU7Cj4gICAJCX0K PiAgIAo+IC0JCXB1ZHAgPSBwdWRfb2Zmc2V0KCZwZ2QsIGdwYSk7Cj4gKwkJcDRkcCA9IHA0ZF9v 
ZmZzZXQoJnBnZCwgZ3BhKTsKPiArCQlwNGQgPSBSRUFEX09OQ0UoKnA0ZHApOwo+ICsJCWlmICgh KHA0ZF92YWwocDRkKSAmIF9QQUdFX1BSRVNFTlQpKSB7Cj4gKwkJCWdwYSA9IChncGEgJiBQNERf TUFTSykgKyBQNERfU0laRTsKPiArCQkJY29udGludWU7Cj4gKwkJfQo+ICsKPiArCQlwdWRwID0g cHVkX29mZnNldCgmcDRkLCBncGEpOwoKU2FtZSwgaGVyZSB5b3UgYXJlIGZvcmNpbmcgYSB1c2Vs ZXNzIHJlYWQgd2l0aCBSRUFEX09OQ0UoKS4KCllvdXIgY2hhbmdlIGNvdWxkIGJlIGxpbWl0ZWQg dG8KCi0JCXB1ZHAgPSBwdWRfb2Zmc2V0KCZwZ2QsIGdwYSk7CisJCXB1ZHAgPSBwdWRfb2Zmc2V0 KHA0ZF9vZmZzZXQoJnBnZCwgZ3BhKSwgZ3BhKTsKClRoaXMgY29tbWVudCBhcHBsaWVzIHRvIG1h bnkgb3RoZXIgcGxhY2VzLgoKCj4gICAJCXB1ZCA9IFJFQURfT05DRSgqcHVkcCk7Cj4gICAJCWlm ICghKHB1ZF92YWwocHVkKSAmIF9QQUdFX1BSRVNFTlQpKSB7Cj4gICAJCQlncGEgPSAoZ3BhICYg UFVEX01BU0spICsgUFVEX1NJWkU7Cj4gZGlmZiAtLWdpdCBhL2FyY2gvcG93ZXJwYy9saWIvY29k ZS1wYXRjaGluZy5jIGIvYXJjaC9wb3dlcnBjL2xpYi9jb2RlLXBhdGNoaW5nLmMKPiBpbmRleCAz MzQ1ZjAzOWE4NzYuLjdhNTlmNjg2M2NlYyAxMDA2NDQKPiAtLS0gYS9hcmNoL3Bvd2VycGMvbGli L2NvZGUtcGF0Y2hpbmcuYwo+ICsrKyBiL2FyY2gvcG93ZXJwYy9saWIvY29kZS1wYXRjaGluZy5j Cj4gQEAgLTEwNywxMyArMTA3LDE4IEBAIHN0YXRpYyBpbmxpbmUgaW50IHVubWFwX3BhdGNoX2Fy ZWEodW5zaWduZWQgbG9uZyBhZGRyKQo+ICAgCXB0ZV90ICpwdGVwOwo+ICAgCXBtZF90ICpwbWRw Owo+ICAgCXB1ZF90ICpwdWRwOwo+ICsJcDRkX3QgKnA0ZHA7Cj4gICAJcGdkX3QgKnBnZHA7Cj4g ICAKPiAgIAlwZ2RwID0gcGdkX29mZnNldF9rKGFkZHIpOwo+ICAgCWlmICh1bmxpa2VseSghcGdk cCkpCj4gICAJCXJldHVybiAtRUlOVkFMOwo+ICAgCj4gLQlwdWRwID0gcHVkX29mZnNldChwZ2Rw LCBhZGRyKTsKPiArCXA0ZHAgPSBwNGRfb2Zmc2V0KHBnZHAsIGFkZHIpOwo+ICsJaWYgKHVubGlr ZWx5KCFwNGRwKSkKPiArCQlyZXR1cm4gLUVJTlZBTDsKPiArCj4gKwlwdWRwID0gcHVkX29mZnNl dChwNGRwLCBhZGRyKTsKPiAgIAlpZiAodW5saWtlbHkoIXB1ZHApKQo+ICAgCQlyZXR1cm4gLUVJ TlZBTDsKPiAgIAo+IGRpZmYgLS1naXQgYS9hcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMg Yi9hcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMKPiBpbmRleCAwYTFjNjVhMmM1NjUuLmIy ZmMzZTcxMTY1YyAxMDA2NDQKPiAtLS0gYS9hcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMK PiArKysgYi9hcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMKPiBAQCAtMzEyLDcgKzMxMiw3 
IEBAIHZvaWQgaGFzaF9wcmVsb2FkKHN0cnVjdCBtbV9zdHJ1Y3QgKm1tLCB1bnNpZ25lZCBsb25n IGVhKQo+ICAgCj4gICAJaWYgKCFIYXNoKQo+ICAgCQlyZXR1cm47Cj4gLQlwbWQgPSBwbWRfb2Zm c2V0KHB1ZF9vZmZzZXQocGdkX29mZnNldChtbSwgZWEpLCBlYSksIGVhKTsKPiArCXBtZCA9IHBt ZF9vZmZzZXQocHVkX29mZnNldChwNGRfb2Zmc2V0KHBnZF9vZmZzZXQobW0sIGVhKSwgZWEpLCBl YSksIGVhKTsKCklmIHdlIGNvbnRpbnVlIGxpa2UgdGhpcywgaW4gdGVuIHllYXJzIHRoaXMgbGlr ZSBpcyBnb2luZyB0byBiZSBtYW55IApraWxvbWV0ZXJzIGxvbmcuCgpJIHRoaW5rIHRoZSBhYm92 ZSB3b3VsZCBiZSB3b3J0aCBhIGdlbmVyaWMgaGVscGVyLgoKPiAgIAlpZiAoIXBtZF9ub25lKCpw bWQpKQo+ICAgCQlhZGRfaGFzaF9wYWdlKG1tLT5jb250ZXh0LmlkLCBlYSwgcG1kX3ZhbCgqcG1k KSk7Cj4gICB9Cj4gZGlmZiAtLWdpdCBhL2FyY2gvcG93ZXJwYy9tbS9ib29rM3MzMi90bGIuYyBi L2FyY2gvcG93ZXJwYy9tbS9ib29rM3MzMi90bGIuYwo+IGluZGV4IDJmY2QzMjEwNDBmZi4uMTc1 YmMzM2I0MWI3IDEwMDY0NAo+IC0tLSBhL2FyY2gvcG93ZXJwYy9tbS9ib29rM3MzMi90bGIuYwo+ ICsrKyBiL2FyY2gvcG93ZXJwYy9tbS9ib29rM3MzMi90bGIuYwo+IEBAIC04Nyw3ICs4Nyw3IEBA IHN0YXRpYyB2b2lkIGZsdXNoX3JhbmdlKHN0cnVjdCBtbV9zdHJ1Y3QgKm1tLCB1bnNpZ25lZCBs b25nIHN0YXJ0LAo+ICAgCWlmIChzdGFydCA+PSBlbmQpCj4gICAJCXJldHVybjsKPiAgIAllbmQg PSAoZW5kIC0gMSkgfCB+UEFHRV9NQVNLOwo+IC0JcG1kID0gcG1kX29mZnNldChwdWRfb2Zmc2V0 KHBnZF9vZmZzZXQobW0sIHN0YXJ0KSwgc3RhcnQpLCBzdGFydCk7Cj4gKwlwbWQgPSBwbWRfb2Zm c2V0KHB1ZF9vZmZzZXQocDRkX29mZnNldChwZ2Rfb2Zmc2V0KG1tLCBzdGFydCksIHN0YXJ0KSwg c3RhcnQpLCBzdGFydCk7Cj4gICAJZm9yICg7Oykgewo+ICAgCQlwbWRfZW5kID0gKChzdGFydCAr IFBHRElSX1NJWkUpICYgUEdESVJfTUFTSykgLSAxOwo+ICAgCQlpZiAocG1kX2VuZCA+IGVuZCkK PiBAQCAtMTQ1LDcgKzE0NSw3IEBAIHZvaWQgZmx1c2hfdGxiX3BhZ2Uoc3RydWN0IHZtX2FyZWFf c3RydWN0ICp2bWEsIHVuc2lnbmVkIGxvbmcgdm1hZGRyKQo+ICAgCQlyZXR1cm47Cj4gICAJfQo+ ICAgCW1tID0gKHZtYWRkciA8IFRBU0tfU0laRSk/IHZtYS0+dm1fbW06ICZpbml0X21tOwo+IC0J cG1kID0gcG1kX29mZnNldChwdWRfb2Zmc2V0KHBnZF9vZmZzZXQobW0sIHZtYWRkciksIHZtYWRk ciksIHZtYWRkcik7Cj4gKwlwbWQgPSBwbWRfb2Zmc2V0KHB1ZF9vZmZzZXQocDRkX29mZnNldChw Z2Rfb2Zmc2V0KG1tLCB2bWFkZHIpLCB2bWFkZHIpLCB2bWFkZHIpLCB2bWFkZHIpOwo+ICAgCWlm 
ICghcG1kX25vbmUoKnBtZCkpCj4gICAJCWZsdXNoX2hhc2hfcGFnZXMobW0tPmNvbnRleHQuaWQs IHZtYWRkciwgcG1kX3ZhbCgqcG1kKSwgMSk7Cj4gICB9Cj4gZGlmZiAtLWdpdCBhL2FyY2gvcG93 ZXJwYy9tbS9ib29rM3M2NC9oYXNoX3BndGFibGUuYyBiL2FyY2gvcG93ZXJwYy9tbS9ib29rM3M2 NC9oYXNoX3BndGFibGUuYwo+IGluZGV4IDY0NzMzYjljYjIwYS4uOWNkMTU5MzdlODhhIDEwMDY0 NAo+IC0tLSBhL2FyY2gvcG93ZXJwYy9tbS9ib29rM3M2NC9oYXNoX3BndGFibGUuYwo+ICsrKyBi L2FyY2gvcG93ZXJwYy9tbS9ib29rM3M2NC9oYXNoX3BndGFibGUuYwo+IEBAIC0xNDgsNiArMTQ4 LDcgQEAgdm9pZCBoYXNoX192bWVtbWFwX3JlbW92ZV9tYXBwaW5nKHVuc2lnbmVkIGxvbmcgc3Rh cnQsCj4gICBpbnQgaGFzaF9fbWFwX2tlcm5lbF9wYWdlKHVuc2lnbmVkIGxvbmcgZWEsIHVuc2ln bmVkIGxvbmcgcGEsIHBncHJvdF90IHByb3QpCj4gICB7Cj4gICAJcGdkX3QgKnBnZHA7Cj4gKwlw NGRfdCAqcDRkcDsKPiAgIAlwdWRfdCAqcHVkcDsKPiAgIAlwbWRfdCAqcG1kcDsKPiAgIAlwdGVf dCAqcHRlcDsKPiBAQCAtMTU1LDcgKzE1Niw4IEBAIGludCBoYXNoX19tYXBfa2VybmVsX3BhZ2Uo dW5zaWduZWQgbG9uZyBlYSwgdW5zaWduZWQgbG9uZyBwYSwgcGdwcm90X3QgcHJvdCkKPiAgIAlC VUlMRF9CVUdfT04oVEFTS19TSVpFX1VTRVI2NCA+IEhfUEdUQUJMRV9SQU5HRSk7Cj4gICAJaWYg KHNsYWJfaXNfYXZhaWxhYmxlKCkpIHsKPiAgIAkJcGdkcCA9IHBnZF9vZmZzZXRfayhlYSk7Cj4g LQkJcHVkcCA9IHB1ZF9hbGxvYygmaW5pdF9tbSwgcGdkcCwgZWEpOwo+ICsJCXA0ZHAgPSBwNGRf b2Zmc2V0KHBnZHAsIGVhKTsKPiArCQlwdWRwID0gcHVkX2FsbG9jKCZpbml0X21tLCBwNGRwLCBl YSk7CgpDb3VsZCBiZSBhIHNpbmdsZSBsaW5lLCB3aXRob3V0IGEgbmV3IHZhci4KCi0JCXB1ZHAg PSBwdWRfYWxsb2MoJmluaXRfbW0sIHBnZHAsIGVhKTsKKwkJcHVkcCA9IHB1ZF9hbGxvYygmaW5p dF9tbSwgcDRkX29mZnNldChwZ2RwLCBlYSksIGVhKTsKCgpTYW1lIGtpbmQgb2YgY29tbWVudHMg YXMgYWxyZWFkeSBkb25lIGFwcGx5IHRvIHRoZSByZXN0LgoKQ2hyaXN0b3BoZQpfX19fX19fX19f X19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fXwprdm1hcm0gbWFpbGluZyBsaXN0 Cmt2bWFybUBsaXN0cy5jcy5jb2x1bWJpYS5lZHUKaHR0cHM6Ly9saXN0cy5jcy5jb2x1bWJpYS5l ZHUvbWFpbG1hbi9saXN0aW5mby9rdm1hcm0K From mboxrd@z Thu Jan 1 00:00:00 1970 Return-Path: X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on aws-us-west-2-korg-lkml-1.web.codeaurora.org X-Spam-Level: X-Spam-Status: No, score=-8.3 required=3.0 tests=DKIMWL_WL_HIGH,DKIM_SIGNED, 
DKIM_VALID,HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_PATCH,MAILING_LIST_MULTI, SIGNED_OFF_BY,SPF_HELO_NONE,SPF_PASS,USER_AGENT_SANE_1 autolearn=unavailable autolearn_force=no version=3.4.0 Received: from mail.kernel.org (mail.kernel.org [198.145.29.99]) by smtp.lore.kernel.org (Postfix) with ESMTP id CF344C2BA83 for ; Sun, 16 Feb 2020 10:41:33 +0000 (UTC) Received: from bombadil.infradead.org (bombadil.infradead.org [198.137.202.133]) (using TLSv1.2 with cipher ECDHE-RSA-AES256-GCM-SHA384 (256/256 bits)) (No client certificate requested) by mail.kernel.org (Postfix) with ESMTPS id 9D61B20718 for ; Sun, 16 Feb 2020 10:41:33 +0000 (UTC) Authentication-Results: mail.kernel.org; dkim=pass (2048-bit key) header.d=lists.infradead.org header.i=@lists.infradead.org header.b="eWb9QqDd"; dkim=fail reason="signature verification failed" (1024-bit key) header.d=c-s.fr header.i=@c-s.fr header.b="v+gbcEDk" DMARC-Filter: OpenDMARC Filter v1.3.2 mail.kernel.org 9D61B20718 Authentication-Results: mail.kernel.org; dmarc=none (p=none dis=none) header.from=c-s.fr Authentication-Results: mail.kernel.org; spf=none smtp.mailfrom=linux-arm-kernel-bounces+infradead-linux-arm-kernel=archiver.kernel.org@lists.infradead.org DKIM-Signature: v=1; a=rsa-sha256; q=dns/txt; c=relaxed/relaxed; d=lists.infradead.org; s=bombadil.20170209; h=Sender:Content-Type: Content-Transfer-Encoding:Cc:List-Subscribe:List-Help:List-Post:List-Archive: List-Unsubscribe:List-Id:In-Reply-To:MIME-Version:Date:Message-ID:From: References:To:Subject:Reply-To:Content-ID:Content-Description:Resent-Date: Resent-From:Resent-Sender:Resent-To:Resent-Cc:Resent-Message-ID:List-Owner; bh=OCO9gj9aakToPqSdsakFYIxt0ACVmhvtPMFEETjPnVg=; b=eWb9QqDdQaep/wywHpl2gsNPx xn31wLYOLLyhp53kJsuqoh9hLpsZCHNo9UeR9YShlV+dsNl4HawoGcPf0/kTpZJZQlkcw4tiepkxi jm4dd9gWozZodiwFzklRzyaEBYBoDNSBWOMef07yKuhg2+YYb48kt3BrmIvjxhtOlPmV6IbaE018N VKzF+TOF0gU8mDdh7CtVP/lt4SLXBtyXKRgnuQF7TWyEvCRcgIzimDikB4NKatE3I3dfAlF0BV17X 
C2yhaxreIdsMu6Y50ts+saIavNhsDw8vKpK4uqcWJX/VrDuAT7eQ7eQif/uhdHNSIRGmRAESRlEvs Z74A/4iYQ==; Received: from localhost ([127.0.0.1] helo=bombadil.infradead.org) by bombadil.infradead.org with esmtp (Exim 4.92.3 #3 (Red Hat Linux)) id 1j3HMY-0000jd-2c; Sun, 16 Feb 2020 10:41:22 +0000 Received: from pegase1.c-s.fr ([93.17.236.30]) by bombadil.infradead.org with esmtps (Exim 4.92.3 #3 (Red Hat Linux)) id 1j3HMT-0000h8-JG for linux-arm-kernel@lists.infradead.org; Sun, 16 Feb 2020 10:41:20 +0000 Received: from localhost (mailhub1-int [192.168.12.234]) by localhost (Postfix) with ESMTP id 48L3Yv0sRkz9tyM7; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Authentication-Results: localhost; dkim=pass reason="1024-bit key; insecure key" header.d=c-s.fr header.i=@c-s.fr header.b=v+gbcEDk; dkim-adsp=pass; dkim-atps=neutral X-Virus-Scanned: Debian amavisd-new at c-s.fr Received: from pegase1.c-s.fr ([192.168.12.234]) by localhost (pegase1.c-s.fr [192.168.12.234]) (amavisd-new, port 10024) with ESMTP id WyvC_D0c5SGS; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Received: from messagerie.si.c-s.fr (messagerie.si.c-s.fr [192.168.25.192]) by pegase1.c-s.fr (Postfix) with ESMTP id 48L3Yt6byhz9tyM6; Sun, 16 Feb 2020 11:41:06 +0100 (CET) DKIM-Signature: v=1; a=rsa-sha256; c=relaxed/relaxed; d=c-s.fr; s=mail; t=1581849666; bh=uYX8/YhQnPSB2yIUz2Iiabdy4hBMITqWSMKEeNzcsUE=; h=Subject:To:Cc:References:From:Date:In-Reply-To:From; b=v+gbcEDki2kN7vHEJxzn4fWBIL7Q/3I+0FBhUByo9drALwmhlhMysLr6CcMG4Tb/X Z4bTovlfUg5KRdTCIWxMkP3mPM9tSoSf43EJfHHltAPtJWCtCrqKOA8Gx1u5xXKGgL NDEaCjEHzsYa4iDa+yCT8tNN28WzZnaXEMbkJvBY= Received: from localhost (localhost [127.0.0.1]) by messagerie.si.c-s.fr (Postfix) with ESMTP id D90528B784; Sun, 16 Feb 2020 11:41:09 +0100 (CET) X-Virus-Scanned: amavisd-new at c-s.fr Received: from messagerie.si.c-s.fr ([127.0.0.1]) by localhost (messagerie.si.c-s.fr [127.0.0.1]) (amavisd-new, port 10023) with ESMTP id VGVzW0Ckh2uU; Sun, 16 Feb 2020 11:41:09 +0100 (CET) Received: from [192.168.4.90] (unknown 
[192.168.4.90]) by messagerie.si.c-s.fr (Postfix) with ESMTP id B4E908B755; Sun, 16 Feb 2020 11:41:07 +0100 (CET) Subject: Re: [PATCH v2 07/13] powerpc: add support for folded p4d page tables To: Mike Rapoport , linux-kernel@vger.kernel.org References: <20200216081843.28670-1-rppt@kernel.org> <20200216081843.28670-8-rppt@kernel.org> From: Christophe Leroy Message-ID: Date: Sun, 16 Feb 2020 11:41:07 +0100 User-Agent: Mozilla/5.0 (Windows NT 6.1; WOW64; rv:68.0) Gecko/20100101 Thunderbird/68.5.0 MIME-Version: 1.0 In-Reply-To: <20200216081843.28670-8-rppt@kernel.org> Content-Language: fr X-CRM114-Version: 20100106-BlameMichelson ( TRE 0.8.0 (BSD) ) MR-646709E3 X-CRM114-CacheID: sfid-20200216_024117_930280_85A3E3F2 X-CRM114-Status: GOOD ( 26.30 ) X-BeenThere: linux-arm-kernel@lists.infradead.org X-Mailman-Version: 2.1.29 Precedence: list List-Id: List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Cc: Rich Felker , linux-ia64@vger.kernel.org, Geert Uytterhoeven , linux-sh@vger.kernel.org, Benjamin Herrenschmidt , linux-mm@kvack.org, Paul Mackerras , linux-hexagon@vger.kernel.org, Will Deacon , kvmarm@lists.cs.columbia.edu, Jonas Bonn , linux-arch@vger.kernel.org, Brian Cain , Marc Zyngier , Russell King , Ley Foon Tan , Mike Rapoport , Catalin Marinas , Julien Thierry , uclinux-h8-devel@lists.sourceforge.jp, Fenghua Yu , Arnd Bergmann , Suzuki K Poulose , kvm-ppc@vger.kernel.org, Stefan Kristiansson , openrisc@lists.librecores.org, Stafford Horne , Guan Xuetao , linux-arm-kernel@lists.infradead.org, Tony Luck , Yoshinori Sato , James Morse , Michael Ellerman , nios2-dev@lists.rocketboards.org, Andrew Morton , linuxppc-dev@lists.ozlabs.org Content-Transfer-Encoding: base64 Content-Type: text/plain; charset="utf-8"; Format="flowed" Sender: "linux-arm-kernel" Errors-To: linux-arm-kernel-bounces+infradead-linux-arm-kernel=archiver.kernel.org@lists.infradead.org CgpMZSAxNi8wMi8yMDIwIMOgIDA5OjE4LCBNaWtlIFJhcG9wb3J0IGEgw6ljcml0wqA6Cj4gRnJv 
bTogTWlrZSBSYXBvcG9ydCA8cnBwdEBsaW51eC5pYm0uY29tPgo+IAo+IEltcGxlbWVudCBwcmlt aXRpdmVzIG5lY2Vzc2FyeSBmb3IgdGhlIDR0aCBsZXZlbCBmb2xkaW5nLCBhZGQgd2Fsa3Mgb2Yg cDRkCj4gbGV2ZWwgd2hlcmUgYXBwcm9wcmlhdGUgYW5kIHJlcGxhY2UgNWxldmVsLWZpeHVwLmgg d2l0aCBwZ3RhYmxlLW5vcDRkLmguCgpJIGRvbid0IHRoaW5rIGl0IGlzIHdvcnRoIGFkZGluZyBh bGwgdGhpcyBhZGRpdGlvbm5hbHMgd2Fsa3Mgb2YgcDRkLCAKdGhpcyBwYXRjaCBjb3VsZCBiZSBs aW1pdGVkIHRvIGNoYW5nZXMgbGlrZToKCi0JCXB1ZCA9IHB1ZF9vZmZzZXQocGdkLCBncGEpOwor CQlwdWQgPSBwdWRfb2Zmc2V0KHA0ZF9vZmZzZXQocGdkLCBncGEpLCBncGEpOwoKVGhlIGFkZGl0 aW9ubmFsIHdhbGtzIHNob3VsZCBiZSBhZGRlZCB0aHJvdWdoIGFub3RoZXIgcGF0Y2ggdGhlIGRh eSAKcG93ZXJwYyBuZWVkIHRoZW0uCgpTZWUgYmVsb3cgZm9yIG1vcmUgY29tbWVudHMuCgo+IAo+ IFNpZ25lZC1vZmYtYnk6IE1pa2UgUmFwb3BvcnQgPHJwcHRAbGludXguaWJtLmNvbT4KPiBUZXN0 ZWQtYnk6IENocmlzdG9waGUgTGVyb3kgPGNocmlzdG9waGUubGVyb3lAYy1zLmZyPiAjIDh4eCBh bmQgODN4eAo+IC0tLQo+ICAgYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy8zMi9wZ3Rh YmxlLmggIHwgIDEgLQo+ICAgYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy82NC9oYXNo LmggICAgIHwgIDQgKy0KPiAgIGFyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3MvNjQvcGdh bGxvYy5oICB8ICA0ICstCj4gICBhcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzY0L3Bn dGFibGUuaCAgfCA1OCArKysrKysrKysrLS0tLS0tLS0KPiAgIGFyY2gvcG93ZXJwYy9pbmNsdWRl L2FzbS9ib29rM3MvNjQvcmFkaXguaCAgICB8ICA2ICstCj4gICBhcmNoL3Bvd2VycGMvaW5jbHVk ZS9hc20vbm9oYXNoLzMyL3BndGFibGUuaCAgfCAgMSAtCj4gICBhcmNoL3Bvd2VycGMvaW5jbHVk ZS9hc20vbm9oYXNoLzY0L3BnYWxsb2MuaCAgfCAgMiArLQo+ICAgLi4uL2luY2x1ZGUvYXNtL25v aGFzaC82NC9wZ3RhYmxlLTRrLmggICAgICAgIHwgMzIgKysrKystLS0tLQo+ICAgYXJjaC9wb3dl cnBjL2luY2x1ZGUvYXNtL25vaGFzaC82NC9wZ3RhYmxlLmggIHwgIDYgKy0KPiAgIGFyY2gvcG93 ZXJwYy9pbmNsdWRlL2FzbS9wZ3RhYmxlLmggICAgICAgICAgICB8ICA4ICsrKwo+ICAgYXJjaC9w b3dlcnBjL2t2bS9ib29rM3NfNjRfbW11X3JhZGl4LmMgICAgICAgIHwgNTkgKysrKysrKysrKysr KysrKy0tLQo+ICAgYXJjaC9wb3dlcnBjL2xpYi9jb2RlLXBhdGNoaW5nLmMgICAgICAgICAgICAg IHwgIDcgKystCj4gICBhcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMgICAgICAgICAgICAg 
ICAgfCAgMiArLQo+ICAgYXJjaC9wb3dlcnBjL21tL2Jvb2szczMyL3RsYi5jICAgICAgICAgICAg ICAgIHwgIDQgKy0KPiAgIGFyY2gvcG93ZXJwYy9tbS9ib29rM3M2NC9oYXNoX3BndGFibGUuYyAg ICAgICB8ICA0ICstCj4gICBhcmNoL3Bvd2VycGMvbW0vYm9vazNzNjQvcmFkaXhfcGd0YWJsZS5j ICAgICAgfCAxOSArKysrLS0KPiAgIGFyY2gvcG93ZXJwYy9tbS9ib29rM3M2NC9zdWJwYWdlX3By b3QuYyAgICAgICB8ICA2ICstCj4gICBhcmNoL3Bvd2VycGMvbW0vaHVnZXRsYnBhZ2UuYyAgICAg ICAgICAgICAgICAgfCAyOCArKysrKy0tLS0KPiAgIGFyY2gvcG93ZXJwYy9tbS9rYXNhbi9rYXNh bl9pbml0XzMyLmMgICAgICAgICB8ICA4ICstLQo+ICAgYXJjaC9wb3dlcnBjL21tL21lbS5jICAg ICAgICAgICAgICAgICAgICAgICAgIHwgIDQgKy0KPiAgIGFyY2gvcG93ZXJwYy9tbS9ub2hhc2gv NDB4LmMgICAgICAgICAgICAgICAgICB8ICA0ICstCj4gICBhcmNoL3Bvd2VycGMvbW0vbm9oYXNo L2Jvb2szZV9wZ3RhYmxlLmMgICAgICAgfCAxNSArKystLQo+ICAgYXJjaC9wb3dlcnBjL21tL3Bn dGFibGUuYyAgICAgICAgICAgICAgICAgICAgIHwgMjUgKysrKysrKy0KPiAgIGFyY2gvcG93ZXJw Yy9tbS9wZ3RhYmxlXzMyLmMgICAgICAgICAgICAgICAgICB8IDI4ICsrKysrLS0tLQo+ICAgYXJj aC9wb3dlcnBjL21tL3BndGFibGVfNjQuYyAgICAgICAgICAgICAgICAgIHwgMTAgKystLQo+ICAg YXJjaC9wb3dlcnBjL21tL3B0ZHVtcC9oYXNocGFnZXRhYmxlLmMgICAgICAgIHwgMjAgKysrKysr LQo+ICAgYXJjaC9wb3dlcnBjL21tL3B0ZHVtcC9wdGR1bXAuYyAgICAgICAgICAgICAgIHwgMjIg KysrKysrLQo+ICAgYXJjaC9wb3dlcnBjL3htb24veG1vbi5jICAgICAgICAgICAgICAgICAgICAg IHwgMTcgKysrKystCj4gICAyOCBmaWxlcyBjaGFuZ2VkLCAyODQgaW5zZXJ0aW9ucygrKSwgMTIw IGRlbGV0aW9ucygtKQo+IAo+IGRpZmYgLS1naXQgYS9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20v Ym9vazNzLzMyL3BndGFibGUuaCBiL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3MvMzIv cGd0YWJsZS5oCj4gaW5kZXggNWIzOWMxMWU4ODRhLi4zOWVjMTEzNzFiZTAgMTAwNjQ0Cj4gLS0t IGEvYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy8zMi9wZ3RhYmxlLmgKPiArKysgYi9h cmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzMyL3BndGFibGUuaAo+IEBAIC0yLDcgKzIs NiBAQAo+ICAgI2lmbmRlZiBfQVNNX1BPV0VSUENfQk9PSzNTXzMyX1BHVEFCTEVfSAo+ICAgI2Rl ZmluZSBfQVNNX1BPV0VSUENfQk9PSzNTXzMyX1BHVEFCTEVfSAo+ICAgCj4gLSNkZWZpbmUgX19B UkNIX1VTRV81TEVWRUxfSEFDSwo+ICAgI2luY2x1ZGUgPGFzbS1nZW5lcmljL3BndGFibGUtbm9w 
bWQuaD4KPiAgIAo+ICAgI2luY2x1ZGUgPGFzbS9ib29rM3MvMzIvaGFzaC5oPgo+IGRpZmYgLS1n aXQgYS9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzY0L2hhc2guaCBiL2FyY2gvcG93 ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3MvNjQvaGFzaC5oCj4gaW5kZXggMjc4MWViZjZhZGQ0Li44 NzZkMTUyOGMyY2YgMTAwNjQ0Cj4gLS0tIGEvYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2sz cy82NC9oYXNoLmgKPiArKysgYi9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzY0L2hh c2guaAo+IEBAIC0xMzQsOSArMTM0LDkgQEAgc3RhdGljIGlubGluZSBpbnQgZ2V0X3JlZ2lvbl9p ZCh1bnNpZ25lZCBsb25nIGVhKQo+ICAgCj4gICAjZGVmaW5lCWhhc2hfX3BtZF9iYWQocG1kKQkJ KHBtZF92YWwocG1kKSAmIEhfUE1EX0JBRF9CSVRTKQo+ICAgI2RlZmluZQloYXNoX19wdWRfYmFk KHB1ZCkJCShwdWRfdmFsKHB1ZCkgJiBIX1BVRF9CQURfQklUUykKPiAtc3RhdGljIGlubGluZSBp bnQgaGFzaF9fcGdkX2JhZChwZ2RfdCBwZ2QpCj4gK3N0YXRpYyBpbmxpbmUgaW50IGhhc2hfX3A0 ZF9iYWQocDRkX3QgcDRkKQo+ICAgewo+IC0JcmV0dXJuIChwZ2RfdmFsKHBnZCkgPT0gMCk7Cj4g KwlyZXR1cm4gKHA0ZF92YWwocDRkKSA9PSAwKTsKPiAgIH0KPiAgICNpZmRlZiBDT05GSUdfU1RS SUNUX0tFUk5FTF9SV1gKPiAgIGV4dGVybiB2b2lkIGhhc2hfX21hcmtfcm9kYXRhX3JvKHZvaWQp Owo+IGRpZmYgLS1naXQgYS9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vYm9vazNzLzY0L3BnYWxs b2MuaCBiL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3MvNjQvcGdhbGxvYy5oCj4gaW5k ZXggYTQxZTkxYmQwNTgwLi42OWM1YjA1MTczNGYgMTAwNjQ0Cj4gLS0tIGEvYXJjaC9wb3dlcnBj L2luY2x1ZGUvYXNtL2Jvb2szcy82NC9wZ2FsbG9jLmgKPiArKysgYi9hcmNoL3Bvd2VycGMvaW5j bHVkZS9hc20vYm9vazNzLzY0L3BnYWxsb2MuaAo+IEBAIC04NSw5ICs4NSw5IEBAIHN0YXRpYyBp bmxpbmUgdm9pZCBwZ2RfZnJlZShzdHJ1Y3QgbW1fc3RydWN0ICptbSwgcGdkX3QgKnBnZCkKPiAg IAlrbWVtX2NhY2hlX2ZyZWUoUEdUX0NBQ0hFKFBHRF9JTkRFWF9TSVpFKSwgcGdkKTsKPiAgIH0K PiAgIAo+IC1zdGF0aWMgaW5saW5lIHZvaWQgcGdkX3BvcHVsYXRlKHN0cnVjdCBtbV9zdHJ1Y3Qg Km1tLCBwZ2RfdCAqcGdkLCBwdWRfdCAqcHVkKQo+ICtzdGF0aWMgaW5saW5lIHZvaWQgcDRkX3Bv cHVsYXRlKHN0cnVjdCBtbV9zdHJ1Y3QgKm1tLCBwNGRfdCAqcGdkLCBwdWRfdCAqcHVkKQo+ICAg ewo+IC0JKnBnZCA9ICBfX3BnZChfX3BndGFibGVfcHRyX3ZhbChwdWQpIHwgUEdEX1ZBTF9CSVRT KTsKPiArCSpwZ2QgPSAgX19wNGQoX19wZ3RhYmxlX3B0cl92YWwocHVkKSB8IFBHRF9WQUxfQklU 
Uyk7Cj4gICB9Cj4gICAKPiAgIHN0YXRpYyBpbmxpbmUgcHVkX3QgKnB1ZF9hbGxvY19vbmUoc3Ry dWN0IG1tX3N0cnVjdCAqbW0sIHVuc2lnbmVkIGxvbmcgYWRkcikKPiBkaWZmIC0tZ2l0IGEvYXJj aC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy82NC9wZ3RhYmxlLmggYi9hcmNoL3Bvd2VycGMv aW5jbHVkZS9hc20vYm9vazNzLzY0L3BndGFibGUuaAo+IGluZGV4IDIwMWE2OWU2YTM1NS4uZGRk ZGJhZmZmMGFiIDEwMDY0NAo+IC0tLSBhL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9ib29rM3Mv NjQvcGd0YWJsZS5oCj4gKysrIGIvYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL2Jvb2szcy82NC9w Z3RhYmxlLmgKPiBAQCAtMiw3ICsyLDcgQEAKPiAgICNpZm5kZWYgX0FTTV9QT1dFUlBDX0JPT0sz U182NF9QR1RBQkxFX0hfCj4gICAjZGVmaW5lIF9BU01fUE9XRVJQQ19CT09LM1NfNjRfUEdUQUJM RV9IXwo+ICAgCj4gLSNpbmNsdWRlIDxhc20tZ2VuZXJpYy81bGV2ZWwtZml4dXAuaD4KPiArI2lu Y2x1ZGUgPGFzbS1nZW5lcmljL3BndGFibGUtbm9wNGQuaD4KPiAgIAo+ICAgI2lmbmRlZiBfX0FT U0VNQkxZX18KPiAgICNpbmNsdWRlIDxsaW51eC9tbWRlYnVnLmg+Cj4gQEAgLTI1MSw3ICsyNTEs NyBAQCBleHRlcm4gdW5zaWduZWQgbG9uZyBfX3BtZF9mcmFnX3NpemVfc2hpZnQ7Cj4gICAvKiBC aXRzIHRvIG1hc2sgb3V0IGZyb20gYSBQVUQgdG8gZ2V0IHRvIHRoZSBQTUQgcGFnZSAqLwo+ICAg I2RlZmluZSBQVURfTUFTS0VEX0JJVFMJCTB4YzAwMDAwMDAwMDAwMDBmZlVMCj4gICAvKiBCaXRz IHRvIG1hc2sgb3V0IGZyb20gYSBQR0QgdG8gZ2V0IHRvIHRoZSBQVUQgcGFnZSAqLwo+IC0jZGVm aW5lIFBHRF9NQVNLRURfQklUUwkJMHhjMDAwMDAwMDAwMDAwMGZmVUwKPiArI2RlZmluZSBQNERf TUFTS0VEX0JJVFMJCTB4YzAwMDAwMDAwMDAwMDBmZlVMCj4gICAKPiAgIC8qCj4gICAgKiBVc2Vk IGFzIGFuIGluZGljYXRvciBmb3IgcmN1IGNhbGxiYWNrIGZ1bmN0aW9ucwo+IEBAIC05NDksNTQg Kzk0OSw2MCBAQCBzdGF0aWMgaW5saW5lIGJvb2wgcHVkX2FjY2Vzc19wZXJtaXR0ZWQocHVkX3Qg cHVkLCBib29sIHdyaXRlKQo+ICAgCXJldHVybiBwdGVfYWNjZXNzX3Blcm1pdHRlZChwdWRfcHRl KHB1ZCksIHdyaXRlKTsKPiAgIH0KPiAgIAo+IC0jZGVmaW5lIHBnZF93cml0ZShwZ2QpCQlwdGVf d3JpdGUocGdkX3B0ZShwZ2QpKQo+ICsjZGVmaW5lIF9fcDRkX3Jhdyh4KQkoKHA0ZF90KSB7IF9f cGdkX3Jhdyh4KSB9KQo+ICtzdGF0aWMgaW5saW5lIF9fYmU2NCBwNGRfcmF3KHA0ZF90IHgpCj4g K3sKPiArCXJldHVybiBwZ2RfcmF3KHgucGdkKTsKPiArfQo+ICsKClNob3VsZG4ndCB0aGlzIGJl IGRlZmluZWQgaW4gYXNtL3BndGFibGUtYmUtdHlwZXMuaCwganVzdCBsaWtlIG90aGVyIApfX3B4 
eF9yYXcoKSA/Cgo+ICsjZGVmaW5lIHA0ZF93cml0ZShwNGQpCQlwdGVfd3JpdGUocDRkX3B0ZShw NGQpKQo+ICAgCj4gLXN0YXRpYyBpbmxpbmUgdm9pZCBwZ2RfY2xlYXIocGdkX3QgKnBnZHApCj4g K3N0YXRpYyBpbmxpbmUgdm9pZCBwNGRfY2xlYXIocDRkX3QgKnA0ZHApCj4gICB7Cj4gLQkqcGdk cCA9IF9fcGdkKDApOwo+ICsJKnA0ZHAgPSBfX3A0ZCgwKTsKPiAgIH0KPiAgIAo+IC1zdGF0aWMg aW5saW5lIGludCBwZ2Rfbm9uZShwZ2RfdCBwZ2QpCj4gK3N0YXRpYyBpbmxpbmUgaW50IHA0ZF9u b25lKHA0ZF90IHA0ZCkKPiAgIHsKPiAtCXJldHVybiAhcGdkX3JhdyhwZ2QpOwo+ICsJcmV0dXJu ICFwNGRfcmF3KHA0ZCk7Cj4gICB9Cj4gICAKPiAtc3RhdGljIGlubGluZSBpbnQgcGdkX3ByZXNl bnQocGdkX3QgcGdkKQo+ICtzdGF0aWMgaW5saW5lIGludCBwNGRfcHJlc2VudChwNGRfdCBwNGQp Cj4gICB7Cj4gLQlyZXR1cm4gISEocGdkX3JhdyhwZ2QpICYgY3B1X3RvX2JlNjQoX1BBR0VfUFJF U0VOVCkpOwo+ICsJcmV0dXJuICEhKHA0ZF9yYXcocDRkKSAmIGNwdV90b19iZTY0KF9QQUdFX1BS RVNFTlQpKTsKPiAgIH0KPiAgIAo+IC1zdGF0aWMgaW5saW5lIHB0ZV90IHBnZF9wdGUocGdkX3Qg cGdkKQo+ICtzdGF0aWMgaW5saW5lIHB0ZV90IHA0ZF9wdGUocDRkX3QgcDRkKQo+ICAgewo+IC0J cmV0dXJuIF9fcHRlX3JhdyhwZ2RfcmF3KHBnZCkpOwo+ICsJcmV0dXJuIF9fcHRlX3JhdyhwNGRf cmF3KHA0ZCkpOwo+ICAgfQo+ICAgCj4gLXN0YXRpYyBpbmxpbmUgcGdkX3QgcHRlX3BnZChwdGVf dCBwdGUpCj4gK3N0YXRpYyBpbmxpbmUgcDRkX3QgcHRlX3A0ZChwdGVfdCBwdGUpCj4gICB7Cj4g LQlyZXR1cm4gX19wZ2RfcmF3KHB0ZV9yYXcocHRlKSk7Cj4gKwlyZXR1cm4gX19wNGRfcmF3KHB0 ZV9yYXcocHRlKSk7Cj4gICB9Cj4gICAKPiAtc3RhdGljIGlubGluZSBpbnQgcGdkX2JhZChwZ2Rf dCBwZ2QpCj4gK3N0YXRpYyBpbmxpbmUgaW50IHA0ZF9iYWQocDRkX3QgcDRkKQo+ICAgewo+ICAg CWlmIChyYWRpeF9lbmFibGVkKCkpCj4gLQkJcmV0dXJuIHJhZGl4X19wZ2RfYmFkKHBnZCk7Cj4g LQlyZXR1cm4gaGFzaF9fcGdkX2JhZChwZ2QpOwo+ICsJCXJldHVybiByYWRpeF9fcDRkX2JhZChw NGQpOwo+ICsJcmV0dXJuIGhhc2hfX3A0ZF9iYWQocDRkKTsKPiAgIH0KPiAgIAo+IC0jZGVmaW5l IHBnZF9hY2Nlc3NfcGVybWl0dGVkIHBnZF9hY2Nlc3NfcGVybWl0dGVkCj4gLXN0YXRpYyBpbmxp bmUgYm9vbCBwZ2RfYWNjZXNzX3Blcm1pdHRlZChwZ2RfdCBwZ2QsIGJvb2wgd3JpdGUpCj4gKyNk ZWZpbmUgcDRkX2FjY2Vzc19wZXJtaXR0ZWQgcDRkX2FjY2Vzc19wZXJtaXR0ZWQKPiArc3RhdGlj IGlubGluZSBib29sIHA0ZF9hY2Nlc3NfcGVybWl0dGVkKHA0ZF90IHA0ZCwgYm9vbCB3cml0ZSkK 
PiAgIHsKPiAtCXJldHVybiBwdGVfYWNjZXNzX3Blcm1pdHRlZChwZ2RfcHRlKHBnZCksIHdyaXRl KTsKPiArCXJldHVybiBwdGVfYWNjZXNzX3Blcm1pdHRlZChwNGRfcHRlKHA0ZCksIHdyaXRlKTsK PiAgIH0KPiAgIAo+IC1leHRlcm4gc3RydWN0IHBhZ2UgKnBnZF9wYWdlKHBnZF90IHBnZCk7Cj4g K2V4dGVybiBzdHJ1Y3QgcGFnZSAqcDRkX3BhZ2UocDRkX3QgcDRkKTsKPiAgIAo+ICAgLyogUG9p bnRlcnMgaW4gdGhlIHBhZ2UgdGFibGUgdHJlZSBhcmUgcGh5c2ljYWwgYWRkcmVzc2VzICovCj4g ICAjZGVmaW5lIF9fcGd0YWJsZV9wdHJfdmFsKHB0cikJX19wYShwdHIpCj4gICAKPiAgICNkZWZp bmUgcG1kX3BhZ2VfdmFkZHIocG1kKQlfX3ZhKHBtZF92YWwocG1kKSAmIH5QTURfTUFTS0VEX0JJ VFMpCj4gICAjZGVmaW5lIHB1ZF9wYWdlX3ZhZGRyKHB1ZCkJX192YShwdWRfdmFsKHB1ZCkgJiB+ UFVEX01BU0tFRF9CSVRTKQo+IC0jZGVmaW5lIHBnZF9wYWdlX3ZhZGRyKHBnZCkJX192YShwZ2Rf dmFsKHBnZCkgJiB+UEdEX01BU0tFRF9CSVRTKQo+ICsjZGVmaW5lIHA0ZF9wYWdlX3ZhZGRyKHA0 ZCkJX192YShwNGRfdmFsKHA0ZCkgJiB+UDREX01BU0tFRF9CSVRTKQo+ICAgCj4gICAjZGVmaW5l IHBnZF9pbmRleChhZGRyZXNzKSAoKChhZGRyZXNzKSA+PiAoUEdESVJfU0hJRlQpKSAmIChQVFJT X1BFUl9QR0QgLSAxKSkKPiAgICNkZWZpbmUgcHVkX2luZGV4KGFkZHJlc3MpICgoKGFkZHJlc3Mp ID4+IChQVURfU0hJRlQpKSAmIChQVFJTX1BFUl9QVUQgLSAxKSkKPiBAQCAtMTAxMCw4ICsxMDE2 LDggQEAgZXh0ZXJuIHN0cnVjdCBwYWdlICpwZ2RfcGFnZShwZ2RfdCBwZ2QpOwo+ICAgCj4gICAj ZGVmaW5lIHBnZF9vZmZzZXQobW0sIGFkZHJlc3MpCSAoKG1tKS0+cGdkICsgcGdkX2luZGV4KGFk ZHJlc3MpKQo+ICAgCj4gLSNkZWZpbmUgcHVkX29mZnNldChwZ2RwLCBhZGRyKQlcCj4gLQkoKChw dWRfdCAqKSBwZ2RfcGFnZV92YWRkcigqKHBnZHApKSkgKyBwdWRfaW5kZXgoYWRkcikpCj4gKyNk ZWZpbmUgcHVkX29mZnNldChwNGRwLCBhZGRyKQlcCj4gKwkoKChwdWRfdCAqKSBwNGRfcGFnZV92 YWRkcigqKHA0ZHApKSkgKyBwdWRfaW5kZXgoYWRkcikpCj4gICAjZGVmaW5lIHBtZF9vZmZzZXQo cHVkcCxhZGRyKSBcCj4gICAJKCgocG1kX3QgKikgcHVkX3BhZ2VfdmFkZHIoKihwdWRwKSkpICsg cG1kX2luZGV4KGFkZHIpKQo+ICAgI2RlZmluZSBwdGVfb2Zmc2V0X2tlcm5lbChkaXIsYWRkcikg XAo+IEBAIC0xMzY4LDYgKzEzNzQsMTIgQEAgc3RhdGljIGlubGluZSBib29sIHB1ZF9pc19sZWFm KHB1ZF90IHB1ZCkKPiAgIAlyZXR1cm4gISEocHVkX3JhdyhwdWQpICYgY3B1X3RvX2JlNjQoX1BB R0VfUFRFKSk7Cj4gICB9Cj4gICAKPiArI2RlZmluZSBwNGRfaXNfbGVhZiBwNGRfaXNfbGVhZgo+ 
ICtzdGF0aWMgaW5saW5lIGJvb2wgcDRkX2lzX2xlYWYocDRkX3QgcDRkKQo+ICt7Cj4gKwlyZXR1 cm4gISEocDRkX3JhdyhwNGQpICYgY3B1X3RvX2JlNjQoX1BBR0VfUFRFKSk7Cj4gK30KPiArCj4g ICAjZGVmaW5lIHBnZF9pc19sZWFmIHBnZF9pc19sZWFmCj4gICAjZGVmaW5lIHBnZF9sZWFmIHBn ZF9pc19sZWFmCj4gICBzdGF0aWMgaW5saW5lIGJvb2wgcGdkX2lzX2xlYWYocGdkX3QgcGdkKQoK Wy4uLl0KCj4gZGlmZiAtLWdpdCBhL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9wZ3RhYmxlLmgg Yi9hcmNoL3Bvd2VycGMvaW5jbHVkZS9hc20vcGd0YWJsZS5oCj4gaW5kZXggOGNjNTQzZWQxMTRj Li4wYTA1ZmRkZDc4ODEgMTAwNjQ0Cj4gLS0tIGEvYXJjaC9wb3dlcnBjL2luY2x1ZGUvYXNtL3Bn dGFibGUuaAo+ICsrKyBiL2FyY2gvcG93ZXJwYy9pbmNsdWRlL2FzbS9wZ3RhYmxlLmgKPiBAQCAt MTM5LDYgKzEzOSwxNCBAQCBzdGF0aWMgaW5saW5lIGJvb2wgcHVkX2lzX2xlYWYocHVkX3QgcHVk KQo+ICAgfQo+ICAgI2VuZGlmCj4gICAKPiArI2lmbmRlZiBwNGRfaXNfbGVhZgo+ICsjZGVmaW5l IHA0ZF9pc19sZWFmIHA0ZF9pc19sZWFmCj4gK3N0YXRpYyBpbmxpbmUgYm9vbCBwNGRfaXNfbGVh ZihwNGRfdCBwNGQpCj4gK3sKPiArCXJldHVybiBmYWxzZTsKPiArfQo+ICsjZW5kaWYKPiArCj4g ICAjaWZuZGVmIHBnZF9pc19sZWFmCj4gICAjZGVmaW5lIHBnZF9pc19sZWFmIHBnZF9pc19sZWFm Cj4gICBzdGF0aWMgaW5saW5lIGJvb2wgcGdkX2lzX2xlYWYocGdkX3QgcGdkKQo+IGRpZmYgLS1n aXQgYS9hcmNoL3Bvd2VycGMva3ZtL2Jvb2szc182NF9tbXVfcmFkaXguYyBiL2FyY2gvcG93ZXJw Yy9rdm0vYm9vazNzXzY0X21tdV9yYWRpeC5jCj4gaW5kZXggODAzOTQwZDc5YjczLi41YWFjZmEw YjI3ZWYgMTAwNjQ0Cj4gLS0tIGEvYXJjaC9wb3dlcnBjL2t2bS9ib29rM3NfNjRfbW11X3JhZGl4 LmMKPiArKysgYi9hcmNoL3Bvd2VycGMva3ZtL2Jvb2szc182NF9tbXVfcmFkaXguYwo+IEBAIC00 OTQsMTcgKzQ5NCwzOSBAQCBzdGF0aWMgdm9pZCBrdm1wcGNfdW5tYXBfZnJlZV9wdWQoc3RydWN0 IGt2bSAqa3ZtLCBwdWRfdCAqcHVkLAo+ICAgCXB1ZF9mcmVlKGt2bS0+bW0sIHB1ZCk7Cj4gICB9 Cj4gICAKPiArc3RhdGljIHZvaWQga3ZtcHBjX3VubWFwX2ZyZWVfcDRkKHN0cnVjdCBrdm0gKmt2 bSwgcDRkX3QgKnA0ZCwKPiArCQkJCSAgdW5zaWduZWQgaW50IGxwaWQpCj4gK3sKPiArCXVuc2ln bmVkIGxvbmcgaXU7Cj4gKwlwNGRfdCAqcCA9IHA0ZDsKPiArCj4gKwlmb3IgKGl1ID0gMDsgaXUg PCBQVFJTX1BFUl9QNEQ7ICsraXUsICsrcCkgewo+ICsJCWlmICghcDRkX3ByZXNlbnQoKnApKQo+ ICsJCQljb250aW51ZTsKPiArCQlpZiAocDRkX2lzX2xlYWYoKnApKSB7Cj4gKwkJCXA0ZF9jbGVh 
cihwKTsKPiArCQl9IGVsc2Ugewo+ICsJCQlwdWRfdCAqcHVkOwo+ICsKPiArCQkJcHVkID0gcHVk X29mZnNldChwLCAwKTsKPiArCQkJa3ZtcHBjX3VubWFwX2ZyZWVfcHVkKGt2bSwgcHVkLCBscGlk KTsKPiArCQkJcDRkX2NsZWFyKHApOwo+ICsJCX0KPiArCX0KPiArCXA0ZF9mcmVlKGt2bS0+bW0s IHA0ZCk7Cj4gK30KPiArCj4gICB2b2lkIGt2bXBwY19mcmVlX3BndGFibGVfcmFkaXgoc3RydWN0 IGt2bSAqa3ZtLCBwZ2RfdCAqcGdkLCB1bnNpZ25lZCBpbnQgbHBpZCkKPiAgIHsKPiAgIAl1bnNp Z25lZCBsb25nIGlnOwo+ICAgCj4gICAJZm9yIChpZyA9IDA7IGlnIDwgUFRSU19QRVJfUEdEOyAr K2lnLCArK3BnZCkgewo+IC0JCXB1ZF90ICpwdWQ7Cj4gKwkJcDRkX3QgKnA0ZDsKPiAgIAo+ICAg CQlpZiAoIXBnZF9wcmVzZW50KCpwZ2QpKQo+ICAgCQkJY29udGludWU7Cj4gLQkJcHVkID0gcHVk X29mZnNldChwZ2QsIDApOwo+IC0JCWt2bXBwY191bm1hcF9mcmVlX3B1ZChrdm0sIHB1ZCwgbHBp ZCk7Cj4gKwkJcDRkID0gcDRkX29mZnNldChwZ2QsIDApOwo+ICsJCWt2bXBwY191bm1hcF9mcmVl X3A0ZChrdm0sIHA0ZCwgbHBpZCk7Cj4gICAJCXBnZF9jbGVhcihwZ2QpOwo+ICAgCX0KPiAgIH0K PiBAQCAtNTY2LDYgKzU4OCw3IEBAIGludCBrdm1wcGNfY3JlYXRlX3B0ZShzdHJ1Y3Qga3ZtICpr dm0sIHBnZF90ICpwZ3RhYmxlLCBwdGVfdCBwdGUsCj4gICAJCSAgICAgIHVuc2lnbmVkIGxvbmcg KnJtYXBwLCBzdHJ1Y3Qgcm1hcF9uZXN0ZWQgKipuX3JtYXApCj4gICB7Cj4gICAJcGdkX3QgKnBn ZDsKPiArCXA0ZF90ICpwNGQsICpuZXdfcDRkID0gTlVMTDsKPiAgIAlwdWRfdCAqcHVkLCAqbmV3 X3B1ZCA9IE5VTEw7Cj4gICAJcG1kX3QgKnBtZCwgKm5ld19wbWQgPSBOVUxMOwo+ICAgCXB0ZV90 ICpwdGVwLCAqbmV3X3B0ZXAgPSBOVUxMOwo+IEBAIC01NzMsOSArNTk2LDE1IEBAIGludCBrdm1w cGNfY3JlYXRlX3B0ZShzdHJ1Y3Qga3ZtICprdm0sIHBnZF90ICpwZ3RhYmxlLCBwdGVfdCBwdGUs Cj4gICAKPiAgIAkvKiBUcmF2ZXJzZSB0aGUgZ3Vlc3QncyAybmQtbGV2ZWwgdHJlZSwgYWxsb2Nh dGUgbmV3IGxldmVscyBuZWVkZWQgKi8KPiAgIAlwZ2QgPSBwZ3RhYmxlICsgcGdkX2luZGV4KGdw YSk7Cj4gLQlwdWQgPSBOVUxMOwo+ICsJcDRkID0gTlVMTDsKPiAgIAlpZiAocGdkX3ByZXNlbnQo KnBnZCkpCj4gLQkJcHVkID0gcHVkX29mZnNldChwZ2QsIGdwYSk7Cj4gKwkJcDRkID0gcDRkX29m ZnNldChwZ2QsIGdwYSk7Cj4gKwllbHNlCj4gKwkJbmV3X3A0ZCA9IHA0ZF9hbGxvY19vbmUoa3Zt LT5tbSwgZ3BhKTsKPiArCj4gKwlwdWQgPSBOVUxMOwo+ICsJaWYgKHA0ZF9wcmVzZW50KCpwNGQp KQo+ICsJCXB1ZCA9IHB1ZF9vZmZzZXQocDRkLCBncGEpOwoKSXMgaXQgd29ydGggYWRkaW5nIGFs 
bCB0aGlzIG5ldyBjb2RlID8KCk15IHVuZGVyc3RhbmRpbmcgaXMgdGhhdCB0aGUgc2VyaWVzIG9i amVjdGl2ZSBpcyB0byBnZXQgcmlkIG9mIApfX0FSQ0hfSEFTXzVMRVZFTF9IQUNLLCB0byB0byBh ZGQgc3VwcG9ydCBmb3IgNSBsZXZlbHMgdG8gYW4gCmFyY2hpdGVjdHVyZSB0aGF0IG5vdCBuZWVk IGl0IChhdCBsZWFzdCBmb3Igbm93KS4KSWYgd2Ugd2FudCB0byBhZGQgc3VwcG9ydCBmb3IgNSBs ZXZlbHMsIGl0IGNhbiBiZSBkb25lIGxhdGVyIGluIGFub3RoZXIgCnBhdGNoLgoKSGVyZSBJIHRo aW5rIHlvdXIgY2hhbmdlIGNvdWxkIGJlIGxpbWl0ZWQgdG86CgotCQlwdWQgPSBwdWRfb2Zmc2V0 KHBnZCwgZ3BhKTsKKwkJcHVkID0gcHVkX29mZnNldChwNGRfb2Zmc2V0KHBnZCwgZ3BhKSwgZ3Bh KTsKCgo+ICAgCWVsc2UKPiAgIAkJbmV3X3B1ZCA9IHB1ZF9hbGxvY19vbmUoa3ZtLT5tbSwgZ3Bh KTsKPiAgIAo+IEBAIC01OTcsMTIgKzYyNiwxOCBAQCBpbnQga3ZtcHBjX2NyZWF0ZV9wdGUoc3Ry dWN0IGt2bSAqa3ZtLCBwZ2RfdCAqcGd0YWJsZSwgcHRlX3QgcHRlLAo+ICAgCS8qIE5vdyB0cmF2 ZXJzZSBhZ2FpbiB1bmRlciB0aGUgbG9jayBhbmQgY2hhbmdlIHRoZSB0cmVlICovCj4gICAJcmV0 ID0gLUVOT01FTTsKPiAgIAlpZiAocGdkX25vbmUoKnBnZCkpIHsKPiArCQlpZiAoIW5ld19wNGQp Cj4gKwkJCWdvdG8gb3V0X3VubG9jazsKPiArCQlwZ2RfcG9wdWxhdGUoa3ZtLT5tbSwgcGdkLCBu ZXdfcDRkKTsKPiArCQluZXdfcDRkID0gTlVMTDsKPiArCX0KPiArCWlmIChwNGRfbm9uZSgqcDRk KSkgewo+ICAgCQlpZiAoIW5ld19wdWQpCj4gICAJCQlnb3RvIG91dF91bmxvY2s7Cj4gLQkJcGdk X3BvcHVsYXRlKGt2bS0+bW0sIHBnZCwgbmV3X3B1ZCk7Cj4gKwkJcDRkX3BvcHVsYXRlKGt2bS0+ bW0sIHA0ZCwgbmV3X3B1ZCk7Cj4gICAJCW5ld19wdWQgPSBOVUxMOwo+ICAgCX0KPiAtCXB1ZCA9 IHB1ZF9vZmZzZXQocGdkLCBncGEpOwo+ICsJcHVkID0gcHVkX29mZnNldChwNGQsIGdwYSk7Cj4g ICAJaWYgKHB1ZF9pc19sZWFmKCpwdWQpKSB7Cj4gICAJCXVuc2lnbmVkIGxvbmcgaGdwYSA9IGdw YSAmIFBVRF9NQVNLOwo+ICAgCj4gQEAgLTEyMjAsNiArMTI1NSw3IEBAIHN0YXRpYyBzc2l6ZV90 IGRlYnVnZnNfcmFkaXhfcmVhZChzdHJ1Y3QgZmlsZSAqZmlsZSwgY2hhciBfX3VzZXIgKmJ1ZiwK PiAgIAlwZ2RfdCAqcGd0Owo+ICAgCXN0cnVjdCBrdm1fbmVzdGVkX2d1ZXN0ICpuZXN0ZWQ7Cj4g ICAJcGdkX3QgcGdkLCAqcGdkcDsKPiArCXA0ZF90IHA0ZCwgKnA0ZHA7Cj4gICAJcHVkX3QgcHVk LCAqcHVkcDsKPiAgIAlwbWRfdCBwbWQsICpwbWRwOwo+ICAgCXB0ZV90ICpwdGVwOwo+IEBAIC0x Mjk4LDcgKzEzMzQsMTQgQEAgc3RhdGljIHNzaXplX3QgZGVidWdmc19yYWRpeF9yZWFkKHN0cnVj 
dCBmaWxlICpmaWxlLCBjaGFyIF9fdXNlciAqYnVmLAo+ICAgCQkJY29udGludWU7Cj4gICAJCX0K PiAgIAo+IC0JCXB1ZHAgPSBwdWRfb2Zmc2V0KCZwZ2QsIGdwYSk7Cj4gKwkJcDRkcCA9IHA0ZF9v ZmZzZXQoJnBnZCwgZ3BhKTsKPiArCQlwNGQgPSBSRUFEX09OQ0UoKnA0ZHApOwo+ICsJCWlmICgh KHA0ZF92YWwocDRkKSAmIF9QQUdFX1BSRVNFTlQpKSB7Cj4gKwkJCWdwYSA9IChncGEgJiBQNERf TUFTSykgKyBQNERfU0laRTsKPiArCQkJY29udGludWU7Cj4gKwkJfQo+ICsKPiArCQlwdWRwID0g cHVkX29mZnNldCgmcDRkLCBncGEpOwoKU2FtZSwgaGVyZSB5b3UgYXJlIGZvcmNpbmcgYSB1c2Vs ZXNzIHJlYWQgd2l0aCBSRUFEX09OQ0UoKS4KCllvdXIgY2hhbmdlIGNvdWxkIGJlIGxpbWl0ZWQg dG8KCi0JCXB1ZHAgPSBwdWRfb2Zmc2V0KCZwZ2QsIGdwYSk7CisJCXB1ZHAgPSBwdWRfb2Zmc2V0 KHA0ZF9vZmZzZXQoJnBnZCwgZ3BhKSwgZ3BhKTsKClRoaXMgY29tbWVudCBhcHBsaWVzIHRvIG1h bnkgb3RoZXIgcGxhY2VzLgoKCj4gICAJCXB1ZCA9IFJFQURfT05DRSgqcHVkcCk7Cj4gICAJCWlm ICghKHB1ZF92YWwocHVkKSAmIF9QQUdFX1BSRVNFTlQpKSB7Cj4gICAJCQlncGEgPSAoZ3BhICYg UFVEX01BU0spICsgUFVEX1NJWkU7Cj4gZGlmZiAtLWdpdCBhL2FyY2gvcG93ZXJwYy9saWIvY29k ZS1wYXRjaGluZy5jIGIvYXJjaC9wb3dlcnBjL2xpYi9jb2RlLXBhdGNoaW5nLmMKPiBpbmRleCAz MzQ1ZjAzOWE4NzYuLjdhNTlmNjg2M2NlYyAxMDA2NDQKPiAtLS0gYS9hcmNoL3Bvd2VycGMvbGli L2NvZGUtcGF0Y2hpbmcuYwo+ICsrKyBiL2FyY2gvcG93ZXJwYy9saWIvY29kZS1wYXRjaGluZy5j Cj4gQEAgLTEwNywxMyArMTA3LDE4IEBAIHN0YXRpYyBpbmxpbmUgaW50IHVubWFwX3BhdGNoX2Fy ZWEodW5zaWduZWQgbG9uZyBhZGRyKQo+ICAgCXB0ZV90ICpwdGVwOwo+ICAgCXBtZF90ICpwbWRw Owo+ICAgCXB1ZF90ICpwdWRwOwo+ICsJcDRkX3QgKnA0ZHA7Cj4gICAJcGdkX3QgKnBnZHA7Cj4g ICAKPiAgIAlwZ2RwID0gcGdkX29mZnNldF9rKGFkZHIpOwo+ICAgCWlmICh1bmxpa2VseSghcGdk cCkpCj4gICAJCXJldHVybiAtRUlOVkFMOwo+ICAgCj4gLQlwdWRwID0gcHVkX29mZnNldChwZ2Rw LCBhZGRyKTsKPiArCXA0ZHAgPSBwNGRfb2Zmc2V0KHBnZHAsIGFkZHIpOwo+ICsJaWYgKHVubGlr ZWx5KCFwNGRwKSkKPiArCQlyZXR1cm4gLUVJTlZBTDsKPiArCj4gKwlwdWRwID0gcHVkX29mZnNl dChwNGRwLCBhZGRyKTsKPiAgIAlpZiAodW5saWtlbHkoIXB1ZHApKQo+ICAgCQlyZXR1cm4gLUVJ TlZBTDsKPiAgIAo+IGRpZmYgLS1naXQgYS9hcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMg Yi9hcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMKPiBpbmRleCAwYTFjNjVhMmM1NjUuLmIy 
ZmMzZTcxMTY1YyAxMDA2NDQKPiAtLS0gYS9hcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMK PiArKysgYi9hcmNoL3Bvd2VycGMvbW0vYm9vazNzMzIvbW11LmMKPiBAQCAtMzEyLDcgKzMxMiw3 IEBAIHZvaWQgaGFzaF9wcmVsb2FkKHN0cnVjdCBtbV9zdHJ1Y3QgKm1tLCB1bnNpZ25lZCBsb25n IGVhKQo+ICAgCj4gICAJaWYgKCFIYXNoKQo+ICAgCQlyZXR1cm47Cj4gLQlwbWQgPSBwbWRfb2Zm c2V0KHB1ZF9vZmZzZXQocGdkX29mZnNldChtbSwgZWEpLCBlYSksIGVhKTsKPiArCXBtZCA9IHBt ZF9vZmZzZXQocHVkX29mZnNldChwNGRfb2Zmc2V0KHBnZF9vZmZzZXQobW0sIGVhKSwgZWEpLCBl YSksIGVhKTsKCklmIHdlIGNvbnRpbnVlIGxpa2UgdGhpcywgaW4gdGVuIHllYXJzIHRoaXMgbGlr ZSBpcyBnb2luZyB0byBiZSBtYW55IApraWxvbWV0ZXJzIGxvbmcuCgpJIHRoaW5rIHRoZSBhYm92 ZSB3b3VsZCBiZSB3b3J0aCBhIGdlbmVyaWMgaGVscGVyLgoKPiAgIAlpZiAoIXBtZF9ub25lKCpw bWQpKQo+ICAgCQlhZGRfaGFzaF9wYWdlKG1tLT5jb250ZXh0LmlkLCBlYSwgcG1kX3ZhbCgqcG1k KSk7Cj4gICB9Cj4gZGlmZiAtLWdpdCBhL2FyY2gvcG93ZXJwYy9tbS9ib29rM3MzMi90bGIuYyBi L2FyY2gvcG93ZXJwYy9tbS9ib29rM3MzMi90bGIuYwo+IGluZGV4IDJmY2QzMjEwNDBmZi4uMTc1 YmMzM2I0MWI3IDEwMDY0NAo+IC0tLSBhL2FyY2gvcG93ZXJwYy9tbS9ib29rM3MzMi90bGIuYwo+ ICsrKyBiL2FyY2gvcG93ZXJwYy9tbS9ib29rM3MzMi90bGIuYwo+IEBAIC04Nyw3ICs4Nyw3IEBA IHN0YXRpYyB2b2lkIGZsdXNoX3JhbmdlKHN0cnVjdCBtbV9zdHJ1Y3QgKm1tLCB1bnNpZ25lZCBs b25nIHN0YXJ0LAo+ICAgCWlmIChzdGFydCA+PSBlbmQpCj4gICAJCXJldHVybjsKPiAgIAllbmQg PSAoZW5kIC0gMSkgfCB+UEFHRV9NQVNLOwo+IC0JcG1kID0gcG1kX29mZnNldChwdWRfb2Zmc2V0 KHBnZF9vZmZzZXQobW0sIHN0YXJ0KSwgc3RhcnQpLCBzdGFydCk7Cj4gKwlwbWQgPSBwbWRfb2Zm c2V0KHB1ZF9vZmZzZXQocDRkX29mZnNldChwZ2Rfb2Zmc2V0KG1tLCBzdGFydCksIHN0YXJ0KSwg c3RhcnQpLCBzdGFydCk7Cj4gICAJZm9yICg7Oykgewo+ICAgCQlwbWRfZW5kID0gKChzdGFydCAr IFBHRElSX1NJWkUpICYgUEdESVJfTUFTSykgLSAxOwo+ICAgCQlpZiAocG1kX2VuZCA+IGVuZCkK PiBAQCAtMTQ1LDcgKzE0NSw3IEBAIHZvaWQgZmx1c2hfdGxiX3BhZ2Uoc3RydWN0IHZtX2FyZWFf c3RydWN0ICp2bWEsIHVuc2lnbmVkIGxvbmcgdm1hZGRyKQo+ICAgCQlyZXR1cm47Cj4gICAJfQo+ ICAgCW1tID0gKHZtYWRkciA8IFRBU0tfU0laRSk/IHZtYS0+dm1fbW06ICZpbml0X21tOwo+IC0J cG1kID0gcG1kX29mZnNldChwdWRfb2Zmc2V0KHBnZF9vZmZzZXQobW0sIHZtYWRkciksIHZtYWRk 
ciksIHZtYWRkcik7Cj4gKwlwbWQgPSBwbWRfb2Zmc2V0KHB1ZF9vZmZzZXQocDRkX29mZnNldChw Z2Rfb2Zmc2V0KG1tLCB2bWFkZHIpLCB2bWFkZHIpLCB2bWFkZHIpLCB2bWFkZHIpOwo+ICAgCWlm ICghcG1kX25vbmUoKnBtZCkpCj4gICAJCWZsdXNoX2hhc2hfcGFnZXMobW0tPmNvbnRleHQuaWQs IHZtYWRkciwgcG1kX3ZhbCgqcG1kKSwgMSk7Cj4gICB9Cj4gZGlmZiAtLWdpdCBhL2FyY2gvcG93 ZXJwYy9tbS9ib29rM3M2NC9oYXNoX3BndGFibGUuYyBiL2FyY2gvcG93ZXJwYy9tbS9ib29rM3M2 NC9oYXNoX3BndGFibGUuYwo+IGluZGV4IDY0NzMzYjljYjIwYS4uOWNkMTU5MzdlODhhIDEwMDY0 NAo+IC0tLSBhL2FyY2gvcG93ZXJwYy9tbS9ib29rM3M2NC9oYXNoX3BndGFibGUuYwo+ICsrKyBi L2FyY2gvcG93ZXJwYy9tbS9ib29rM3M2NC9oYXNoX3BndGFibGUuYwo+IEBAIC0xNDgsNiArMTQ4 LDcgQEAgdm9pZCBoYXNoX192bWVtbWFwX3JlbW92ZV9tYXBwaW5nKHVuc2lnbmVkIGxvbmcgc3Rh cnQsCj4gICBpbnQgaGFzaF9fbWFwX2tlcm5lbF9wYWdlKHVuc2lnbmVkIGxvbmcgZWEsIHVuc2ln bmVkIGxvbmcgcGEsIHBncHJvdF90IHByb3QpCj4gICB7Cj4gICAJcGdkX3QgKnBnZHA7Cj4gKwlw NGRfdCAqcDRkcDsKPiAgIAlwdWRfdCAqcHVkcDsKPiAgIAlwbWRfdCAqcG1kcDsKPiAgIAlwdGVf dCAqcHRlcDsKPiBAQCAtMTU1LDcgKzE1Niw4IEBAIGludCBoYXNoX19tYXBfa2VybmVsX3BhZ2Uo dW5zaWduZWQgbG9uZyBlYSwgdW5zaWduZWQgbG9uZyBwYSwgcGdwcm90X3QgcHJvdCkKPiAgIAlC VUlMRF9CVUdfT04oVEFTS19TSVpFX1VTRVI2NCA+IEhfUEdUQUJMRV9SQU5HRSk7Cj4gICAJaWYg KHNsYWJfaXNfYXZhaWxhYmxlKCkpIHsKPiAgIAkJcGdkcCA9IHBnZF9vZmZzZXRfayhlYSk7Cj4g LQkJcHVkcCA9IHB1ZF9hbGxvYygmaW5pdF9tbSwgcGdkcCwgZWEpOwo+ICsJCXA0ZHAgPSBwNGRf b2Zmc2V0KHBnZHAsIGVhKTsKPiArCQlwdWRwID0gcHVkX2FsbG9jKCZpbml0X21tLCBwNGRwLCBl YSk7CgpDb3VsZCBiZSBhIHNpbmdsZSBsaW5lLCB3aXRob3V0IGEgbmV3IHZhci4KCi0JCXB1ZHAg PSBwdWRfYWxsb2MoJmluaXRfbW0sIHBnZHAsIGVhKTsKKwkJcHVkcCA9IHB1ZF9hbGxvYygmaW5p dF9tbSwgcDRkX29mZnNldChwZ2RwLCBlYSksIGVhKTsKCgpTYW1lIGtpbmQgb2YgY29tbWVudHMg YXMgYWxyZWFkeSBkb25lIGFwcGx5IHRvIHRoZSByZXN0LgoKQ2hyaXN0b3BoZQoKX19fX19fX19f X19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX19fX18KbGludXgtYXJtLWtlcm5lbCBt YWlsaW5nIGxpc3QKbGludXgtYXJtLWtlcm5lbEBsaXN0cy5pbmZyYWRlYWQub3JnCmh0dHA6Ly9s aXN0cy5pbmZyYWRlYWQub3JnL21haWxtYW4vbGlzdGluZm8vbGludXgtYXJtLWtlcm5lbAo= From mboxrd@z Thu Jan 1 00:00:00 1970 From: Christophe Leroy Date: Sun, 16 Feb 
2020 11:41:07 +0100 Subject: [OpenRISC] [PATCH v2 07/13] powerpc: add support for folded p4d page tables In-Reply-To: <20200216081843.28670-8-rppt@kernel.org> References: <20200216081843.28670-1-rppt@kernel.org> <20200216081843.28670-8-rppt@kernel.org> Message-ID: List-Id: MIME-Version: 1.0 Content-Type: text/plain; charset="utf-8" Content-Transfer-Encoding: 8bit To: openrisc@lists.librecores.org Le 16/02/2020 à 09:18, Mike Rapoport a écrit : > From: Mike Rapoport > > Implement primitives necessary for the 4th level folding, add walks of p4d > level where appropriate and replace 5level-fixup.h with pgtable-nop4d.h. I don't think it is worth adding all this additionnals walks of p4d, this patch could be limited to changes like: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); The additionnal walks should be added through another patch the day powerpc need them. See below for more comments. > > Signed-off-by: Mike Rapoport > Tested-by: Christophe Leroy # 8xx and 83xx > --- > arch/powerpc/include/asm/book3s/32/pgtable.h | 1 - > arch/powerpc/include/asm/book3s/64/hash.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgalloc.h | 4 +- > arch/powerpc/include/asm/book3s/64/pgtable.h | 58 ++++++++++-------- > arch/powerpc/include/asm/book3s/64/radix.h | 6 +- > arch/powerpc/include/asm/nohash/32/pgtable.h | 1 - > arch/powerpc/include/asm/nohash/64/pgalloc.h | 2 +- > .../include/asm/nohash/64/pgtable-4k.h | 32 +++++----- > arch/powerpc/include/asm/nohash/64/pgtable.h | 6 +- > arch/powerpc/include/asm/pgtable.h | 8 +++ > arch/powerpc/kvm/book3s_64_mmu_radix.c | 59 ++++++++++++++++--- > arch/powerpc/lib/code-patching.c | 7 ++- > arch/powerpc/mm/book3s32/mmu.c | 2 +- > arch/powerpc/mm/book3s32/tlb.c | 4 +- > arch/powerpc/mm/book3s64/hash_pgtable.c | 4 +- > arch/powerpc/mm/book3s64/radix_pgtable.c | 19 ++++-- > arch/powerpc/mm/book3s64/subpage_prot.c | 6 +- > arch/powerpc/mm/hugetlbpage.c | 28 +++++---- > arch/powerpc/mm/kasan/kasan_init_32.c | 8 +-- > 
arch/powerpc/mm/mem.c | 4 +- > arch/powerpc/mm/nohash/40x.c | 4 +- > arch/powerpc/mm/nohash/book3e_pgtable.c | 15 +++-- > arch/powerpc/mm/pgtable.c | 25 +++++++- > arch/powerpc/mm/pgtable_32.c | 28 +++++---- > arch/powerpc/mm/pgtable_64.c | 10 ++-- > arch/powerpc/mm/ptdump/hashpagetable.c | 20 ++++++- > arch/powerpc/mm/ptdump/ptdump.c | 22 ++++++- > arch/powerpc/xmon/xmon.c | 17 +++++- > 28 files changed, 284 insertions(+), 120 deletions(-) > > diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h > index 5b39c11e884a..39ec11371be0 100644 > --- a/arch/powerpc/include/asm/book3s/32/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h > @@ -2,7 +2,6 @@ > #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H > #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H > > -#define __ARCH_USE_5LEVEL_HACK > #include > > #include > diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h > index 2781ebf6add4..876d1528c2cf 100644 > --- a/arch/powerpc/include/asm/book3s/64/hash.h > +++ b/arch/powerpc/include/asm/book3s/64/hash.h > @@ -134,9 +134,9 @@ static inline int get_region_id(unsigned long ea) > > #define hash__pmd_bad(pmd) (pmd_val(pmd) & H_PMD_BAD_BITS) > #define hash__pud_bad(pud) (pud_val(pud) & H_PUD_BAD_BITS) > -static inline int hash__pgd_bad(pgd_t pgd) > +static inline int hash__p4d_bad(p4d_t p4d) > { > - return (pgd_val(pgd) == 0); > + return (p4d_val(p4d) == 0); > } > #ifdef CONFIG_STRICT_KERNEL_RWX > extern void hash__mark_rodata_ro(void); > diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h > index a41e91bd0580..69c5b051734f 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgalloc.h > +++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h > @@ -85,9 +85,9 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) > kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); > } > > -static inline void pgd_populate(struct mm_struct *mm, 
pgd_t *pgd, pud_t *pud) > +static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud) > { > - *pgd = __pgd(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > + *pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS); > } > > static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) > diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h > index 201a69e6a355..ddddbafff0ab 100644 > --- a/arch/powerpc/include/asm/book3s/64/pgtable.h > +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h > @@ -2,7 +2,7 @@ > #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ > > -#include > +#include > > #ifndef __ASSEMBLY__ > #include > @@ -251,7 +251,7 @@ extern unsigned long __pmd_frag_size_shift; > /* Bits to mask out from a PUD to get to the PMD page */ > #define PUD_MASKED_BITS 0xc0000000000000ffUL > /* Bits to mask out from a PGD to get to the PUD page */ > -#define PGD_MASKED_BITS 0xc0000000000000ffUL > +#define P4D_MASKED_BITS 0xc0000000000000ffUL > > /* > * Used as an indicator for rcu callback functions > @@ -949,54 +949,60 @@ static inline bool pud_access_permitted(pud_t pud, bool write) > return pte_access_permitted(pud_pte(pud), write); > } > > -#define pgd_write(pgd) pte_write(pgd_pte(pgd)) > +#define __p4d_raw(x) ((p4d_t) { __pgd_raw(x) }) > +static inline __be64 p4d_raw(p4d_t x) > +{ > + return pgd_raw(x.pgd); > +} > + Shouldn't this be defined in asm/pgtable-be-types.h, just like other __pxx_raw() ? 
> +#define p4d_write(p4d) pte_write(p4d_pte(p4d)) > > -static inline void pgd_clear(pgd_t *pgdp) > +static inline void p4d_clear(p4d_t *p4dp) > { > - *pgdp = __pgd(0); > + *p4dp = __p4d(0); > } > > -static inline int pgd_none(pgd_t pgd) > +static inline int p4d_none(p4d_t p4d) > { > - return !pgd_raw(pgd); > + return !p4d_raw(p4d); > } > > -static inline int pgd_present(pgd_t pgd) > +static inline int p4d_present(p4d_t p4d) > { > - return !!(pgd_raw(pgd) & cpu_to_be64(_PAGE_PRESENT)); > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PRESENT)); > } > > -static inline pte_t pgd_pte(pgd_t pgd) > +static inline pte_t p4d_pte(p4d_t p4d) > { > - return __pte_raw(pgd_raw(pgd)); > + return __pte_raw(p4d_raw(p4d)); > } > > -static inline pgd_t pte_pgd(pte_t pte) > +static inline p4d_t pte_p4d(pte_t pte) > { > - return __pgd_raw(pte_raw(pte)); > + return __p4d_raw(pte_raw(pte)); > } > > -static inline int pgd_bad(pgd_t pgd) > +static inline int p4d_bad(p4d_t p4d) > { > if (radix_enabled()) > - return radix__pgd_bad(pgd); > - return hash__pgd_bad(pgd); > + return radix__p4d_bad(p4d); > + return hash__p4d_bad(p4d); > } > > -#define pgd_access_permitted pgd_access_permitted > -static inline bool pgd_access_permitted(pgd_t pgd, bool write) > +#define p4d_access_permitted p4d_access_permitted > +static inline bool p4d_access_permitted(p4d_t p4d, bool write) > { > - return pte_access_permitted(pgd_pte(pgd), write); > + return pte_access_permitted(p4d_pte(p4d), write); > } > > -extern struct page *pgd_page(pgd_t pgd); > +extern struct page *p4d_page(p4d_t p4d); > > /* Pointers in the page table tree are physical addresses */ > #define __pgtable_ptr_val(ptr) __pa(ptr) > > #define pmd_page_vaddr(pmd) __va(pmd_val(pmd) & ~PMD_MASKED_BITS) > #define pud_page_vaddr(pud) __va(pud_val(pud) & ~PUD_MASKED_BITS) > -#define pgd_page_vaddr(pgd) __va(pgd_val(pgd) & ~PGD_MASKED_BITS) > +#define p4d_page_vaddr(p4d) __va(p4d_val(p4d) & ~P4D_MASKED_BITS) > > #define pgd_index(address) (((address) >> 
(PGDIR_SHIFT)) & (PTRS_PER_PGD - 1)) > #define pud_index(address) (((address) >> (PUD_SHIFT)) & (PTRS_PER_PUD - 1)) > @@ -1010,8 +1016,8 @@ extern struct page *pgd_page(pgd_t pgd); > > #define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) > > -#define pud_offset(pgdp, addr) \ > - (((pud_t *) pgd_page_vaddr(*(pgdp))) + pud_index(addr)) > +#define pud_offset(p4dp, addr) \ > + (((pud_t *) p4d_page_vaddr(*(p4dp))) + pud_index(addr)) > #define pmd_offset(pudp,addr) \ > (((pmd_t *) pud_page_vaddr(*(pudp))) + pmd_index(addr)) > #define pte_offset_kernel(dir,addr) \ > @@ -1368,6 +1374,12 @@ static inline bool pud_is_leaf(pud_t pud) > return !!(pud_raw(pud) & cpu_to_be64(_PAGE_PTE)); > } > > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return !!(p4d_raw(p4d) & cpu_to_be64(_PAGE_PTE)); > +} > + > #define pgd_is_leaf pgd_is_leaf > #define pgd_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) [...] > diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h > index 8cc543ed114c..0a05fddd7881 100644 > --- a/arch/powerpc/include/asm/pgtable.h > +++ b/arch/powerpc/include/asm/pgtable.h > @@ -139,6 +139,14 @@ static inline bool pud_is_leaf(pud_t pud) > } > #endif > > +#ifndef p4d_is_leaf > +#define p4d_is_leaf p4d_is_leaf > +static inline bool p4d_is_leaf(p4d_t p4d) > +{ > + return false; > +} > +#endif > + > #ifndef pgd_is_leaf > #define pgd_is_leaf pgd_is_leaf > static inline bool pgd_is_leaf(pgd_t pgd) > diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c > index 803940d79b73..5aacfa0b27ef 100644 > --- a/arch/powerpc/kvm/book3s_64_mmu_radix.c > +++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c > @@ -494,17 +494,39 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud, > pud_free(kvm->mm, pud); > } > > +static void kvmppc_unmap_free_p4d(struct kvm *kvm, p4d_t *p4d, > + unsigned int lpid) > +{ > + unsigned long iu; > + p4d_t *p = p4d; > + > + 
for (iu = 0; iu < PTRS_PER_P4D; ++iu, ++p) { > + if (!p4d_present(*p)) > + continue; > + if (p4d_is_leaf(*p)) { > + p4d_clear(p); > + } else { > + pud_t *pud; > + > + pud = pud_offset(p, 0); > + kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d_clear(p); > + } > + } > + p4d_free(kvm->mm, p4d); > +} > + > void kvmppc_free_pgtable_radix(struct kvm *kvm, pgd_t *pgd, unsigned int lpid) > { > unsigned long ig; > > for (ig = 0; ig < PTRS_PER_PGD; ++ig, ++pgd) { > - pud_t *pud; > + p4d_t *p4d; > > if (!pgd_present(*pgd)) > continue; > - pud = pud_offset(pgd, 0); > - kvmppc_unmap_free_pud(kvm, pud, lpid); > + p4d = p4d_offset(pgd, 0); > + kvmppc_unmap_free_p4d(kvm, p4d, lpid); > pgd_clear(pgd); > } > } > @@ -566,6 +588,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > unsigned long *rmapp, struct rmap_nested **n_rmap) > { > pgd_t *pgd; > + p4d_t *p4d, *new_p4d = NULL; > pud_t *pud, *new_pud = NULL; > pmd_t *pmd, *new_pmd = NULL; > pte_t *ptep, *new_ptep = NULL; > @@ -573,9 +596,15 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > > /* Traverse the guest's 2nd-level tree, allocate new levels needed */ > pgd = pgtable + pgd_index(gpa); > - pud = NULL; > + p4d = NULL; > if (pgd_present(*pgd)) > - pud = pud_offset(pgd, gpa); > + p4d = p4d_offset(pgd, gpa); > + else > + new_p4d = p4d_alloc_one(kvm->mm, gpa); > + > + pud = NULL; > + if (p4d_present(*p4d)) > + pud = pud_offset(p4d, gpa); Is it worth adding all this new code ? My understanding is that the series objective is to get rid of __ARCH_HAS_5LEVEL_HACK, to to add support for 5 levels to an architecture that not need it (at least for now). If we want to add support for 5 levels, it can be done later in another patch. 
Here I think your change could be limited to: - pud = pud_offset(pgd, gpa); + pud = pud_offset(p4d_offset(pgd, gpa), gpa); > else > new_pud = pud_alloc_one(kvm->mm, gpa); > > @@ -597,12 +626,18 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte, > /* Now traverse again under the lock and change the tree */ > ret = -ENOMEM; > if (pgd_none(*pgd)) { > + if (!new_p4d) > + goto out_unlock; > + pgd_populate(kvm->mm, pgd, new_p4d); > + new_p4d = NULL; > + } > + if (p4d_none(*p4d)) { > if (!new_pud) > goto out_unlock; > - pgd_populate(kvm->mm, pgd, new_pud); > + p4d_populate(kvm->mm, p4d, new_pud); > new_pud = NULL; > } > - pud = pud_offset(pgd, gpa); > + pud = pud_offset(p4d, gpa); > if (pud_is_leaf(*pud)) { > unsigned long hgpa = gpa & PUD_MASK; > > @@ -1220,6 +1255,7 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > pgd_t *pgt; > struct kvm_nested_guest *nested; > pgd_t pgd, *pgdp; > + p4d_t p4d, *p4dp; > pud_t pud, *pudp; > pmd_t pmd, *pmdp; > pte_t *ptep; > @@ -1298,7 +1334,14 @@ static ssize_t debugfs_radix_read(struct file *file, char __user *buf, > continue; > } > > - pudp = pud_offset(&pgd, gpa); > + p4dp = p4d_offset(&pgd, gpa); > + p4d = READ_ONCE(*p4dp); > + if (!(p4d_val(p4d) & _PAGE_PRESENT)) { > + gpa = (gpa & P4D_MASK) + P4D_SIZE; > + continue; > + } > + > + pudp = pud_offset(&p4d, gpa); Same, here you are forcing a useless read with READ_ONCE(). Your change could be limited to - pudp = pud_offset(&pgd, gpa); + pudp = pud_offset(p4d_offset(&pgd, gpa), gpa); This comment applies to many other places. 
> pud = READ_ONCE(*pudp); > if (!(pud_val(pud) & _PAGE_PRESENT)) { > gpa = (gpa & PUD_MASK) + PUD_SIZE; > diff --git a/arch/powerpc/lib/code-patching.c b/arch/powerpc/lib/code-patching.c > index 3345f039a876..7a59f6863cec 100644 > --- a/arch/powerpc/lib/code-patching.c > +++ b/arch/powerpc/lib/code-patching.c > @@ -107,13 +107,18 @@ static inline int unmap_patch_area(unsigned long addr) > pte_t *ptep; > pmd_t *pmdp; > pud_t *pudp; > + p4d_t *p4dp; > pgd_t *pgdp; > > pgdp = pgd_offset_k(addr); > if (unlikely(!pgdp)) > return -EINVAL; > > - pudp = pud_offset(pgdp, addr); > + p4dp = p4d_offset(pgdp, addr); > + if (unlikely(!p4dp)) > + return -EINVAL; > + > + pudp = pud_offset(p4dp, addr); > if (unlikely(!pudp)) > return -EINVAL; > > diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c > index 0a1c65a2c565..b2fc3e71165c 100644 > --- a/arch/powerpc/mm/book3s32/mmu.c > +++ b/arch/powerpc/mm/book3s32/mmu.c > @@ -312,7 +312,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea) > > if (!Hash) > return; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, ea), ea), ea), ea); If we continue like this, in ten years this like is going to be many kilometers long. I think the above would be worth a generic helper. 
> if (!pmd_none(*pmd)) > add_hash_page(mm->context.id, ea, pmd_val(*pmd)); > } > diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c > index 2fcd321040ff..175bc33b41b7 100644 > --- a/arch/powerpc/mm/book3s32/tlb.c > +++ b/arch/powerpc/mm/book3s32/tlb.c > @@ -87,7 +87,7 @@ static void flush_range(struct mm_struct *mm, unsigned long start, > if (start >= end) > return; > end = (end - 1) | ~PAGE_MASK; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, start), start), start), start); > for (;;) { > pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1; > if (pmd_end > end) > @@ -145,7 +145,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) > return; > } > mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm; > - pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr); > + pmd = pmd_offset(pud_offset(p4d_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr), vmaddr); > if (!pmd_none(*pmd)) > flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1); > } > diff --git a/arch/powerpc/mm/book3s64/hash_pgtable.c b/arch/powerpc/mm/book3s64/hash_pgtable.c > index 64733b9cb20a..9cd15937e88a 100644 > --- a/arch/powerpc/mm/book3s64/hash_pgtable.c > +++ b/arch/powerpc/mm/book3s64/hash_pgtable.c > @@ -148,6 +148,7 @@ void hash__vmemmap_remove_mapping(unsigned long start, > int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > { > pgd_t *pgdp; > + p4d_t *p4dp; > pud_t *pudp; > pmd_t *pmdp; > pte_t *ptep; > @@ -155,7 +156,8 @@ int hash__map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot) > BUILD_BUG_ON(TASK_SIZE_USER64 > H_PGTABLE_RANGE); > if (slab_is_available()) { > pgdp = pgd_offset_k(ea); > - pudp = pud_alloc(&init_mm, pgdp, ea); > + p4dp = p4d_offset(pgdp, ea); > + pudp = pud_alloc(&init_mm, p4dp, ea); Could be a single line, without a new var. 
- pudp = pud_alloc(&init_mm, pgdp, ea); + pudp = pud_alloc(&init_mm, p4d_offset(pgdp, ea), ea); Same kind of comments as already done apply to the rest. Christophe