From mboxrd@z Thu Jan  1 00:00:00 1970
From: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
To: benh@kernel.crashing.org, paulus@samba.org, mpe@ellerman.id.au
Cc: linuxppc-dev@lists.ozlabs.org, linux-mm@kvack.org,
	"Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Subject: [PATCH V2 06/29] powerpc/mm: Copy pgalloc (part 3)
Date: Mon,  8 Feb 2016 14:50:18 +0530
Message-Id: <1454923241-6681-7-git-send-email-aneesh.kumar@linux.vnet.ibm.com>
In-Reply-To: <1454923241-6681-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com>
References: <1454923241-6681-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com>
Sender: owner-linux-mm@kvack.org

64-bit book3s now always has a 4-level page table, irrespective of the
Linux page size. Move the related code out of the #ifdef.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
---
 arch/powerpc/include/asm/book3s/64/pgalloc.h | 55 +++++++++-------------------
 1 file changed, 18 insertions(+), 37 deletions(-)

diff --git a/arch/powerpc/include/asm/book3s/64/pgalloc.h b/arch/powerpc/include/asm/book3s/64/pgalloc.h
index 5bb6852fa771..f06ad7354d68 100644
--- a/arch/powerpc/include/asm/book3s/64/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/64/pgalloc.h
@@ -51,7 +51,6 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 }
 
-#ifndef CONFIG_PPC_64K_PAGES
 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
 {
 	pgd_set(pgd, (unsigned long)pud);
@@ -79,6 +78,14 @@ static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
 	pmd_set(pmd, (unsigned long)pte);
 }
 
+/*
+ * FIXME!!
+ * Between 4K and 64K pages, we differ in what is stored in pmd. ie.
+ * typedef pte_t *pgtable_t; -> 64K
+ * typedef struct page *pgtable_t; -> 4k
+ */
+#ifndef CONFIG_PPC_64K_PAGES
+
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				pgtable_t pte_page)
 {
@@ -176,36 +183,6 @@ extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
 extern void __tlb_remove_table(void *_table);
 #endif
 
-#ifndef __PAGETABLE_PUD_FOLDED
-/* book3s 64 is 4 level page table */
-static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
-{
-	pgd_set(pgd, (unsigned long)pud);
-}
-
-static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
-{
-	return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE),
-				GFP_KERNEL|__GFP_REPEAT);
-}
-
-static inline void pud_free(struct mm_struct *mm, pud_t *pud)
-{
-	kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud);
-}
-#endif
-
-static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
-{
-	pud_set(pud, (unsigned long)pmd);
-}
-
-static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
-				       pte_t *pte)
-{
-	pmd_set(pmd, (unsigned long)pte);
-}
-
 static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
 				pgtable_t pte_page)
 {
@@ -258,13 +235,17 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
 	kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd);
 }
 
-#define __pmd_free_tlb(tlb, pmd, addr)		      \
-	pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX)
-#ifndef __PAGETABLE_PUD_FOLDED
-#define __pud_free_tlb(tlb, pud, addr)		      \
-	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE)
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+				  unsigned long address)
+{
+	return pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX);
+}
 
-#endif /* __PAGETABLE_PUD_FOLDED */
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+				  unsigned long address)
+{
+	pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE);
+}
 
 #define check_pgt_cache()	do { } while (0)
-- 
2.5.0
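
For readers outside the powerpc tree: the point of the unification above is that the pgd -> pud -> pmd -> pte population chain is now the same shape regardless of base page size. The sketch below is a self-contained userspace analogy, not kernel code; the names, the 512-entry width, and the helpers are invented for illustration of what "populating" a level means, namely storing the address of a freshly allocated lower-level table into an entry of the upper one.

```c
/*
 * Illustrative userspace sketch of a fixed 4-level table, in the
 * spirit of pgd_populate()/pud_populate()/pmd_populate(). Everything
 * here (names, ENTRIES, level_alloc) is a toy stand-in, not the
 * kernel's actual definitions.
 */
#include <stdio.h>
#include <stdlib.h>

#define ENTRIES 512	/* entries per level; an assumed example width */

typedef unsigned long entry_t;

static entry_t *level_alloc(void)
{
	/* Stand-in for kmem_cache_alloc(PGT_CACHE(...), ...). */
	return calloc(ENTRIES, sizeof(entry_t));
}

static void level_populate(entry_t *upper, unsigned int index, void *lower)
{
	/* Stand-in for pgd_set()/pud_set()/pmd_set(): store the address. */
	upper[index] = (entry_t)lower;
}

int main(void)
{
	entry_t *pgd = level_alloc();
	entry_t *pud = level_alloc();
	entry_t *pmd = level_alloc();
	entry_t *pte = level_alloc();

	if (!pgd || !pud || !pmd || !pte)
		return 1;

	/* The same chain, whatever the base page size. */
	level_populate(pgd, 0, pud);
	level_populate(pud, 0, pmd);
	level_populate(pmd, 0, pte);

	printf("pgd[0] -> %#lx\n", pgd[0]);
	free(pte); free(pmd); free(pud); free(pgd);
	return 0;
}
```

Compiled as an ordinary C program, it prints the stored pud address out of pgd[0], which is all a populate step amounts to.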
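
The FIXME the patch adds records why one #ifdef has to stay for now: pgtable_t names a different type under 4K and 64K pages, so pmd_populate() must extract a different address from it in each configuration. A minimal userspace sketch of that situation follows; struct page, pmd_set(), the page_address() stand-in, and the USE_64K_PAGES macro are all invented for illustration, not the kernel's definitions.

```c
#include <stdio.h>

typedef unsigned long pte_t;
struct page { unsigned long pfn; };	/* toy stand-in for struct page */

#ifdef USE_64K_PAGES
typedef pte_t *pgtable_t;		/* 64K case: a pointer to the pte fragment */
static unsigned long page_table_address(pgtable_t t)
{
	return (unsigned long)t;	/* already an address */
}
#else
typedef struct page *pgtable_t;		/* 4K case: the backing struct page */
static unsigned long page_table_address(pgtable_t t)
{
	return t->pfn << 12;		/* page_address() stand-in */
}
#endif

static void pmd_populate(unsigned long *pmd, pgtable_t pte_page)
{
	*pmd = page_table_address(pte_page);	/* pmd_set() stand-in */
}

int main(void)
{
	unsigned long pmd = 0;
#ifdef USE_64K_PAGES
	pte_t fragment[16] = { 0 };
	pmd_populate(&pmd, fragment);
#else
	struct page pg = { .pfn = 0x1234 };
	pmd_populate(&pmd, &pg);
#endif
	printf("pmd = %#lx\n", pmd);
	return 0;
}
```

Building with and without -DUSE_64K_PAGES flips which typedef and extraction path is compiled in, which mirrors why the patch keeps the two pmd_populate() variants behind #ifndef CONFIG_PPC_64K_PAGES until that difference is resolved.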