From mboxrd@z Thu Jan 1 00:00:00 1970
Return-Path:
X-Spam-Checker-Version: SpamAssassin 3.4.0 (2014-02-07) on
	aws-us-west-2-korg-lkml-1.web.codeaurora.org
X-Spam-Level:
X-Spam-Status: No, score=-16.8 required=3.0 tests=BAYES_00,
	HEADER_FROM_DIFFERENT_DOMAINS,INCLUDES_CR_TRAILER,INCLUDES_PATCH,
	MAILING_LIST_MULTI,SPF_HELO_NONE,SPF_PASS,USER_AGENT_GIT autolearn=ham
	autolearn_force=no version=3.4.0
Received: from mail.kernel.org (mail.kernel.org [198.145.29.99])
	by smtp.lore.kernel.org (Postfix) with ESMTP id 9061CC433EF
	for ; Sat, 18 Sep 2021 08:10:35 +0000 (UTC)
Received: from vger.kernel.org (vger.kernel.org [23.128.96.18])
	by mail.kernel.org (Postfix) with ESMTP id 6047961212
	for ; Sat, 18 Sep 2021 08:10:35 +0000 (UTC)
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S243825AbhIRIL4 (ORCPT ); Sat, 18 Sep 2021 04:11:56 -0400
Received: from szxga02-in.huawei.com ([45.249.212.188]:9894 "EHLO
	szxga02-in.huawei.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S243653AbhIRILy (ORCPT );
	Sat, 18 Sep 2021 04:11:54 -0400
Received: from dggemv704-chm.china.huawei.com (unknown [172.30.72.57])
	by szxga02-in.huawei.com (SkyGuard) with ESMTP id 4HBNgF07rGz8yQD;
	Sat, 18 Sep 2021 16:06:01 +0800 (CST)
Received: from dggpemm500009.china.huawei.com (7.185.36.225) by
	dggemv704-chm.china.huawei.com (10.3.19.47) with Microsoft SMTP Server
	(version=TLS1_2, cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id
	15.1.2308.8; Sat, 18 Sep 2021 16:10:30 +0800
Received: from huawei.com (10.175.113.32) by dggpemm500009.china.huawei.com
	(7.185.36.225) with Microsoft SMTP Server (version=TLS1_2,
	cipher=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256) id 15.1.2308.8;
	Sat, 18 Sep 2021 16:10:29 +0800
From: Liu Shixin
To: Alexander Potapenko , Marco Elver , Dmitry Vyukov ,
	Catalin Marinas , Will Deacon
CC: , , , Liu Shixin
Subject: [PATCH] arm64: remove page granularity limitation from KFENCE
Date: Sat, 18 Sep 2021 16:38:49 +0800
Message-ID: <20210918083849.2696287-1-liushixin2@huawei.com>
X-Mailer: git-send-email 2.18.0.huawei.25
MIME-Version: 1.0
Content-Type: text/plain
X-Originating-IP: [10.175.113.32]
X-ClientProxiedBy: dggems702-chm.china.huawei.com (10.3.19.179) To
	dggpemm500009.china.huawei.com (7.185.36.225)
X-CFilter-Loop: Reflected
Precedence: bulk
List-ID:
X-Mailing-List: linux-kernel@vger.kernel.org

Currently, if KFENCE is enabled on arm64, the entire linear map is mapped
at page granularity, which seems like overkill. Only the kfence pool
actually needs to be mapped at page granularity. We can drop that
restriction from the linear map setup and instead force the linear mapping
of the kfence pool to page granularity later, in arch_kfence_init_pool().
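For scale: KFENCE sizes its pool as (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 *
PAGE_SIZE, so with the default CONFIG_KFENCE_NUM_OBJECTS=255 and 4 KiB
pages the pool is (255 + 1) * 2 * 4 KiB = 2 MiB. Only about 2 MiB of the
linear map therefore needs to be broken down to PTE level, while the rest
of the linear map can keep using block and contiguous mappings.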
Signed-off-by: Liu Shixin
---
 arch/arm64/include/asm/kfence.h | 69 ++++++++++++++++++++++++++++++++-
 arch/arm64/mm/mmu.c             |  4 +-
 2 files changed, 70 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h
index aa855c6a0ae6..bee101eced0b 100644
--- a/arch/arm64/include/asm/kfence.h
+++ b/arch/arm64/include/asm/kfence.h
@@ -8,9 +8,76 @@
 #ifndef __ASM_KFENCE_H
 #define __ASM_KFENCE_H
 
+#include 
 #include 
+#include 
 
-static inline bool arch_kfence_init_pool(void) { return true; }
+static inline int split_pud_page(pud_t *pud, unsigned long addr)
+{
+	int i;
+	pmd_t *pmd = pmd_alloc_one(&init_mm, addr);
+	unsigned long pfn = PFN_DOWN(__pa(addr));
+
+	if (!pmd)
+		return -ENOMEM;
+
+	for (i = 0; i < PTRS_PER_PMD; i++)
+		set_pmd(pmd + i, pmd_mkhuge(pfn_pmd(pfn + i * PTRS_PER_PTE, PAGE_KERNEL)));
+
+	smp_wmb(); /* See comment in __pte_alloc */
+	pud_populate(&init_mm, pud, pmd);
+	flush_tlb_kernel_range(addr, addr + PUD_SIZE);
+	return 0;
+}
+
+static inline int split_pmd_page(pmd_t *pmd, unsigned long addr)
+{
+	int i;
+	pte_t *pte = pte_alloc_one_kernel(&init_mm);
+	unsigned long pfn = PFN_DOWN(__pa(addr));
+
+	if (!pte)
+		return -ENOMEM;
+
+	for (i = 0; i < PTRS_PER_PTE; i++)
+		set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL));
+
+	smp_wmb(); /* See comment in __pte_alloc */
+	pmd_populate_kernel(&init_mm, pmd, pte);
+
+	flush_tlb_kernel_range(addr, addr + PMD_SIZE);
+	return 0;
+}
+
+static inline bool arch_kfence_init_pool(void)
+{
+	unsigned long addr;
+	pgd_t *pgd;
+	p4d_t *p4d;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr);
+	     addr += PAGE_SIZE) {
+		pgd = pgd_offset(&init_mm, addr);
+		if (pgd_leaf(*pgd))
+			return false;
+		p4d = p4d_offset(pgd, addr);
+		if (p4d_leaf(*p4d))
+			return false;
+		pud = pud_offset(p4d, addr);
+		if (pud_leaf(*pud)) {
+			if (split_pud_page(pud, addr & PUD_MASK))
+				return false;
+		}
+		pmd = pmd_offset(pud, addr);
+		if (pmd_leaf(*pmd)) {
+			if (split_pmd_page(pmd, addr & PMD_MASK))
+				return false;
+		}
+	}
+	return true;
+}
 
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
 {
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index cfd9deb347c3..b2c79ccfb1c5 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -516,7 +516,7 @@ static void __init map_mem(pgd_t *pgdp)
 	 */
 	BUILD_BUG_ON(pgd_index(direct_map_end - 1) == pgd_index(direct_map_end));
 
-	if (can_set_direct_map() || crash_mem_map || IS_ENABLED(CONFIG_KFENCE))
+	if (can_set_direct_map() || crash_mem_map)
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	/*
@@ -1485,7 +1485,7 @@ int arch_add_memory(int nid, u64 start, u64 size,
 	 * KFENCE requires linear map to be mapped at page granularity, so that
 	 * it is possible to protect/unprotect single pages in the KFENCE pool.
 	 */
-	if (can_set_direct_map() || IS_ENABLED(CONFIG_KFENCE))
+	if (can_set_direct_map())
 		flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
 
 	__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
-- 
2.18.0.huawei.25