From mboxrd@z Thu Jan  1 00:00:00 1970
Received: from usa-sjc-mx-foss1.foss.arm.com ([217.140.101.70]:39888 "EHLO
        foss.arm.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP
        id S1730385AbeK3EMY (ORCPT ); Thu, 29 Nov 2018 23:12:24 -0500
From: Vincenzo Frascino <vincenzo.frascino@arm.com>
To: linux-arch@vger.kernel.org, linux-arm-kernel@lists.infradead.org
Cc: Catalin Marinas, Will Deacon, Arnd Bergmann, Russell King,
        Ralf Baechle, Paul Burton, Daniel Lezcano, Thomas Gleixner,
        Mark Salyzyn, Peter Collingbourne
Subject: [PATCH v2 11/28] arm64: compat: Refactor aarch32_alloc_vdso_pages()
Date: Thu, 29 Nov 2018 17:05:13 +0000
Message-ID: <20181129170530.37789-12-vincenzo.frascino@arm.com>
In-Reply-To: <20181129170530.37789-1-vincenzo.frascino@arm.com>
References: <20181129170530.37789-1-vincenzo.frascino@arm.com>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit
Sender: linux-arch-owner@vger.kernel.org

aarch32_alloc_vdso_pages() needs to be refactored to make it easier
to disable the kuser helpers.

This patch divides the function into aarch32_alloc_kuser_vdso_page()
and aarch32_alloc_sigreturn_vdso_page().

Cc: Catalin Marinas
Cc: Will Deacon
Signed-off-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
---
 arch/arm64/kernel/vdso.c | 49 ++++++++++++++++++++++++++--------------
 1 file changed, 32 insertions(+), 17 deletions(-)

diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 88b674079d07..cee7205eefc5 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -74,40 +74,55 @@ static const struct vm_special_mapping aarch32_vdso_spec[2] = {
 	},
 };
 
-static int __init aarch32_alloc_vdso_pages(void)
+static int aarch32_alloc_kuser_vdso_page(void)
 {
 	extern char __kuser_helper_start[], __kuser_helper_end[];
-	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
-
 	int kuser_sz = __kuser_helper_end - __kuser_helper_start;
-	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
-	unsigned long vdso_pages[2];
-
-	vdso_pages[0] = get_zeroed_page(GFP_ATOMIC);
-	if (!vdso_pages[0])
-		return -ENOMEM;
+	unsigned long vdso_page;
 
-	vdso_pages[1] = get_zeroed_page(GFP_ATOMIC);
-	if (!vdso_pages[1])
+	vdso_page = get_zeroed_page(GFP_ATOMIC);
+	if (!vdso_page)
 		return -ENOMEM;
 
 	/* kuser helpers */
-	memcpy((void *)(vdso_pages[0] + 0x1000 - kuser_sz),
+	memcpy((void *)(vdso_page + 0x1000 - kuser_sz),
 	       __kuser_helper_start, kuser_sz);
 
+	flush_icache_range(vdso_page, vdso_page + PAGE_SIZE);
+
+	aarch32_vdso_pages[0] = virt_to_page(vdso_page);
+
+	return 0;
+}
+
+static int aarch32_alloc_sigreturn_vdso_page(void)
+{
+	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
+	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
+	unsigned long vdso_page;
+
+	vdso_page = get_zeroed_page(GFP_ATOMIC);
+	if (!vdso_page)
+		return -ENOMEM;
+
 	/* sigreturn code */
-	memcpy((void *)vdso_pages[1],
+	memcpy((void *)vdso_page,
 	       __aarch32_sigret_code_start, sigret_sz);
 
-	flush_icache_range(vdso_pages[0], vdso_pages[0] + PAGE_SIZE);
-	flush_icache_range(vdso_pages[1], vdso_pages[1] + PAGE_SIZE);
+	flush_icache_range(vdso_page, vdso_page + PAGE_SIZE);
 
-	aarch32_vdso_pages[0] = virt_to_page(vdso_pages[0]);
-	aarch32_vdso_pages[1] = virt_to_page(vdso_pages[1]);
+	aarch32_vdso_pages[1] = virt_to_page(vdso_page);
 
 	return 0;
+
+}
+
+static int __init aarch32_alloc_vdso_pages(void)
+{
+	return aarch32_alloc_kuser_vdso_page() &
+	       aarch32_alloc_sigreturn_vdso_page();
 }
 arch_initcall(aarch32_alloc_vdso_pages);
-- 
2.19.2
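
One point worth illustrating in the new aarch32_alloc_vdso_pages(): it folds
the two helpers' return codes together with a bitwise '&'. Since each helper
returns either 0 or -ENOMEM, 0 & -ENOMEM evaluates to 0, so a failed
allocation of one page is reported as success whenever the other page
allocates cleanly. The sketch below is purely illustrative (it is not part of
the submitted patch) and shows one way to combine the two helpers introduced
above while propagating the first error:

/*
 * Illustrative sketch only, not part of this series: call the two
 * allocators introduced by the patch in sequence and return the first
 * failure (each helper returns 0 on success or -ENOMEM).
 */
static int __init aarch32_alloc_vdso_pages(void)
{
	int ret;

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		return ret;

	return aarch32_alloc_sigreturn_vdso_page();
}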