From mboxrd@z Thu Jan  1 00:00:00 1970
From: Andrew Morton
Subject: [patch 030/127] {x86,powerpc,microblaze}/kmap: move preempt disable
Date: Thu, 04 Jun 2020 16:47:38 -0700
Message-ID: <20200604234738.ChH-8UNsP%akpm@linux-foundation.org>
References: <20200604164523.e15f3177f4b69dcb4f2534a1@linux-foundation.org>
In-Reply-To: <20200604164523.e15f3177f4b69dcb4f2534a1@linux-foundation.org>
Reply-To: linux-kernel@vger.kernel.org
MIME-Version: 1.0
Content-Type: text/plain; charset=utf-8
Sender: mm-commits-owner@vger.kernel.org
List-Id: mm-commits@vger.kernel.org
To: akpm@linux-foundation.org, benh@kernel.crashing.org, bp@alien8.de,
    chris@zankel.net, christian.koenig@amd.com, dan.j.williams@intel.com,
    daniel.vetter@ffwll.ch, dave.hansen@linux.intel.com, davem@davemloft.net,
    deller@gmx.de, hch@lst.de, hpa@zytor.com, ira.weiny@intel.com,
    James.Bottomley@HansenPartnership.com, jcmvbkbc@gmail.com,
    linux-mm@kvack.org, luto@kernel.org, mingo@redhat.com,
    mm-commits@vger.kernel.org, paulus@samba.org, peterz@infradead.org,
    tglx@linutronix.de, torvalds@linux-foundation.org,
    tsbogend@alpha.franken.de, viro@zeniv.linux.org.uk

From: Ira Weiny
Subject: {x86,powerpc,microblaze}/kmap: move preempt disable

During this kmap() conversion series we must maintain bisect-ability.  To
do this, kmap_atomic_prot() in x86, powerpc, and microblaze needs to
remain functional.

Create a temporary inline version of kmap_atomic_prot() within these
architectures so we can rework their kmap_atomic() calls and then lift
kmap_atomic_prot() to the core.
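Concretely, every architecture ends up with the same temporary shape: an
inline kmap_atomic_prot() that does the preempt/pagefault bookkeeping plus
the lowmem fast path, and an arch-private kmap_atomic_high_prot() that only
handles real highmem pages.  A sketch of the resulting call chain
(illustrative only; the kmap_atomic() wrapper shown is the pre-existing x86
form and assumes kmap_prot as the default protection):

static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	preempt_disable();		/* the fixmap slot is per-CPU */
	pagefault_disable();
	if (!PageHighMem(page))		/* lowmem is permanently mapped */
		return page_address(page);

	return kmap_atomic_high_prot(page, prot);  /* arch installs the PTE */
}

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);  /* default protection */
}

Existing callers keep calling kmap_atomic() and kmap_atomic_prot()
unchanged at every point in the series, which is what keeps each step
bisectable.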
Link: http://lkml.kernel.org/r/20200507150004.1423069-6-ira.weiny@intel.com
Signed-off-by: Ira Weiny
Reviewed-by: Christoph Hellwig
Suggested-by: Al Viro
Cc: Andy Lutomirski
Cc: Benjamin Herrenschmidt
Cc: Borislav Petkov
Cc: Christian König
Cc: Chris Zankel
Cc: Daniel Vetter
Cc: Dan Williams
Cc: Dave Hansen
Cc: "David S. Miller"
Cc: Helge Deller
Cc: "H. Peter Anvin"
Cc: Ingo Molnar
Cc: "James E.J. Bottomley"
Cc: Max Filippov
Cc: Paul Mackerras
Cc: Peter Zijlstra
Cc: Thomas Bogendoerfer
Cc: Thomas Gleixner
Signed-off-by: Andrew Morton
---

 arch/microblaze/include/asm/highmem.h |   11 ++++++++++-
 arch/microblaze/mm/highmem.c          |   10 ++--------
 arch/powerpc/include/asm/highmem.h    |   11 ++++++++++-
 arch/powerpc/mm/highmem.c             |    9 ++-------
 arch/x86/include/asm/highmem.h        |   11 ++++++++++-
 arch/x86/mm/highmem_32.c              |   10 ++--------
 6 files changed, 36 insertions(+), 26 deletions(-)

--- a/arch/microblaze/include/asm/highmem.h~x86powerpcmicroblaze-kmap-move-preempt-disable
+++ a/arch/microblaze/include/asm/highmem.h
@@ -51,7 +51,16 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt)  ((virt - PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	return kmap_atomic_high_prot(page, prot);
+}
 extern void __kunmap_atomic(void *kvaddr);
 
 static inline void *kmap_atomic(struct page *page)
--- a/arch/microblaze/mm/highmem.c~x86powerpcmicroblaze-kmap-move-preempt-disable
+++ a/arch/microblaze/mm/highmem.c
@@ -32,18 +32,12 @@
  */
 #include <linux/highmem.h>
 
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -55,7 +49,7 @@ void *kmap_atomic_prot(struct page *page
 
 	return (void *) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 
 void __kunmap_atomic(void *kvaddr)
 {
--- a/arch/powerpc/include/asm/highmem.h~x86powerpcmicroblaze-kmap-move-preempt-disable
+++ a/arch/powerpc/include/asm/highmem.h
@@ -59,7 +59,16 @@ extern pte_t *pkmap_page_table;
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-extern void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	return kmap_atomic_high_prot(page, prot);
+}
 extern void __kunmap_atomic(void *kvaddr);
 
 static inline void *kmap_atomic(struct page *page)
--- a/arch/powerpc/mm/highmem.c~x86powerpcmicroblaze-kmap-move-preempt-disable
+++ a/arch/powerpc/mm/highmem.c
@@ -30,16 +30,11 @@
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -49,7 +44,7 @@ void *kmap_atomic_prot(struct page *page
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 
 void __kunmap_atomic(void *kvaddr)
 {
--- a/arch/x86/include/asm/highmem.h~x86powerpcmicroblaze-kmap-move-preempt-disable
+++ a/arch/x86/include/asm/highmem.h
@@ -58,7 +58,16 @@ extern unsigned long highstart_pfn, high
 #define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
 #define PKMAP_ADDR(nr)  (PKMAP_BASE + ((nr) << PAGE_SHIFT))
 
-void *kmap_atomic_prot(struct page *page, pgprot_t prot);
+extern void *kmap_atomic_high_prot(struct page *page, pgprot_t prot);
+static inline void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+{
+	preempt_disable();
+	pagefault_disable();
+	if (!PageHighMem(page))
+		return page_address(page);
+
+	return kmap_atomic_high_prot(page, prot);
+}
 void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
--- a/arch/x86/mm/highmem_32.c~x86powerpcmicroblaze-kmap-move-preempt-disable
+++ a/arch/x86/mm/highmem_32.c
@@ -12,17 +12,11 @@
  * However when holding an atomic kmap it is not legal to sleep, so atomic
  * kmaps are appropriate for short, tight code paths only.
  */
-void *kmap_atomic_prot(struct page *page, pgprot_t prot)
+void *kmap_atomic_high_prot(struct page *page, pgprot_t prot)
 {
 	unsigned long vaddr;
 	int idx, type;
 
-	preempt_disable();
-	pagefault_disable();
-
-	if (!PageHighMem(page))
-		return page_address(page);
-
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
@@ -32,7 +26,7 @@ void *kmap_atomic_prot(struct page *page
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(kmap_atomic_prot);
+EXPORT_SYMBOL(kmap_atomic_high_prot);
 
 void *kmap_atomic(struct page *page)
 {
_
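The comment kept above in highmem_32.c ("atomic kmaps are appropriate for
short, tight code paths only") follows directly from where the disabling
now lives: the mapping slot handed out is per-CPU, so between map and unmap
a caller may neither sleep nor fault.  A usage sketch (hypothetical helper;
kmap_atomic()/kunmap_atomic() are the real caller-facing API of this era):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy out of a possibly-highmem page; hypothetical, for illustration. */
static void copy_out_of_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page);	/* preemption and pagefaults off */

	memcpy(dst, src, len);		/* short, non-sleeping work only */
	kunmap_atomic(src);		/* frees the slot, re-enables both */
}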