kernel-hardening.lists.openwall.com archive mirror
 help / color / mirror / Atom feed
From: Laura Abbott <labbott@redhat.com>
To: Khalid Aziz <khalid.aziz@oracle.com>,
	juergh@gmail.com, tycho@tycho.ws, jsteckli@amazon.de,
	ak@linux.intel.com, torvalds@linux-foundation.org,
	liran.alon@oracle.com, keescook@google.com,
	konrad.wilk@oracle.com
Cc: deepa.srinivasan@oracle.com, chris.hyser@oracle.com,
	tyhicks@canonical.com, dwmw@amazon.co.uk,
	andrew.cooper3@citrix.com, jcm@redhat.com,
	boris.ostrovsky@oracle.com, kanth.ghatraju@oracle.com,
	joao.m.martins@oracle.com, jmattson@google.com,
	pradeep.vincent@oracle.com, john.haxby@oracle.com,
	tglx@linutronix.de, kirill.shutemov@linux.intel.com, hch@lst.de,
	steven.sistare@oracle.com, kernel-hardening@lists.openwall.com,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org, x86@kernel.org,
	"Vasileios P . Kemerlis" <vpk@cs.columbia.edu>,
	Juerg Haefliger <juerg.haefliger@canonical.com>,
	Tycho Andersen <tycho@docker.com>,
	Marco Benatto <marco.antonio.780@gmail.com>,
	David Woodhouse <dwmw2@infradead.org>
Subject: Re: [RFC PATCH v7 14/16] EXPERIMENTAL: xpfo, mm: optimize spin lock usage in xpfo_kmap
Date: Wed, 16 Jan 2019 16:18:14 -0800	[thread overview]
Message-ID: <b2ffc4fd-e449-b6da-7070-4f182d44dd5b@redhat.com> (raw)
In-Reply-To: <7e8e17f519ae87a91fc6cbb57b8b27094c96305c.1547153058.git.khalid.aziz@oracle.com>

On 1/10/19 1:09 PM, Khalid Aziz wrote:
> From: Julian Stecklina <jsteckli@amazon.de>
> 
> We can reduce spin lock usage in xpfo_kmap to the 0->1 transition of
> the mapcount. This means that xpfo_kmap() can now race and that we
> get spurious page faults.
> 
> The page fault handler helps the system make forward progress by
> fixing the page table instead of allowing repeated page faults until
> the right xpfo_kmap went through.
> 
> Model-checked with up to 4 concurrent callers with Spin.
> 

This needs the spurious check for arm64 as well. This at
least gets me booting but could probably use more review:

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7d9571f4ae3d..8f425848cbb9 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -32,6 +32,7 @@
  #include <linux/perf_event.h>
  #include <linux/preempt.h>
  #include <linux/hugetlb.h>
+#include <linux/xpfo.h>
  
  #include <asm/bug.h>
  #include <asm/cmpxchg.h>
@@ -289,6 +290,9 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
         if (!is_el1_instruction_abort(esr) && fixup_exception(regs))
                 return;
  
+       if (xpfo_spurious_fault(addr))
+               return;
+
         if (is_el1_permission_fault(addr, esr, regs)) {
                 if (esr & ESR_ELx_WNR)
                         msg = "write to read-only memory";


> Signed-off-by: Julian Stecklina <jsteckli@amazon.de>
> Cc: x86@kernel.org
> Cc: kernel-hardening@lists.openwall.com
> Cc: Vasileios P. Kemerlis <vpk@cs.columbia.edu>
> Cc: Juerg Haefliger <juerg.haefliger@canonical.com>
> Cc: Tycho Andersen <tycho@docker.com>
> Cc: Marco Benatto <marco.antonio.780@gmail.com>
> Cc: David Woodhouse <dwmw2@infradead.org>
> Signed-off-by: Khalid Aziz <khalid.aziz@oracle.com>
> ---
>   arch/x86/mm/fault.c  |  4 ++++
>   include/linux/xpfo.h |  4 ++++
>   mm/xpfo.c            | 50 +++++++++++++++++++++++++++++++++++++-------
>   3 files changed, 51 insertions(+), 7 deletions(-)
> 
> diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
> index ba51652fbd33..207081dcd572 100644
> --- a/arch/x86/mm/fault.c
> +++ b/arch/x86/mm/fault.c
> @@ -18,6 +18,7 @@
>   #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
>   #include <linux/efi.h>			/* efi_recover_from_page_fault()*/
>   #include <linux/mm_types.h>
> +#include <linux/xpfo.h>
>   
>   #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
>   #include <asm/traps.h>			/* dotraplinkage, ...		*/
> @@ -1218,6 +1219,9 @@ do_kern_addr_fault(struct pt_regs *regs, unsigned long hw_error_code,
>   	if (kprobes_fault(regs))
>   		return;
>   
> +	if (xpfo_spurious_fault(address))
> +		return;
> +
>   	/*
>   	 * Note, despite being a "bad area", there are quite a few
>   	 * acceptable reasons to get here, such as erratum fixups
> diff --git a/include/linux/xpfo.h b/include/linux/xpfo.h
> index ea5188882f49..58dd243637d2 100644
> --- a/include/linux/xpfo.h
> +++ b/include/linux/xpfo.h
> @@ -54,6 +54,8 @@ bool xpfo_enabled(void);
>   
>   phys_addr_t user_virt_to_phys(unsigned long addr);
>   
> +bool xpfo_spurious_fault(unsigned long addr);
> +
>   #else /* !CONFIG_XPFO */
>   
>   static inline void xpfo_init_single_page(struct page *page) { }
> @@ -81,6 +83,8 @@ static inline bool xpfo_enabled(void) { return false; }
>   
>   static inline phys_addr_t user_virt_to_phys(unsigned long addr) { return 0; }
>   
> +static inline bool xpfo_spurious_fault(unsigned long addr) { return false; }
> +
>   #endif /* CONFIG_XPFO */
>   
>   #endif /* _LINUX_XPFO_H */
> diff --git a/mm/xpfo.c b/mm/xpfo.c
> index dbf20efb0499..85079377c91d 100644
> --- a/mm/xpfo.c
> +++ b/mm/xpfo.c
> @@ -119,6 +119,16 @@ void xpfo_free_pages(struct page *page, int order)
>   	}
>   }
>   
> +static void xpfo_do_map(void *kaddr, struct page *page)
> +{
> +	spin_lock(&page->xpfo_lock);
> +	if (PageXpfoUnmapped(page)) {
> +		set_kpte(kaddr, page, PAGE_KERNEL);
> +		ClearPageXpfoUnmapped(page);
> +	}
> +	spin_unlock(&page->xpfo_lock);
> +}
> +
>   void xpfo_kmap(void *kaddr, struct page *page)
>   {
>   	if (!static_branch_unlikely(&xpfo_inited))
> @@ -127,17 +137,12 @@ void xpfo_kmap(void *kaddr, struct page *page)
>   	if (!PageXpfoUser(page))
>   		return;
>   
> -	spin_lock(&page->xpfo_lock);
> -
>   	/*
>   	 * The page was previously allocated to user space, so map it back
>   	 * into the kernel. No TLB flush required.
>   	 */
> -	if ((atomic_inc_return(&page->xpfo_mapcount) == 1) &&
> -	    TestClearPageXpfoUnmapped(page))
> -		set_kpte(kaddr, page, PAGE_KERNEL);
> -
> -	spin_unlock(&page->xpfo_lock);
> +	if (atomic_inc_return(&page->xpfo_mapcount) == 1)
> +		xpfo_do_map(kaddr, page);
>   }
>   EXPORT_SYMBOL(xpfo_kmap);
>   
> @@ -204,3 +209,34 @@ void xpfo_temp_unmap(const void *addr, size_t size, void **mapping,
>   			kunmap_atomic(mapping[i]);
>   }
>   EXPORT_SYMBOL(xpfo_temp_unmap);
> +
> +bool xpfo_spurious_fault(unsigned long addr)
> +{
> +	struct page *page;
> +	bool spurious;
> +	int mapcount;
> +
> +	if (!static_branch_unlikely(&xpfo_inited))
> +		return false;
> +
> +	/* XXX Is this sufficient to guard against calling virt_to_page() on a
> +	 * virtual address that has no corresponding struct page? */
> +	if (!virt_addr_valid(addr))
> +		return false;
> +
> +	page = virt_to_page(addr);
> +	mapcount = atomic_read(&page->xpfo_mapcount);
> +	spurious = PageXpfoUser(page) && mapcount;
> +
> +	/* Guarantee forward progress in case xpfo_kmap() raced. */
> +	if (spurious && PageXpfoUnmapped(page)) {
> +		xpfo_do_map((void *)(addr & PAGE_MASK), page);
> +	}
> +
> +	if (unlikely(!spurious))
> +		printk("XPFO non-spurious fault %lx user=%d unmapped=%d mapcount=%d\n",
> +			addr, PageXpfoUser(page), PageXpfoUnmapped(page),
> +			mapcount);
> +
> +	return spurious;
> +}
> 

  reply	other threads:[~2019-01-17  0:18 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-01-10 21:09 [RFC PATCH v7 00/16] Add support for eXclusive Page Frame Ownership Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 01/16] mm: add MAP_HUGETLB support to vm_mmap Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 02/16] x86: always set IF before oopsing from page fault Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 03/16] mm, x86: Add support for eXclusive Page Frame Ownership (XPFO) Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 04/16] swiotlb: Map the buffer if it was unmapped by XPFO Khalid Aziz
2019-01-23 14:16   ` Konrad Rzeszutek Wilk
2019-01-10 21:09 ` [RFC PATCH v7 05/16] arm64/mm: Add support for XPFO Khalid Aziz
2019-01-23 14:20   ` Konrad Rzeszutek Wilk
2019-02-12 15:45     ` Khalid Aziz
2019-01-23 14:24   ` Konrad Rzeszutek Wilk
2019-02-12 15:52     ` Khalid Aziz
2019-02-12 20:01       ` Laura Abbott
2019-02-12 20:34         ` Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 06/16] xpfo: add primitives for mapping underlying memory Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 07/16] arm64/mm, xpfo: temporarily map dcache regions Khalid Aziz
2019-01-11 14:54   ` Tycho Andersen
2019-01-11 18:28     ` Khalid Aziz
2019-01-11 19:50       ` Tycho Andersen
2019-01-23 14:56   ` Konrad Rzeszutek Wilk
2019-01-10 21:09 ` [RFC PATCH v7 08/16] arm64/mm: disable section/contiguous mappings if XPFO is enabled Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 09/16] mm: add a user_virt_to_phys symbol Khalid Aziz
2019-01-23 15:03   ` Konrad Rzeszutek Wilk
2019-01-10 21:09 ` [RFC PATCH v7 10/16] lkdtm: Add test for XPFO Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 11/16] mm, x86: omit TLB flushing by default for XPFO page table modifications Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 12/16] xpfo, mm: remove dependency on CONFIG_PAGE_EXTENSION Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 13/16] xpfo, mm: optimize spinlock usage in xpfo_kunmap Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 14/16] EXPERIMENTAL: xpfo, mm: optimize spin lock usage in xpfo_kmap Khalid Aziz
2019-01-17  0:18   ` Laura Abbott [this message]
2019-01-17 15:14     ` Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 15/16] xpfo, mm: Fix hang when booting with "xpfotlbflush" Khalid Aziz
2019-01-10 21:09 ` [RFC PATCH v7 16/16] xpfo, mm: Defer TLB flushes for non-current CPUs (x86 only) Khalid Aziz
2019-01-10 23:07 ` [RFC PATCH v7 00/16] Add support for eXclusive Page Frame Ownership Kees Cook
2019-01-11  0:20   ` Khalid Aziz
2019-01-11  0:44   ` Andy Lutomirski
2019-01-11 21:45     ` Khalid Aziz
2019-01-10 23:40 ` Dave Hansen
2019-01-11  9:59   ` Peter Zijlstra
2019-01-11 18:21   ` Khalid Aziz
2019-01-11 20:42     ` Dave Hansen
2019-01-11 21:06       ` Andy Lutomirski
2019-01-11 23:25         ` Khalid Aziz
2019-01-11 23:23       ` Khalid Aziz
2019-01-16  1:28 ` Laura Abbott
     [not found] ` <ciirm8o98gzm4z.fsf@u54ee758033e858cfa736.ant.amazon.com>
2019-01-16 15:16   ` Khalid Aziz
2019-01-17 23:38 ` Laura Abbott

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=b2ffc4fd-e449-b6da-7070-4f182d44dd5b@redhat.com \
    --to=labbott@redhat.com \
    --cc=ak@linux.intel.com \
    --cc=andrew.cooper3@citrix.com \
    --cc=boris.ostrovsky@oracle.com \
    --cc=chris.hyser@oracle.com \
    --cc=deepa.srinivasan@oracle.com \
    --cc=dwmw2@infradead.org \
    --cc=dwmw@amazon.co.uk \
    --cc=hch@lst.de \
    --cc=jcm@redhat.com \
    --cc=jmattson@google.com \
    --cc=joao.m.martins@oracle.com \
    --cc=john.haxby@oracle.com \
    --cc=jsteckli@amazon.de \
    --cc=juerg.haefliger@canonical.com \
    --cc=juergh@gmail.com \
    --cc=kanth.ghatraju@oracle.com \
    --cc=keescook@google.com \
    --cc=kernel-hardening@lists.openwall.com \
    --cc=khalid.aziz@oracle.com \
    --cc=kirill.shutemov@linux.intel.com \
    --cc=konrad.wilk@oracle.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=liran.alon@oracle.com \
    --cc=marco.antonio.780@gmail.com \
    --cc=pradeep.vincent@oracle.com \
    --cc=steven.sistare@oracle.com \
    --cc=tglx@linutronix.de \
    --cc=torvalds@linux-foundation.org \
    --cc=tycho@docker.com \
    --cc=tycho@tycho.ws \
    --cc=tyhicks@canonical.com \
    --cc=vpk@cs.columbia.edu \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox; see mirroring instructions
for how to clone and mirror all data and code used for this inbox,
as well as URLs for NNTP newsgroup(s).