From: Andrey Konovalov <andreyknvl@google.com>
To: Andrey Ryabinin <aryabinin@virtuozzo.com>,
	Alexander Potapenko <glider@google.com>,
	Dmitry Vyukov <dvyukov@google.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Christoph Lameter <cl@linux.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	Mark Rutland <mark.rutland@arm.com>,
	Nick Desaulniers <ndesaulniers@google.com>,
	Marc Zyngier <marc.zyngier@arm.com>,
	Dave Martin <dave.martin@arm.com>,
	Ard Biesheuvel <ard.biesheuvel@linaro.org>,
	"Eric W . Biederman" <ebiederm@xmission.com>,
	Ingo Molnar <mingo@kernel.org>,
	Paul Lawrence <paullawrence@google.com>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	Arnd Bergmann <arnd@arndb.de>,
	"Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>,
	Greg Kroah-Hartman <gregkh@linuxfoundation.org>,
	Kate Stewart <kstewart@linuxfoundation.org>,
	Mike Rapoport <rppt@linux.vnet.ibm.com>,
	kasan-dev@googlegroups.com, linux-doc@vger.kernel.org,
	linux-kernel@vger.kernel.org,
	linux-arm-kernel@lists.infradead.org,
	linux-sparse@vger.kernel.org, linux-mm@kvack.org,
	linux-kbuild@vger.kernel.org
Cc: Kostya Serebryany <kcc@google.com>,
	Evgeniy Stepanov <eugenis@google.com>,
	Lee Smith <Lee.Smith@arm.com>,
	Ramana Radhakrishnan <Ramana.Radhakrishnan@arm.com>,
	Jacob Bramley <Jacob.Bramley@arm.com>,
	Ruben Ayrapetyan <Ruben.Ayrapetyan@arm.com>,
	Jann Horn <jannh@google.com>, Mark Brand <markbrand@google.com>,
	Chintan Pandya <cpandya@codeaurora.org>,
	Vishwath Mohan <vishwath@google.com>,
	Andrey Konovalov <andreyknvl@google.com>
Subject: [PATCH v11 13/24] kasan, arm64: fix up fault handling logic
Date: Mon, 19 Nov 2018 18:26:29 +0100
Message-ID: <99c747edfba8ea4b93d9c70aac47ea86ef258b86.1542648335.git.andreyknvl@google.com>
In-Reply-To: <cover.1542648335.git.andreyknvl@google.com>

Right now the arm64 fault handling code removes pointer tags from addresses
covered by TTBR0 in faults taken from both EL0 and EL1, but it doesn't do
the same for pointers covered by TTBR1.

This patch adds two helper functions, is_ttbr0_addr() and is_ttbr1_addr();
the latter accounts for the fact that TTBR1 pointers might be tagged when
tag-based KASAN is in use. Both helpers are then used for the pointer
checks in arch/arm64/mm/fault.c.

Suggested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 arch/arm64/mm/fault.c | 31 ++++++++++++++++++++++---------
 1 file changed, 22 insertions(+), 9 deletions(-)

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7d9571f4ae3d..6023d4752701 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -40,6 +40,7 @@
 #include <asm/daifflags.h>
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
+#include <asm/kasan.h>
 #include <asm/sysreg.h>
 #include <asm/system_misc.h>
 #include <asm/pgtable.h>
@@ -132,6 +133,18 @@ static void mem_abort_decode(unsigned int esr)
 		data_abort_decode(esr);
 }
 
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+        /* entry assembly clears tags for TTBR0 addrs */
+        return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+        /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+        return arch_kasan_reset_tag(addr) >= VA_START;
+}
+
 /*
  * Dump out the page tables associated with 'addr' in the currently active mm.
  */
@@ -141,7 +154,7 @@ void show_pte(unsigned long addr)
 	pgd_t *pgdp;
 	pgd_t pgd;
 
-	if (addr < TASK_SIZE) {
+	if (is_ttbr0_addr(addr)) {
 		/* TTBR0 */
 		mm = current->active_mm;
 		if (mm == &init_mm) {
@@ -149,7 +162,7 @@ void show_pte(unsigned long addr)
 				 addr);
 			return;
 		}
-	} else if (addr >= VA_START) {
+	} else if (is_ttbr1_addr(addr)) {
 		/* TTBR1 */
 		mm = &init_mm;
 	} else {
@@ -254,7 +267,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
 	if (fsc_type == ESR_ELx_FSC_PERM)
 		return true;
 
-	if (addr < TASK_SIZE && system_uses_ttbr0_pan())
+	if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
 		return fsc_type == ESR_ELx_FSC_FAULT &&
 			(regs->pstate & PSR_PAN_BIT);
 
@@ -319,7 +332,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
 	 * type", so we ignore this wrinkle and just return the translation
 	 * fault.)
 	 */
-	if (current->thread.fault_address >= TASK_SIZE) {
+	if (!is_ttbr0_addr(current->thread.fault_address)) {
 		switch (ESR_ELx_EC(esr)) {
 		case ESR_ELx_EC_DABT_LOW:
 			/*
@@ -455,7 +468,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 		mm_flags |= FAULT_FLAG_WRITE;
 	}
 
-	if (addr < TASK_SIZE && is_el1_permission_fault(addr, esr, regs)) {
+	if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
 		/* regs->orig_addr_limit may be 0 if we entered from EL0 */
 		if (regs->orig_addr_limit == KERNEL_DS)
 			die_kernel_fault("access to user memory with fs=KERNEL_DS",
@@ -603,7 +616,7 @@ static int __kprobes do_translation_fault(unsigned long addr,
 					  unsigned int esr,
 					  struct pt_regs *regs)
 {
-	if (addr < TASK_SIZE)
+	if (is_ttbr0_addr(addr))
 		return do_page_fault(addr, esr, regs);
 
 	do_bad_area(addr, esr, regs);
@@ -758,7 +771,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
 	 * re-enabled IRQs. If the address is a kernel address, apply
 	 * BP hardening prior to enabling IRQs and pre-emption.
 	 */
-	if (addr > TASK_SIZE)
+	if (!is_ttbr0_addr(addr))
 		arm64_apply_bp_hardening();
 
 	local_daif_restore(DAIF_PROCCTX);
@@ -771,7 +784,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
 					   struct pt_regs *regs)
 {
 	if (user_mode(regs)) {
-		if (instruction_pointer(regs) > TASK_SIZE)
+		if (!is_ttbr0_addr(instruction_pointer(regs)))
 			arm64_apply_bp_hardening();
 		local_daif_restore(DAIF_PROCCTX);
 	}
@@ -825,7 +838,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
 	if (interrupts_enabled(regs))
 		trace_hardirqs_off();
 
-	if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+	if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
 		arm64_apply_bp_hardening();
 
 	if (!inf->fn(addr, esr, regs)) {
-- 
2.19.1.1215.g8438c0b245-goog
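
A note on the helpers above: with software tag-based KASAN, the pointer tag
lives in the top byte of the address, so a plain "addr >= VA_START"
comparison rejects a valid but tagged kernel pointer until the tag has been
reset. The following stand-alone C sketch (not part of the patch) illustrates
this; the VA_START value assumes a 48-bit VA configuration, and reset_tag()
is a hypothetical stand-in that mimics the sign-extend-from-bit-55 behaviour
of the arm64 untagged_addr()/arch_kasan_reset_tag() helpers.

/*
 * Stand-alone illustration (user space, not kernel code). Assumes a
 * 48-bit VA layout where VA_START == 0xffff000000000000 and mimics the
 * tag reset performed by arch_kasan_reset_tag()/untagged_addr(), i.e.
 * sign-extension from bit 55.
 */
#include <stdint.h>
#include <stdio.h>

#define VA_START 0xffff000000000000UL

/* Sign-extend from bit 55: kernel pointers get 0xff back in the top byte. */
static inline uint64_t reset_tag(uint64_t addr)
{
	return (uint64_t)((int64_t)(addr << 8) >> 8);
}

int main(void)
{
	uint64_t plain  = 0xffff000012345678UL;	/* untagged TTBR1 address */
	uint64_t tagged = 0x2bff000012345678UL;	/* same address, tag 0x2b */

	printf("plain  >= VA_START: %d\n", plain >= VA_START);	/* 1 */
	printf("tagged >= VA_START: %d\n", tagged >= VA_START);	/* 0: naive check fails */
	printf("reset  >= VA_START: %d\n",
	       reset_tag(tagged) >= VA_START);			/* 1: tag-aware check passes */
	return 0;
}

This is why is_ttbr1_addr() strips the tag before comparing against VA_START,
while is_ttbr0_addr() can keep comparing the raw address against TASK_SIZE:
the entry assembly has already cleared tags for TTBR0 addresses.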


Thread overview: 68+ messages
2018-11-19 17:26 [PATCH v11 00/24] kasan: add software tag-based mode for arm64 Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 01/24] kasan, mm: change hooks signatures Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 02/24] kasan, slub: handle pointer tags in early_kmem_cache_node_alloc Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 03/24] kasan: move common generic and tag-based code to common.c Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 04/24] kasan: rename source files to reflect the new naming scheme Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 05/24] kasan: add CONFIG_KASAN_GENERIC and CONFIG_KASAN_SW_TAGS Andrey Konovalov
2018-11-23 17:43   ` Mark Rutland
2018-11-27 16:12     ` Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 06/24] kasan, arm64: adjust shadow size for tag-based mode Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 07/24] kasan: rename kasan_zero_page to kasan_early_shadow_page Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 08/24] kasan: initialize shadow to 0xff for tag-based mode Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 09/24] arm64: move untagged_addr macro from uaccess.h to memory.h Andrey Konovalov
2018-11-23 17:37   ` Mark Rutland
2018-11-27 16:04     ` Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 10/24] kasan: add tag related helper functions Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 11/24] kasan, arm64: untag address in _virt_addr_is_linear Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 12/24] kasan: preassign tags to objects with ctors or SLAB_TYPESAFE_BY_RCU Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 13/24] kasan, arm64: fix up fault handling logic Andrey Konovalov [this message]
2018-11-19 17:26 ` [PATCH v11 14/24] kasan, arm64: enable top byte ignore for the kernel Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 15/24] kasan, mm: perform untagged pointers comparison in krealloc Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 16/24] kasan: split out generic_report.c from report.c Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 17/24] kasan: add bug reporting routines for tag-based mode Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 18/24] mm: move obj_to_index to include/linux/slab_def.h Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 19/24] kasan: add hooks implementation for tag-based mode Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 20/24] kasan, arm64: add brk handler for inline instrumentation Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 21/24] kasan, mm, arm64: tag non slab memory allocated via pagealloc Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 22/24] kasan: add __must_check annotations to kasan hooks Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 23/24] kasan: update documentation Andrey Konovalov
2018-11-19 17:26 ` [PATCH v11 24/24] kasan: add SPDX-License-Identifier mark to source files Andrey Konovalov
2018-11-19 17:28 ` [PATCH v11 00/24] kasan: add software tag-based mode for arm64 Andrey Konovalov
2018-11-19 17:32   ` Mark Rutland
