From: Wengang Wang <wen.gang.wang@oracle.com>
To: linux-mm@kvack.org, aryabinin@virtuozzo.com
Cc: wen.gang.wang@oracle.com, glider@google.com, dvyukov@google.com
Subject: [PATCH 3/5] mm/kasan: do advanced check
Date: Fri, 17 Nov 2017 14:30:41 -0800
Message-ID: <20171117223043.7277-4-wen.gang.wang@oracle.com>
In-Reply-To: <20171117223043.7277-1-wen.gang.wang@oracle.com>

This is the 3rd patch of the KASAN advanced check series.
It performs the advanced check in the poison check functions and reports
violations found by the advanced check.
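
For illustration only (not part of the diff below): a minimal sketch of what
an advanced-check callback could look like, matching the ac_check_func
signature introduced by this patch. The callback name and the use of ac_data
as an owning task are assumptions; registering the callback and binding it
to memory is done in patch 4/5.

	/* Example KASAN_ADVCHK_OWNER-style callback: report a violation for
	 * writes done by any task other than the owner recorded in ac_data.
	 * Assumes <linux/sched.h> for current and struct task_struct.
	 */
	static bool example_owner_write_check(bool write, void *data)
	{
		struct task_struct *owner = data;

		/* Returning true makes adv_check() record a violation. */
		return write && current != owner;
	}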

Signed-off-by: Wengang Wang <wen.gang.wang@oracle.com>

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5017269..ba00594 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -16,6 +16,13 @@ struct task_struct;
 #include <asm/kasan.h>
 #include <asm/pgtable.h>
 
+/* advanced check type */
+enum kasan_adv_chk_type {
+	/* write access is allowed only for the owner */
+	KASAN_ADVCHK_OWNER,
+	__KASAN_ADVCHK_TYPE_COUNT,
+};
+
 extern unsigned char kasan_zero_page[PAGE_SIZE];
 extern pte_t kasan_zero_pte[PTRS_PER_PTE];
 extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 4501422..e945df7 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -40,6 +40,51 @@
 #include "kasan.h"
 #include "../slab.h"
 
+struct kasan_adv_check kasan_adv_checks[(1 << KASAN_CHECK_BITS)-2];
+static int kasan_adv_nr_checks;
+static DEFINE_SPINLOCK(kasan_adv_lock);
+
+/* We do not take kasan_adv_lock here. Taking the lock could either cause
+ * a deadlock or hurt performance further.
+ * We are still safe without the lock because kasan_adv_nr_checks only ever
+ * increases. The worst (and rare) case is that kasan_adv_nr_checks is stale
+ * (smaller than it really is) and we miss a check.
+ */
+struct kasan_adv_check *get_check_by_nr(int nr)
+{
+	if (nr > kasan_adv_nr_checks || nr <= 0)
+		return NULL;
+	return &kasan_adv_checks[nr-1];
+}
+
+static __always_inline bool adv_check(bool write, s8 check)
+{
+	struct kasan_adv_check *chk = get_check_by_nr(check);
+
+	if (likely(chk)) {
+		bool violation = chk->ac_check_func(write, chk->ac_data);
+
+		if (unlikely(violation))
+			chk->ac_violation = violation;
+		return violation;
+	}
+	return false;
+}
+
+static __always_inline unsigned long adv_check_shadow(const s8 *shadow_addr,
+					     size_t shadow_size, bool write)
+{
+	s8 check;
+	int i;
+
+	for (i = 0; i < shadow_size; i++) {
+		check = kasan_get_check(*(shadow_addr + i));
+		if (unlikely(check && adv_check(write, check)))
+			return (unsigned long)(shadow_addr + i);
+	}
+	return 0;
+}
+
 void kasan_enable_current(void)
 {
 	current->kasan_depth++;
@@ -128,8 +173,11 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr, bool write)
 
 	if (unlikely(shadow_value)) {
 		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
-		return unlikely(last_accessible_byte >=
-				KASAN_GET_POISON(shadow_value));
+		if (unlikely(KASAN_GET_POISON(shadow_value) &&
+			last_accessible_byte >= KASAN_GET_POISON(shadow_value)))
+			return true;
+		if (unlikely(kasan_get_check(shadow_value)))
+			return adv_check(write, kasan_get_check(shadow_value));
 	}
 
 	return false;
@@ -145,9 +193,14 @@ static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
 	 * Access crosses 8(shadow size)-byte boundary. Such access maps
 	 * into 2 shadow bytes, so we need to check them both.
 	 */
-	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
-		return KASAN_GET_POISON(*shadow_addr) ||
-		       memory_is_poisoned_1(addr + size - 1, write);
+	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1)) {
+		u8 check = kasan_get_check(*shadow_addr);
+
+		if (unlikely(KASAN_GET_POISON(*shadow_addr)))
+			return true;
+		if (unlikely(check && adv_check(write, check)))
+			return true;
+	}
 
 	return memory_is_poisoned_1(addr + size - 1, write);
 }
@@ -157,21 +210,31 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr,
 {
 	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
 
-	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
-	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
-		return KASAN_GET_POISON_16(*shadow_addr) ||
-		       memory_is_poisoned_1(addr + 15, write);
+	if (unlikely(KASAN_GET_POISON_16(*shadow_addr)))
+		return true;
+
+	if (unlikely(adv_check_shadow((s8 *)shadow_addr, 2, write)))
+		return true;
 
-	return *shadow_addr;
+	if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+		return false;
+
+	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
+	return memory_is_poisoned_1(addr + 15, write);
 }
 
 static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
 						      size_t size,
 						      bool write)
 {
+	int check;
+
 	while (size) {
 		if (unlikely(KASAN_GET_POISON(*start)))
 			return (unsigned long)start;
+		check = kasan_get_check(*start);
+		if (unlikely(check && adv_check(write, check)))
+			return (unsigned long)start;
 		start++;
 		size--;
 	}
@@ -202,6 +265,9 @@ static __always_inline unsigned long memory_is_nonzero(const void *start,
 	while (words) {
 		if (unlikely(KASAN_GET_POISON_64(*(u64 *)start)))
 			return bytes_is_nonzero(start, 8, write);
+		ret = adv_check_shadow(start, sizeof(u64), write);
+		if (unlikely(ret))
+			return (unsigned long)ret;
 		start += 8;
 		words--;
 	}
@@ -227,6 +293,11 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr,
 			((long)(last_byte & KASAN_SHADOW_MASK) >=
 			KASAN_GET_POISON(*last_shadow))))
 			return true;
+		else {
+			s8 check = kasan_get_check(*last_shadow);
+
+			return unlikely(check && adv_check(write, check));
+		}
 	}
 	return false;
 }
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index df7fbfe..2e2af6d 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -111,6 +111,16 @@ struct kasan_free_meta {
 	struct qlist_node quarantine_link;
 };
 
+struct kasan_adv_check {
+	enum kasan_adv_chk_type	ac_type;
+	bool			(*ac_check_func)(bool, void *);
+	void			*ac_data;
+	char			*ac_msg;
+	bool			ac_violation;
+};
+
+extern struct kasan_adv_check *get_check_by_nr(int nr);
+
 struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
 					const void *object);
 struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index caf3a13..403bae1 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -57,10 +57,26 @@ static bool addr_has_shadow(struct kasan_access_info *info)
 		kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
 }
 
+static bool is_clean_byte(u8 shadow_val)
+{
+	u8 poison = KASAN_GET_POISON(shadow_val);
+	u8 check = kasan_get_check(shadow_val);
+
+	if (poison > 0 && poison <= KASAN_SHADOW_SCALE_SIZE - 1) {
+		struct kasan_adv_check *chk = get_check_by_nr(check);
+
+		if (chk && chk->ac_violation)
+			return false;
+		return true;
+	}
+
+	return false;
+}
+
 static const char *get_shadow_bug_type(struct kasan_access_info *info)
 {
 	const char *bug_type = "unknown-crash";
-	u8 *shadow_addr;
+	u8 *shadow_addr, check;
 	s8 poison;
 
 	info->first_bad_addr = find_first_bad_addr(info->access_addr,
@@ -68,12 +84,15 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
 
 	shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
 	poison = KASAN_GET_POISON(*shadow_addr);
+	check = kasan_get_check(*shadow_addr);
 	/*
 	 * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
 	 * at the next shadow byte to determine the type of the bad access.
 	 */
-	if (poison > 0 && poison <= KASAN_SHADOW_SCALE_SIZE - 1)
+	if (is_clean_byte(*shadow_addr)) {
 		poison = KASAN_GET_POISON(*(shadow_addr + 1));
+		check = kasan_get_check(*(shadow_addr + 1));
+	}
 
 	if (poison < 0)
 		poison |= KASAN_CHECK_MASK;
@@ -108,6 +127,15 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
 		break;
 	}
 
+	if (check) {
+		struct kasan_adv_check *chk = get_check_by_nr(check);
+
+		if (chk && chk->ac_violation) {
+			bug_type = chk->ac_msg;
+			chk->ac_violation = false;
+		}
+	}
+
 	return bug_type;
 }
 
-- 
2.9.4
