From: Andrey Konovalov <andreyknvl@google.com>
To: Dmitry Vyukov <dvyukov@google.com>,
	Vincenzo Frascino <vincenzo.frascino@arm.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	kasan-dev@googlegroups.com
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>,
	Alexander Potapenko <glider@google.com>,
	Marco Elver <elver@google.com>,
	Evgenii Stepanov <eugenis@google.com>,
	Elena Petrova <lenaptr@google.com>,
	Branislav Rankov <Branislav.Rankov@arm.com>,
	Kevin Brodsky <kevin.brodsky@arm.com>,
	Will Deacon <will.deacon@arm.com>,
	Andrew Morton <akpm@linux-foundation.org>,
	linux-arm-kernel@lists.infradead.org, linux-mm@kvack.org,
	linux-kernel@vger.kernel.org,
	Andrey Konovalov <andreyknvl@google.com>
Subject: [PATCH 05/35] kasan: rename KASAN_SHADOW_* to KASAN_GRANULE_*
Date: Fri, 14 Aug 2020 19:26:47 +0200
Message-ID: <d4f7c14e57341ae52df4fde5425e2ae5d24534dd.1597425745.git.andreyknvl@google.com>
In-Reply-To: <cover.1597425745.git.andreyknvl@google.com>

This is a preparatory commit for the upcoming addition of a new hardware
tag-based (MTE-based) KASAN mode.

The new mode won't use shadow memory but will still operate on memory
granules, with each granule mapping to a single metadata entry. Rename
KASAN_SHADOW_SCALE_SIZE to KASAN_GRANULE_SIZE, and KASAN_SHADOW_MASK to
KASAN_GRANULE_MASK.
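
For context (an illustration, not part of this patch): in the software
modes one shadow byte describes one granule of memory, so the
address-to-shadow translation is roughly the sketch below, where
KASAN_SHADOW_OFFSET is the arch-defined base of the shadow region:

	/*
	 * Each shadow byte covers KASAN_GRANULE_SIZE ==
	 * 1 << KASAN_SHADOW_SCALE_SHIFT bytes of kernel memory,
	 * so the translation is a shift plus an offset.
	 */
	static inline void *kasan_mem_to_shadow(const void *addr)
	{
		return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET;
	}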

Also use KASAN_GRANULE_MASK where the value is used as a mask, and
KASAN_GRANULE_SIZE otherwise.
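
A minimal sketch of the convention (simplified from the hunks below;
addr and size are assumed local variables):

	/* MASK when a mask is meant: offset of an address in its granule. */
	unsigned long offset = addr & KASAN_GRANULE_MASK;

	/*
	 * SIZE when a size is meant: round a length up to whole granules,
	 * as kasan_module_alloc() now spells (size + KASAN_GRANULE_SIZE - 1)
	 * instead of (size + KASAN_SHADOW_MASK).
	 */
	unsigned long granules = (size + KASAN_GRANULE_SIZE - 1) / KASAN_GRANULE_SIZE;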

No functional changes.

Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
---
 Documentation/dev-tools/kasan.rst |  2 +-
 lib/test_kasan.c                  |  2 +-
 mm/kasan/common.c                 | 39 ++++++++++++++++---------------
 mm/kasan/generic.c                | 14 +++++------
 mm/kasan/generic_report.c         |  8 +++----
 mm/kasan/init.c                   |  8 +++----
 mm/kasan/kasan.h                  |  4 ++--
 mm/kasan/report.c                 | 10 ++++----
 mm/kasan/tags_report.c            |  2 +-
 9 files changed, 45 insertions(+), 44 deletions(-)

diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index 38fd5681fade..a3030fc6afe5 100644
--- a/Documentation/dev-tools/kasan.rst
+++ b/Documentation/dev-tools/kasan.rst
@@ -264,7 +264,7 @@ Most mappings in vmalloc space are small, requiring less than a full
 page of shadow space. Allocating a full shadow page per mapping would
 therefore be wasteful. Furthermore, to ensure that different mappings
 use different shadow pages, mappings would have to be aligned to
-``KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE``.
+``KASAN_GRANULE_SIZE * PAGE_SIZE``.
 
 Instead, we share backing space across multiple mappings. We allocate
 a backing page when a mapping in vmalloc space uses a particular page
diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index 5d3f496893ef..247a14f40016 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -25,7 +25,7 @@
 
 #include "../mm/kasan/kasan.h"
 
-#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_SHADOW_SCALE_SIZE)
+#define OOB_TAG_OFF (IS_ENABLED(CONFIG_KASAN_GENERIC) ? 0 : KASAN_GRANULE_SIZE)
 
 /*
  * We assign some test results to these globals to make sure the tests
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 65933b27df81..c9daf2c33651 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -111,7 +111,7 @@ void *memcpy(void *dest, const void *src, size_t len)
 
 /*
  * Poisons the shadow memory for 'size' bytes starting from 'addr'.
- * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
+ * Memory addresses should be aligned to KASAN_GRANULE_SIZE.
  */
 void kasan_poison_memory(const void *address, size_t size, u8 value)
 {
@@ -143,13 +143,13 @@ void kasan_unpoison_memory(const void *address, size_t size)
 
 	kasan_poison_memory(address, size, tag);
 
-	if (size & KASAN_SHADOW_MASK) {
+	if (size & KASAN_GRANULE_MASK) {
 		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
 
 		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 			*shadow = tag;
 		else
-			*shadow = size & KASAN_SHADOW_MASK;
+			*shadow = size & KASAN_GRANULE_MASK;
 	}
 }
 
@@ -301,7 +301,7 @@ void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
 void kasan_poison_object_data(struct kmem_cache *cache, void *object)
 {
 	kasan_poison_memory(object,
-			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
+			round_up(cache->object_size, KASAN_GRANULE_SIZE),
 			KASAN_KMALLOC_REDZONE);
 }
 
@@ -373,7 +373,7 @@ static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
 {
 	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
 		return shadow_byte < 0 ||
-			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
+			shadow_byte >= KASAN_GRANULE_SIZE;
 
 	/* else CONFIG_KASAN_SW_TAGS: */
 	if ((u8)shadow_byte == KASAN_TAG_INVALID)
@@ -412,7 +412,7 @@ static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
 		return true;
 	}
 
-	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
+	rounded_up_size = round_up(cache->object_size, KASAN_GRANULE_SIZE);
 	kasan_poison_memory(object, rounded_up_size, KASAN_KMALLOC_FREE);
 
 	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
@@ -445,9 +445,9 @@ static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
 		return NULL;
 
 	redzone_start = round_up((unsigned long)(object + size),
-				KASAN_SHADOW_SCALE_SIZE);
+				KASAN_GRANULE_SIZE);
 	redzone_end = round_up((unsigned long)object + cache->object_size,
-				KASAN_SHADOW_SCALE_SIZE);
+				KASAN_GRANULE_SIZE);
 
 	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		tag = assign_tag(cache, object, false, keep_tag);
@@ -491,7 +491,7 @@ void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
 
 	page = virt_to_page(ptr);
 	redzone_start = round_up((unsigned long)(ptr + size),
-				KASAN_SHADOW_SCALE_SIZE);
+				KASAN_GRANULE_SIZE);
 	redzone_end = (unsigned long)ptr + page_size(page);
 
 	kasan_unpoison_memory(ptr, size);
@@ -589,8 +589,8 @@ static int __meminit kasan_mem_notifier(struct notifier_block *nb,
 	shadow_size = nr_shadow_pages << PAGE_SHIFT;
 	shadow_end = shadow_start + shadow_size;
 
-	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
-		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
+	if (WARN_ON(mem_data->nr_pages % KASAN_GRANULE_SIZE) ||
+		WARN_ON(start_kaddr % (KASAN_GRANULE_SIZE << PAGE_SHIFT)))
 		return NOTIFY_BAD;
 
 	switch (action) {
@@ -748,7 +748,7 @@ void kasan_poison_vmalloc(const void *start, unsigned long size)
 	if (!is_vmalloc_or_module_addr(start))
 		return;
 
-	size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+	size = round_up(size, KASAN_GRANULE_SIZE);
 	kasan_poison_memory(start, size, KASAN_VMALLOC_INVALID);
 }
 
@@ -861,22 +861,22 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 	unsigned long region_start, region_end;
 	unsigned long size;
 
-	region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
-	region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+	region_start = ALIGN(start, PAGE_SIZE * KASAN_GRANULE_SIZE);
+	region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_GRANULE_SIZE);
 
 	free_region_start = ALIGN(free_region_start,
-				  PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+				  PAGE_SIZE * KASAN_GRANULE_SIZE);
 
 	if (start != region_start &&
 	    free_region_start < region_start)
-		region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+		region_start -= PAGE_SIZE * KASAN_GRANULE_SIZE;
 
 	free_region_end = ALIGN_DOWN(free_region_end,
-				     PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
+				     PAGE_SIZE * KASAN_GRANULE_SIZE);
 
 	if (end != region_end &&
 	    free_region_end > region_end)
-		region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;
+		region_end += PAGE_SIZE * KASAN_GRANULE_SIZE;
 
 	shadow_start = kasan_mem_to_shadow((void *)region_start);
 	shadow_end = kasan_mem_to_shadow((void *)region_end);
@@ -902,7 +902,8 @@ int kasan_module_alloc(void *addr, size_t size)
 	unsigned long shadow_start;
 
 	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
-	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+	scaled_size = (size + KASAN_GRANULE_SIZE - 1) >>
+				KASAN_SHADOW_SCALE_SHIFT;
 	shadow_size = round_up(scaled_size, PAGE_SIZE);
 
 	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index 4b5f905198d8..f6d68aa9872f 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -51,7 +51,7 @@ static __always_inline bool memory_is_poisoned_1(unsigned long addr)
 	s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
 
 	if (unlikely(shadow_value)) {
-		s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+		s8 last_accessible_byte = addr & KASAN_GRANULE_MASK;
 		return unlikely(last_accessible_byte >= shadow_value);
 	}
 
@@ -67,7 +67,7 @@ static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
 	 * Access crosses 8(shadow size)-byte boundary. Such access maps
 	 * into 2 shadow bytes, so we need to check them both.
 	 */
-	if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+	if (unlikely(((addr + size - 1) & KASAN_GRANULE_MASK) < size - 1))
 		return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
 
 	return memory_is_poisoned_1(addr + size - 1);
@@ -78,7 +78,7 @@ static __always_inline bool memory_is_poisoned_16(unsigned long addr)
 	u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
 
 	/* Unaligned 16-bytes access maps into 3 shadow bytes. */
-	if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+	if (unlikely(!IS_ALIGNED(addr, KASAN_GRANULE_SIZE)))
 		return *shadow_addr || memory_is_poisoned_1(addr + 15);
 
 	return *shadow_addr;
@@ -139,7 +139,7 @@ static __always_inline bool memory_is_poisoned_n(unsigned long addr,
 		s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
 
 		if (unlikely(ret != (unsigned long)last_shadow ||
-			((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+			((long)(last_byte & KASAN_GRANULE_MASK) >= *last_shadow)))
 			return true;
 	}
 	return false;
@@ -205,7 +205,7 @@ void kasan_cache_shutdown(struct kmem_cache *cache)
 
 static void register_global(struct kasan_global *global)
 {
-	size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+	size_t aligned_size = round_up(global->size, KASAN_GRANULE_SIZE);
 
 	kasan_unpoison_memory(global->beg, global->size);
 
@@ -279,10 +279,10 @@ EXPORT_SYMBOL(__asan_handle_no_return);
 /* Emitted by compiler to poison alloca()ed objects. */
 void __asan_alloca_poison(unsigned long addr, size_t size)
 {
-	size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+	size_t rounded_up_size = round_up(size, KASAN_GRANULE_SIZE);
 	size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
 			rounded_up_size;
-	size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+	size_t rounded_down_size = round_down(size, KASAN_GRANULE_SIZE);
 
 	const void *left_redzone = (const void *)(addr -
 			KASAN_ALLOCA_REDZONE_SIZE);
diff --git a/mm/kasan/generic_report.c b/mm/kasan/generic_report.c
index a38c7a9e192a..4dce1633b082 100644
--- a/mm/kasan/generic_report.c
+++ b/mm/kasan/generic_report.c
@@ -39,7 +39,7 @@ void *find_first_bad_addr(void *addr, size_t size)
 	void *p = addr;
 
 	while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
-		p += KASAN_SHADOW_SCALE_SIZE;
+		p += KASAN_GRANULE_SIZE;
 	return p;
 }
 
@@ -51,14 +51,14 @@ static const char *get_shadow_bug_type(struct kasan_access_info *info)
 	shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
 
 	/*
-	 * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
+	 * If shadow byte value is in [0, KASAN_GRANULE_SIZE) we can look
 	 * at the next shadow byte to determine the type of the bad access.
 	 */
-	if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
+	if (*shadow_addr > 0 && *shadow_addr <= KASAN_GRANULE_SIZE - 1)
 		shadow_addr++;
 
 	switch (*shadow_addr) {
-	case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
+	case 0 ... KASAN_GRANULE_SIZE - 1:
 		/*
 		 * In theory it's still possible to see these shadow values
 		 * due to a data race in the kernel code.
diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index fe6be0be1f76..754b641c83c7 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -447,8 +447,8 @@ void kasan_remove_zero_shadow(void *start, unsigned long size)
 	end = addr + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
 	if (WARN_ON((unsigned long)start %
-			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+			(KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
+	    WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
 		return;
 
 	for (; addr < end; addr = next) {
@@ -482,8 +482,8 @@ int kasan_add_zero_shadow(void *start, unsigned long size)
 	shadow_end = shadow_start + (size >> KASAN_SHADOW_SCALE_SHIFT);
 
 	if (WARN_ON((unsigned long)start %
-			(KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)) ||
-	    WARN_ON(size % (KASAN_SHADOW_SCALE_SIZE * PAGE_SIZE)))
+			(KASAN_GRANULE_SIZE * PAGE_SIZE)) ||
+	    WARN_ON(size % (KASAN_GRANULE_SIZE * PAGE_SIZE)))
 		return -EINVAL;
 
 	ret = kasan_populate_early_shadow(shadow_start, shadow_end);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 03450d3b31f7..c31e2c739301 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -5,8 +5,8 @@
 #include <linux/kasan.h>
 #include <linux/stackdepot.h>
 
-#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
-#define KASAN_SHADOW_MASK       (KASAN_SHADOW_SCALE_SIZE - 1)
+#define KASAN_GRANULE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)
 
 #define KASAN_TAG_KERNEL	0xFF /* native kernel pointers tag */
 #define KASAN_TAG_INVALID	0xFE /* inaccessible memory tag */
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 4f49fa6cd1aa..7c025d792e2f 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -317,24 +317,24 @@ static bool __must_check get_address_stack_frame_info(const void *addr,
 		return false;
 
 	aligned_addr = round_down((unsigned long)addr, sizeof(long));
-	mem_ptr = round_down(aligned_addr, KASAN_SHADOW_SCALE_SIZE);
+	mem_ptr = round_down(aligned_addr, KASAN_GRANULE_SIZE);
 	shadow_ptr = kasan_mem_to_shadow((void *)aligned_addr);
 	shadow_bottom = kasan_mem_to_shadow(end_of_stack(current));
 
 	while (shadow_ptr >= shadow_bottom && *shadow_ptr != KASAN_STACK_LEFT) {
 		shadow_ptr--;
-		mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+		mem_ptr -= KASAN_GRANULE_SIZE;
 	}
 
 	while (shadow_ptr >= shadow_bottom && *shadow_ptr == KASAN_STACK_LEFT) {
 		shadow_ptr--;
-		mem_ptr -= KASAN_SHADOW_SCALE_SIZE;
+		mem_ptr -= KASAN_GRANULE_SIZE;
 	}
 
 	if (shadow_ptr < shadow_bottom)
 		return false;
 
-	frame = (const unsigned long *)(mem_ptr + KASAN_SHADOW_SCALE_SIZE);
+	frame = (const unsigned long *)(mem_ptr + KASAN_GRANULE_SIZE);
 	if (frame[0] != KASAN_CURRENT_STACK_FRAME_MAGIC) {
 		pr_err("KASAN internal error: frame info validation failed; invalid marker: %lu\n",
 		       frame[0]);
@@ -572,6 +572,6 @@ void kasan_non_canonical_hook(unsigned long addr)
 	else
 		bug_type = "maybe wild-memory-access";
 	pr_alert("KASAN: %s in range [0x%016lx-0x%016lx]\n", bug_type,
-		 orig_addr, orig_addr + KASAN_SHADOW_MASK);
+		 orig_addr, orig_addr + KASAN_GRANULE_SIZE - 1);
 }
 #endif
diff --git a/mm/kasan/tags_report.c b/mm/kasan/tags_report.c
index bee43717d6f0..6ddb55676a7c 100644
--- a/mm/kasan/tags_report.c
+++ b/mm/kasan/tags_report.c
@@ -81,7 +81,7 @@ void *find_first_bad_addr(void *addr, size_t size)
 	void *end = p + size;
 
 	while (p < end && tag == *(u8 *)kasan_mem_to_shadow(p))
-		p += KASAN_SHADOW_SCALE_SIZE;
+		p += KASAN_GRANULE_SIZE;
 	return p;
 }
 
-- 
2.28.0.220.ged08abb693-goog



