From: Alexandru Elisei <alexandru.elisei@arm.com>
To: catalin.marinas@arm.com, will@kernel.org, oliver.upton@linux.dev,
	maz@kernel.org, james.morse@arm.com, suzuki.poulose@arm.com,
	yuzenghui@huawei.com, arnd@arndb.de, akpm@linux-foundation.org,
	mingo@redhat.com, peterz@infradead.org, juri.lelli@redhat.com,
	vincent.guittot@linaro.org, dietmar.eggemann@arm.com,
	rostedt@goodmis.org, bsegall@google.com, mgorman@suse.de,
	bristot@redhat.com, vschneid@redhat.com, mhiramat@kernel.org,
	rppt@kernel.org, hughd@google.com
Cc: pcc@google.com, steven.price@arm.com, anshuman.khandual@arm.com,
	vincenzo.frascino@arm.com, david@redhat.com, eugenis@google.com,
	kcc@google.com, hyesoo.yu@samsung.com,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, kvmarm@lists.linux.dev,
	linux-fsdevel@vger.kernel.org, linux-arch@vger.kernel.org,
	linux-mm@kvack.org, linux-trace-kernel@vger.kernel.org
Subject: [PATCH RFC v2 22/27] arm64: mte: swap: Handle tag restoring when missing tag storage
Date: Sun, 19 Nov 2023 16:57:16 +0000
Message-ID: <20231119165721.9849-23-alexandru.elisei@arm.com>
In-Reply-To: <20231119165721.9849-1-alexandru.elisei@arm.com>

Linux restores tags when a page is swapped in if there are tags associated
with the swap entry that the new page replaces. The saved tags are restored
even if the page will not be mapped as tagged, to protect against cases
where the page is shared between different VMAs and is tagged in some, but
untagged in others. With this approach, the process still sees the correct
tags after a later mprotect(PROT_MTE) on a VMA where MTE was not previously
enabled.
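
For illustration, below is a minimal userspace sketch of that scenario (my
addition, not part of the patch, and not tested against this series): the
same shmem page is mapped twice in one process, tagged in one VMA and
untagged in the other, with mprotect(PROT_MTE) applied to the untagged
mapping afterwards. The PROT_MTE fallback is the arm64 UAPI value; setting
explicit tag values with IRG/STG is omitted.

#define _GNU_SOURCE
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef PROT_MTE
#define PROT_MTE	0x20	/* arm64 value from <asm/mman.h> */
#endif

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* One shared (shmem) page, mapped twice in the same process. */
	int fd = memfd_create("mte-demo", 0);
	if (fd < 0 || ftruncate(fd, page))
		return 1;

	char *tagged = mmap(NULL, page, PROT_READ | PROT_WRITE | PROT_MTE,
			    MAP_SHARED, fd, 0);
	char *plain  = mmap(NULL, page, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fd, 0);
	if (tagged == MAP_FAILED || plain == MAP_FAILED)
		return 1;

	/*
	 * The page is tagged in the first VMA and untagged in the second.
	 * If it is swapped out, the kernel saves its tags; if it is then
	 * faulted back in through the untagged mapping, the saved tags
	 * must still be available, because a later mprotect(PROT_MTE) on
	 * the second mapping expects to see them.
	 */
	memset(tagged, 0xab, page);

	return mprotect(plain, page, PROT_READ | PROT_WRITE | PROT_MTE);
}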

But this poses a challenge for managing tag storage: in the scenario
above, when a new page is allocated to be swapped in for the process where
it will be mapped as untagged, the corresponding tag storage block is not
reserved. When mte_restore_page_tags_by_swp_entry() restores the saved
tags, it overwrites data in the tag storage block associated with the new
page, corrupting that data if the block is in use by a process.
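
(For reference, and assuming 4KB pages: MTE keeps a 4-bit allocation tag
per 16-byte granule, so one page's tags occupy 128 bytes - the
MTE_PAGE_TAG_STORAGE_SIZE buffer copied further down - and a tag storage
block covers 32 times its own size in data. A quick sketch of that
arithmetic, added here for illustration:)

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assuming 4KB pages */
	const unsigned long granule   = 16;	/* MTE tag granule, bytes */
	const unsigned long tag_bits  = 4;	/* allocation tag width */

	unsigned long granules  = page_size / granule;		/* 256 */
	unsigned long tag_bytes = granules * tag_bits / 8;	/* 128 */

	printf("tags per page: %lu bytes (data:tags ratio %lu:1)\n",
	       tag_bytes, page_size / tag_bytes);
	return 0;
}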

Get around this issue by saving the tags in a new xarray, this time indexed
by the page pfn, and then restoring them when tag storage is reserved for
the page.
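
To make the intended flow concrete, here is a compressed userspace model
of the new mechanism (my sketch, not the kernel code: the xarray becomes a
plain array, locking and error handling are dropped, and the tag copy is a
memcpy()):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define TAG_BUF_SIZE	128	/* tags for one 4KB page */
#define MAX_PFNS	16

static void *tags_by_pfn[MAX_PFNS];	/* stands in for the new xarray */
static bool storage_reserved[MAX_PFNS];

/* Models mte_save_tags_for_pfn(): stash the swap tags, keyed by pfn. */
static bool save_tags_for_pfn(void *tags, unsigned long pfn)
{
	if (storage_reserved[pfn])
		return false;		/* the normal restore path handles it */
	free(tags_by_pfn[pfn]);		/* drop a stale buffer, if any */
	tags_by_pfn[pfn] = tags;
	return true;
}

/* Models reserve_tag_storage() calling mte_restore_tags_for_pfn(). */
static void restore_tags_for_pfn(unsigned long pfn, char *page)
{
	void *tags = tags_by_pfn[pfn];

	storage_reserved[pfn] = true;
	if (tags) {
		memcpy(page, tags, TAG_BUF_SIZE);	/* mte_copy_page_tags_from_buf() */
		free(tags);
		tags_by_pfn[pfn] = NULL;
	}
}

int main(void)
{
	char swap_tags[TAG_BUF_SIZE], page[TAG_BUF_SIZE] = { 0 };
	void *buf = malloc(TAG_BUF_SIZE);

	if (!buf)
		return 1;

	/* Swap-in into an untagged mapping: defer the restore. */
	memset(swap_tags, 0x3, sizeof(swap_tags));
	memcpy(buf, swap_tags, TAG_BUF_SIZE);
	if (!save_tags_for_pfn(buf, 5))
		free(buf);

	/* Later, tag storage is reserved for the pfn: apply the tags now. */
	restore_tags_for_pfn(5, page);
	printf("restored tag byte: %#x\n", page[0]);
	return 0;
}

In the actual patch, mte_save_tags_for_pfn() holds the xarray lock across
the reserved check and the store, and free_tag_storage() takes the same
lock to erase stale entries, so the two cannot race.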

Signed-off-by: Alexandru Elisei <alexandru.elisei@arm.com>
---
 arch/arm64/include/asm/mte_tag_storage.h |   9 ++
 arch/arm64/include/asm/pgtable.h         |  11 +++
 arch/arm64/kernel/mte_tag_storage.c      |  20 +++-
 arch/arm64/mm/mteswap.c                  | 112 +++++++++++++++++++++++
 4 files changed, 148 insertions(+), 4 deletions(-)

diff --git a/arch/arm64/include/asm/mte_tag_storage.h b/arch/arm64/include/asm/mte_tag_storage.h
index 6a8b19a6a758..a3c38099fe1a 100644
--- a/arch/arm64/include/asm/mte_tag_storage.h
+++ b/arch/arm64/include/asm/mte_tag_storage.h
@@ -37,6 +37,15 @@ bool page_is_tag_storage(struct page *page);
 
 vm_fault_t handle_page_missing_tag_storage(struct vm_fault *vmf);
 vm_fault_t handle_huge_page_missing_tag_storage(struct vm_fault *vmf);
+
+void tags_by_pfn_lock(void);
+void tags_by_pfn_unlock(void);
+
+void *mte_erase_tags_for_pfn(unsigned long pfn);
+bool mte_save_tags_for_pfn(void *tags, unsigned long pfn);
+void mte_restore_tags_for_pfn(unsigned long start_pfn, int order);
+
+vm_fault_t mte_try_transfer_swap_tags(swp_entry_t entry, struct page *page);
 #else
 static inline bool tag_storage_enabled(void)
 {
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 1704411c096d..1a25b7d601c2 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1084,6 +1084,17 @@ static inline void arch_swap_invalidate_area(int type)
 		mte_invalidate_tags_area_by_swp_entry(type);
 }
 
+#ifdef CONFIG_ARM64_MTE_TAG_STORAGE
+#define __HAVE_ARCH_SWAP_PREPARE_TO_RESTORE
+static inline vm_fault_t arch_swap_prepare_to_restore(swp_entry_t entry,
+						      struct folio *folio)
+{
+	if (tag_storage_enabled())
+		return mte_try_transfer_swap_tags(entry, &folio->page);
+	return 0;
+}
+#endif
+
 #define __HAVE_ARCH_SWAP_RESTORE
 static inline void arch_swap_restore(swp_entry_t entry, struct folio *folio)
 {
diff --git a/arch/arm64/kernel/mte_tag_storage.c b/arch/arm64/kernel/mte_tag_storage.c
index 5096ce859136..6b11bb408b51 100644
--- a/arch/arm64/kernel/mte_tag_storage.c
+++ b/arch/arm64/kernel/mte_tag_storage.c
@@ -547,8 +547,10 @@ int reserve_tag_storage(struct page *page, int order, gfp_t gfp)
 	mutex_lock(&tag_blocks_lock);
 
 	/* Check again, this time with the lock held. */
-	if (page_tag_storage_reserved(page))
-		goto out_unlock;
+	if (page_tag_storage_reserved(page)) {
+		mutex_unlock(&tag_blocks_lock);
+		return 0;
+	}
 
 	/* Make sure existing entries are not freed from under our feet. */
 	xa_lock_irqsave(&tag_blocks_reserved, flags);
@@ -583,9 +585,10 @@ int reserve_tag_storage(struct page *page, int order, gfp_t gfp)
 	}
 
 	page_set_tag_storage_reserved(page, order);
-out_unlock:
 	mutex_unlock(&tag_blocks_lock);
 
+	mte_restore_tags_for_pfn(page_to_pfn(page), order);
+
 	return 0;
 
 out_error:
@@ -612,7 +615,8 @@ void free_tag_storage(struct page *page, int order)
 	struct tag_region *region;
 	unsigned long page_va;
 	unsigned long flags;
-	int ret;
+	void *tags;
+	int i, ret;
 
 	ret = tag_storage_find_block(page, &start_block, &region);
 	if (WARN_ONCE(ret, "Missing tag storage block for pfn 0x%lx", page_to_pfn(page)))
@@ -622,6 +626,14 @@ void free_tag_storage(struct page *page, int order)
 	/* Avoid writeback of dirty tag cache lines corrupting data. */
 	dcache_inval_tags_poc(page_va, page_va + (PAGE_SIZE << order));
 
+	tags_by_pfn_lock();
+	for (i = 0; i < (1 << order); i++) {
+		tags = mte_erase_tags_for_pfn(page_to_pfn(page + i));
+		if (unlikely(tags))
+			mte_free_tag_buf(tags);
+	}
+	tags_by_pfn_unlock();
+
 	end_block = start_block + order_to_num_blocks(order) * region->block_size;
 
 	xa_lock_irqsave(&tag_blocks_reserved, flags);
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
index 2a43746b803f..20d718a514af 100644
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -20,6 +20,114 @@ void mte_free_tag_buf(void *buf)
 	kfree(buf);
 }
 
+#ifdef CONFIG_ARM64_MTE_TAG_STORAGE
+static DEFINE_XARRAY(tags_by_pfn);
+
+void tags_by_pfn_lock(void)
+{
+	xa_lock(&tags_by_pfn);
+}
+
+void tags_by_pfn_unlock(void)
+{
+	xa_unlock(&tags_by_pfn);
+}
+
+void *mte_erase_tags_for_pfn(unsigned long pfn)
+{
+	return __xa_erase(&tags_by_pfn, pfn);
+}
+
+bool mte_save_tags_for_pfn(void *tags, unsigned long pfn)
+{
+	void *entry;
+	int ret;
+
+	ret = xa_reserve(&tags_by_pfn, pfn, GFP_KERNEL);
+	if (ret)
+		return true;
+
+	tags_by_pfn_lock();
+
+	if (page_tag_storage_reserved(pfn_to_page(pfn))) {
+		tags_by_pfn_unlock();
+		return false;
+	}
+
+	entry = __xa_store(&tags_by_pfn, pfn, tags, GFP_ATOMIC);
+	if (xa_is_err(entry)) {
+		xa_release(&tags_by_pfn, pfn);
+		goto out_unlock;
+	} else if (entry) {
+		mte_free_tag_buf(entry);
+	}
+
+out_unlock:
+	tags_by_pfn_unlock();
+	return true;
+}
+
+void mte_restore_tags_for_pfn(unsigned long start_pfn, int order)
+{
+	struct page *page = pfn_to_page(start_pfn);
+	unsigned long pfn;
+	void *tags;
+
+	tags_by_pfn_lock();
+
+	for (pfn = start_pfn; pfn < start_pfn + (1 << order); pfn++, page++) {
+		if (WARN_ON_ONCE(!page_tag_storage_reserved(page)))
+			continue;
+
+		tags = mte_erase_tags_for_pfn(pfn);
+		if (unlikely(tags)) {
+			/*
+			 * Mark the page as tagged so mte_sync_tags() doesn't
+			 * clear the tags.
+			 */
+			WARN_ON_ONCE(!try_page_mte_tagging(page));
+			mte_copy_page_tags_from_buf(page_address(page), tags);
+			set_page_mte_tagged(page);
+			mte_free_tag_buf(tags);
+		}
+	}
+
+	tags_by_pfn_unlock();
+}
+
+/*
+ * Note on locking: swap in/out is done with the folio locked, which eliminates
+ * races with mte_save/restore_page_tags_by_swp_entry.
+ */
+vm_fault_t mte_try_transfer_swap_tags(swp_entry_t entry, struct page *page)
+{
+	void *swap_tags, *pfn_tags;
+	bool saved;
+
+	/*
+	 * mte_restore_page_tags_by_swp_entry() will take care of copying the
+	 * tags over.
+	 */
+	if (likely(page_mte_tagged(page) || page_tag_storage_reserved(page)))
+		return 0;
+
+	swap_tags = xa_load(&tags_by_swp_entry, entry.val);
+	if (!swap_tags)
+		return 0;
+
+	pfn_tags = mte_allocate_tag_buf();
+	if (!pfn_tags)
+		return VM_FAULT_OOM;
+
+	memcpy(pfn_tags, swap_tags, MTE_PAGE_TAG_STORAGE_SIZE);
+	saved = mte_save_tags_for_pfn(pfn_tags, page_to_pfn(page));
+	if (!saved)
+		mte_free_tag_buf(pfn_tags);
+
+	return 0;
+}
+#endif
+
 int mte_save_page_tags_by_swp_entry(struct page *page)
 {
 	void *tags, *ret;
@@ -54,6 +162,10 @@ void mte_restore_page_tags_by_swp_entry(swp_entry_t entry, struct page *page)
 	if (!tags)
 		return;
 
+	/* Tags will be restored when tag storage is reserved. */
+	if (tag_storage_enabled() && unlikely(!page_tag_storage_reserved(page)))
+		return;
+
 	if (try_page_mte_tagging(page)) {
 		mte_copy_page_tags_from_buf(page_address(page), tags);
 		set_page_mte_tagged(page);
-- 
2.42.1

