linux-mm.kvack.org archive mirror
* [PATCH 0/4] arm64: MTE swap and hibernation support
@ 2020-04-22 14:25 Steven Price
  2020-04-22 14:25 ` [PATCH 1/4] mm: Add PG_ARCH_2 page flag Steven Price
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Steven Price @ 2020-04-22 14:25 UTC (permalink / raw)
  To: linux-arm-kernel, linux-mm
  Cc: Steven Price, linux-arch, Andrew Morton, Arnd Bergmann,
	Catalin Marinas, Hugh Dickins, Vincenzo Frascino, Will Deacon

This adds support for swapping and hibernation with MTE tagged memory.
It's based on Catalin's v3 series[1].

To support swap, a new page flag is added which tracks whether a page
has been mapped into user space with MTE enabled (and therefore may have
valid data in the tags). Arch hooks are added to enable the
saving/restoring of these tags (in memory) when the pages are swapped
out.

Hibernation builds on the swap support by simply copying the tags out of
hardware tag storage into normal memory before the hibernation image is
created. On restore the tags are returned to the hardware tag storage.
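
In outline, the flow implemented by the patches below looks like this
(simplified sketch using the hook/function names from the patches, not
literal kernel code):

  swap out:    swap_writepage()
                 -> arch_prepare_to_swap() -> mte_save_tags()
                    (tags copied into an xarray indexed by the swap entry)

  swap in:     do_swap_page() ... set_pte_at()
                 -> mte_sync_tags() -> mte_restore_tags()

  shmem:       shmem_swapin_page()
                 -> arch_swap_restore_tags() -> mte_restore_tags()

  slot freed:  swap_range_free() / swapoff
                 -> arch_swap_invalidate_page() / arch_swap_invalidate_area()
                    (the saved tags are discarded)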

Feedback on the approach is welcomed.

[1] https://lore.kernel.org/linux-arm-kernel/20200421142603.3894-1-catalin.marinas@arm.com/

Steven Price (4):
  mm: Add PG_ARCH_2 page flag
  mm: Add arch hooks for saving/restoring tags
  arm64: mte: Enable swap of tagged pages
  arm64: mte: Save tags when hibernating

 arch/arm64/Kconfig                |   2 +-
 arch/arm64/include/asm/mte.h      |   6 ++
 arch/arm64/include/asm/pgtable.h  |  44 ++++++++++++
 arch/arm64/kernel/hibernate.c     | 116 ++++++++++++++++++++++++++++++
 arch/arm64/lib/mte.S              |  50 +++++++++++++
 arch/arm64/mm/Makefile            |   2 +-
 arch/arm64/mm/mteswap.c           |  98 +++++++++++++++++++++++++
 fs/proc/page.c                    |   3 +
 include/asm-generic/pgtable.h     |  23 ++++++
 include/linux/kernel-page-flags.h |   1 +
 include/linux/page-flags.h        |   3 +
 include/trace/events/mmflags.h    |   9 ++-
 mm/Kconfig                        |   3 +
 mm/page_io.c                      |   6 ++
 mm/shmem.c                        |   6 ++
 mm/swapfile.c                     |   2 +
 tools/vm/page-types.c             |   2 +
 17 files changed, 373 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm64/mm/mteswap.c

-- 
2.20.1




* [PATCH 1/4] mm: Add PG_ARCH_2 page flag
  2020-04-22 14:25 [PATCH 0/4] arm64: MTE swap and hibernation support Steven Price
@ 2020-04-22 14:25 ` Steven Price
  2020-04-22 14:25 ` [PATCH 2/4] mm: Add arch hooks for saving/restoring tags Steven Price
                   ` (2 subsequent siblings)
  3 siblings, 0 replies; 12+ messages in thread
From: Steven Price @ 2020-04-22 14:25 UTC (permalink / raw)
  To: linux-arm-kernel, linux-mm
  Cc: Steven Price, linux-arch, Andrew Morton, Arnd Bergmann,
	Catalin Marinas, Hugh Dickins, Vincenzo Frascino, Will Deacon

For arm64 MTE support it is necessary to be able to mark pages that
contain user-space-visible tags which will need to be saved/restored,
e.g. when swapped out.

To support this, add a new arch-specific page flag (PG_ARCH_2) that
architectures can opt into by selecting ARCH_USES_PG_ARCH_2.

Signed-off-by: Steven Price <steven.price@arm.com>
---
 fs/proc/page.c                    | 3 +++
 include/linux/kernel-page-flags.h | 1 +
 include/linux/page-flags.h        | 3 +++
 include/trace/events/mmflags.h    | 9 ++++++++-
 mm/Kconfig                        | 3 +++
 tools/vm/page-types.c             | 2 ++
 6 files changed, 20 insertions(+), 1 deletion(-)

diff --git a/fs/proc/page.c b/fs/proc/page.c
index f909243d4a66..1b6cbe0849a8 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -217,6 +217,9 @@ u64 stable_page_flags(struct page *page)
 	u |= kpf_copy_bit(k, KPF_PRIVATE_2,	PG_private_2);
 	u |= kpf_copy_bit(k, KPF_OWNER_PRIVATE,	PG_owner_priv_1);
 	u |= kpf_copy_bit(k, KPF_ARCH,		PG_arch_1);
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+	u |= kpf_copy_bit(k, KPF_ARCH_2,	PG_arch_2);
+#endif
 
 	return u;
 };
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index abd20ef93c98..eee1877a354e 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -17,5 +17,6 @@
 #define KPF_ARCH		38
 #define KPF_UNCACHED		39
 #define KPF_SOFTDIRTY		40
+#define KPF_ARCH_2		41
 
 #endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 222f6f7b2bb3..1d4971fe4fee 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -135,6 +135,9 @@ enum pageflags {
 #if defined(CONFIG_IDLE_PAGE_TRACKING) && defined(CONFIG_64BIT)
 	PG_young,
 	PG_idle,
+#endif
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+	PG_arch_2,
 #endif
 	__NR_PAGEFLAGS,
 
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 5fb752034386..5d098029a2d8 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -79,6 +79,12 @@
 #define IF_HAVE_PG_IDLE(flag,string)
 #endif
 
+#ifdef CONFIG_ARCH_USES_PG_ARCH_2
+#define IF_HAVE_PG_ARCH_2(flag,string) ,{1UL << flag, string}
+#else
+#define IF_HAVE_PG_ARCH_2(flag,string)
+#endif
+
 #define __def_pageflag_names						\
 	{1UL << PG_locked,		"locked"	},		\
 	{1UL << PG_waiters,		"waiters"	},		\
@@ -105,7 +111,8 @@ IF_HAVE_PG_MLOCK(PG_mlocked,		"mlocked"	)		\
 IF_HAVE_PG_UNCACHED(PG_uncached,	"uncached"	)		\
 IF_HAVE_PG_HWPOISON(PG_hwpoison,	"hwpoison"	)		\
 IF_HAVE_PG_IDLE(PG_young,		"young"		)		\
-IF_HAVE_PG_IDLE(PG_idle,		"idle"		)
+IF_HAVE_PG_IDLE(PG_idle,		"idle"		)		\
+IF_HAVE_PG_ARCH_2(PG_arch_2,		"arch_2"	)
 
 #define show_page_flags(flags)						\
 	(flags) ? __print_flags(flags, "|",				\
diff --git a/mm/Kconfig b/mm/Kconfig
index c1acc34c1c35..60427ccc3cb8 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -867,4 +867,7 @@ config ARCH_HAS_HUGEPD
 config MAPPING_DIRTY_HELPERS
         bool
 
+config ARCH_USES_PG_ARCH_2
+	bool
+
 endmenu
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 58c0eab71bca..0517c744b04e 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -78,6 +78,7 @@
 #define KPF_ARCH		38
 #define KPF_UNCACHED		39
 #define KPF_SOFTDIRTY		40
+#define KPF_ARCH_2		41
 
 /* [48-] take some arbitrary free slots for expanding overloaded flags
  * not part of kernel API
@@ -135,6 +136,7 @@ static const char * const page_flag_names[] = {
 	[KPF_ARCH]		= "h:arch",
 	[KPF_UNCACHED]		= "c:uncached",
 	[KPF_SOFTDIRTY]		= "f:softdirty",
+	[KPF_ARCH_2]		= "H:arch_2",
 
 	[KPF_READAHEAD]		= "I:readahead",
 	[KPF_SLOB_FREE]		= "P:slob_free",
-- 
2.20.1




* [PATCH 2/4] mm: Add arch hooks for saving/restoring tags
  2020-04-22 14:25 [PATCH 0/4] arm64: MTE swap and hibernation support Steven Price
  2020-04-22 14:25 ` [PATCH 1/4] mm: Add PG_ARCH_2 page flag Steven Price
@ 2020-04-22 14:25 ` Steven Price
  2020-04-22 18:08   ` Dave Hansen
  2020-04-22 14:25 ` [PATCH 3/4] arm64: mte: Enable swap of tagged pages Steven Price
  2020-04-22 14:25 ` [PATCH 4/4] arm64: mte: Save tags when hibernating Steven Price
  3 siblings, 1 reply; 12+ messages in thread
From: Steven Price @ 2020-04-22 14:25 UTC (permalink / raw)
  To: linux-arm-kernel, linux-mm
  Cc: Steven Price, linux-arch, Andrew Morton, Arnd Bergmann,
	Catalin Marinas, Hugh Dickins, Vincenzo Frascino, Will Deacon

Arm's Memory Tagging Extension (MTE) adds some metadata (tags) to every
physical page. When swapping pages out to disk it is necessary to save
these tags, and later restore them when the pages are read back.

Add some hooks along with dummy implementations to enable the
arch code to handle this.

Three new hooks are added to the swap code:
 * arch_prepare_to_swap() and
 * arch_swap_invalidate_page() / arch_swap_invalidate_area().
One new hook is added to shmem:
 * arch_swap_restore_tags()

Signed-off-by: Steven Price <steven.price@arm.com>
---
 include/asm-generic/pgtable.h | 23 +++++++++++++++++++++++
 mm/page_io.c                  |  6 ++++++
 mm/shmem.c                    |  6 ++++++
 mm/swapfile.c                 |  2 ++
 4 files changed, 37 insertions(+)

diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 329b8c8ca703..306cee75b9ec 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -475,6 +475,29 @@ static inline int arch_unmap_one(struct mm_struct *mm,
 }
 #endif
 
+#ifndef __HAVE_ARCH_PREPARE_TO_SWAP
+static inline int arch_prepare_to_swap(struct page *page)
+{
+	return 0;
+}
+#endif
+
+#ifndef __HAVE_ARCH_SWAP_INVALIDATE
+static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
+{
+}
+
+static inline void arch_swap_invalidate_area(int type)
+{
+}
+#endif
+
+#ifndef __HAVE_ARCH_SWAP_RESTORE_TAGS
+static inline void arch_swap_restore_tags(swp_entry_t entry, struct page *page)
+{
+}
+#endif
+
 #ifndef __HAVE_ARCH_PGD_OFFSET_GATE
 #define pgd_offset_gate(mm, addr)	pgd_offset(mm, addr)
 #endif
diff --git a/mm/page_io.c b/mm/page_io.c
index 76965be1d40e..7baee316ac99 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -253,6 +253,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
 		unlock_page(page);
 		goto out;
 	}
+	/* Arch code may have to preserve more data
+	 * than just the page contents, e.g. memory tags
+	 */
+	ret = arch_prepare_to_swap(page);
+	if (ret)
+		goto out;
 	if (frontswap_store(page) == 0) {
 		set_page_writeback(page);
 		unlock_page(page);
diff --git a/mm/shmem.c b/mm/shmem.c
index 73754ed7af69..1010b91f267e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1658,6 +1658,12 @@ static int shmem_swapin_page(struct inode *inode, pgoff_t index,
 	}
 	wait_on_page_writeback(page);
 
+	/*
+	 * Some architectures may have to restore extra metadata to the
+	 * physical page after reading from swap
+	 */
+	arch_swap_restore_tags(swap, page);
+
 	if (shmem_should_replace_page(page, gfp)) {
 		error = shmem_replace_page(&page, gfp, info, index);
 		if (error)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5871a2aa86a5..b39c6520b0cf 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -722,6 +722,7 @@ static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
 	else
 		swap_slot_free_notify = NULL;
 	while (offset <= end) {
+		arch_swap_invalidate_page(si->type, offset);
 		frontswap_invalidate_page(si->type, offset);
 		if (swap_slot_free_notify)
 			swap_slot_free_notify(si->bdev, offset);
@@ -2645,6 +2646,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
 	frontswap_map = frontswap_map_get(p);
 	spin_unlock(&p->lock);
 	spin_unlock(&swap_lock);
+	arch_swap_invalidate_area(p->type);
 	frontswap_invalidate_area(p->type);
 	frontswap_map_set(p, NULL);
 	mutex_unlock(&swapon_mutex);
-- 
2.20.1




* [PATCH 3/4] arm64: mte: Enable swap of tagged pages
  2020-04-22 14:25 [PATCH 0/4] arm64: MTE swap and hibernation support Steven Price
  2020-04-22 14:25 ` [PATCH 1/4] mm: Add PG_ARCH_2 page flag Steven Price
  2020-04-22 14:25 ` [PATCH 2/4] mm: Add arch hooks for saving/restoring tags Steven Price
@ 2020-04-22 14:25 ` Steven Price
  2020-04-22 18:34   ` Dave Hansen
  2020-05-03 15:29   ` Catalin Marinas
  2020-04-22 14:25 ` [PATCH 4/4] arm64: mte: Save tags when hibernating Steven Price
  3 siblings, 2 replies; 12+ messages in thread
From: Steven Price @ 2020-04-22 14:25 UTC (permalink / raw)
  To: linux-arm-kernel, linux-mm
  Cc: Steven Price, linux-arch, Andrew Morton, Arnd Bergmann,
	Catalin Marinas, Hugh Dickins, Vincenzo Frascino, Will Deacon

When swapping pages out to disk it is necessary to save any tags that
have been set, and restore them when swapping back in. To do this, pages
that are mapped so that user space can access tags are marked with a new
page flag (PG_ARCH_2, locally named PG_mte_tagged). When swapping out these pages
the tags are stored in memory and later restored when the pages are
brought back in. Because shmem can swap pages back in without restoring
the userspace PTE it is also necessary to add a hook for shmem.

Signed-off-by: Steven Price <steven.price@arm.com>
---
 arch/arm64/Kconfig               |  2 +-
 arch/arm64/include/asm/mte.h     |  6 ++
 arch/arm64/include/asm/pgtable.h | 44 ++++++++++++++
 arch/arm64/lib/mte.S             | 50 ++++++++++++++++
 arch/arm64/mm/Makefile           |  2 +-
 arch/arm64/mm/mteswap.c          | 98 ++++++++++++++++++++++++++++++++
 6 files changed, 200 insertions(+), 2 deletions(-)
 create mode 100644 arch/arm64/mm/mteswap.c

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index af2e6e5dae1b..697d5c6b1d53 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1615,7 +1615,7 @@ config ARM64_MTE
 	bool "Memory Tagging Extension support"
 	depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
 	select ARCH_USES_HIGH_VMA_FLAGS
-	select ARCH_NO_SWAP
+	select ARCH_USES_PG_ARCH_2
 	help
 	  Memory Tagging (part of the ARMv8.5 Extensions) provides
 	  architectural support for run-time, always-on detection of
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 0ca2aaff07a1..28bb32b270ee 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -17,6 +17,10 @@ unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
 				      unsigned long n);
 unsigned long mte_copy_tags_to_user(void __user *to, void *from,
 				    unsigned long n);
+unsigned long mte_save_page_tags(const void *page_addr, void *tag_storage);
+void mte_restore_page_tags(void *page_addr, const void *tag_storage);
+
+#define PG_mte_tagged PG_arch_2
 
 #ifdef CONFIG_ARM64_MTE
 void flush_mte_state(void);
@@ -26,6 +30,8 @@ long set_mte_ctrl(unsigned long arg);
 long get_mte_ctrl(void);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
 			 unsigned long addr, unsigned long data);
+void *mte_allocate_tag_storage(void);
+void mte_free_tag_storage(char *storage);
 #else
 static inline void flush_mte_state(void)
 {
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 39a372bf8afc..a4ad1b75a1a7 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -80,6 +80,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
 #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
 #define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
+#define pte_tagged(pte)		(!!((pte_val(pte) & PTE_ATTRINDX_MASK) == \
+				    PTE_ATTRINDX(MT_NORMAL_TAGGED)))
 
 #define pte_cont_addr_end(addr, end)						\
 ({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
@@ -268,12 +270,17 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
 		     __func__, pte_val(old_pte), pte_val(pte));
 }
 
+void mte_sync_tags(pte_t *ptep, pte_t pte);
+
 static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
 			      pte_t *ptep, pte_t pte)
 {
 	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
 		__sync_icache_dcache(pte);
 
+	if (system_supports_mte() && pte_tagged(pte))
+		mte_sync_tags(ptep, pte);
+
 	__check_racy_pte_update(mm, ptep, pte);
 
 	set_pte(ptep, pte);
@@ -845,6 +852,43 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
 
 extern int kern_addr_valid(unsigned long addr);
 
+#ifdef CONFIG_ARM64_MTE
+
+#define __HAVE_ARCH_PREPARE_TO_SWAP
+int mte_save_tags(struct page *page);
+static inline int arch_prepare_to_swap(struct page *page)
+{
+	if (system_supports_mte())
+		return mte_save_tags(page);
+	return 0;
+}
+
+#define __HAVE_ARCH_SWAP_INVALIDATE
+void mte_invalidate_tags(int type, pgoff_t offset);
+void mte_invalidate_tags_area(int type);
+
+static inline void arch_swap_invalidate_page(int type, pgoff_t offset)
+{
+	if (system_supports_mte())
+		mte_invalidate_tags(type, offset);
+}
+
+static inline void arch_swap_invalidate_area(int type)
+{
+	if (system_supports_mte())
+		mte_invalidate_tags_area(type);
+}
+
+#define __HAVE_ARCH_SWAP_RESTORE_TAGS
+void mte_restore_tags(swp_entry_t entry, struct page *page);
+static inline void arch_swap_restore_tags(swp_entry_t entry, struct page *page)
+{
+	if (system_supports_mte())
+		mte_restore_tags(entry, page);
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
 #include <asm-generic/pgtable.h>
 
 /*
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 45be04a8c73c..df8800dfe891 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -94,3 +94,53 @@ USER(2f, sttrb	w4, [x0])
 2:	sub	x0, x0, x3		// update the number of tags copied
 	ret
 SYM_FUNC_END(mte_copy_tags_from_user)
+
+/*
+ * Save the tags in a page
+ *   x0 - page address
+ *   x1 - tag storage
+ *
+ * Returns 0 if all tags are 0, otherwise non-zero
+ */
+SYM_FUNC_START(mte_save_page_tags)
+	multitag_transfer_size x7, x5
+	mov	x3, #0
+1:
+	mov	x2, #0
+2:
+	ldgm	x5, [x0]
+	orr	x2, x2, x5
+	add	x0, x0, x7
+	tst	x0, #0xFF		// 16 tag values fit in a register,
+	b.ne	2b			// which is 16*16=256 bytes
+
+	str	x2, [x1], #8
+
+	orr	x3, x3, x2		// OR together all the tag values
+	tst	x0, #(PAGE_SIZE - 1)
+	b.ne	1b
+
+	mov	x0, x3
+	ret
+SYM_FUNC_END(mte_save_page_tags)
+
+/*
+ * Restore the tags in a page
+ *   x0 - page address
+ *   x1 - tag storage
+ */
+SYM_FUNC_START(mte_restore_page_tags)
+	multitag_transfer_size x7, x5
+1:
+	ldr	x2, [x1], #8
+2:
+	stgm	x2, [x0]
+	add	x0, x0, x7
+	tst	x0, #0xFF
+	b.ne	2b
+
+	tst	x0, #(PAGE_SIZE - 1)
+	b.ne	1b
+
+	ret
+SYM_FUNC_END(mte_restore_page_tags)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index e93d696295d0..cd7cb19fc224 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_PTDUMP_CORE)	+= dump.o
 obj-$(CONFIG_PTDUMP_DEBUGFS)	+= ptdump_debugfs.o
 obj-$(CONFIG_NUMA)		+= numa.o
 obj-$(CONFIG_DEBUG_VIRTUAL)	+= physaddr.o
-obj-$(CONFIG_ARM64_MTE)		+= cmppages.o
+obj-$(CONFIG_ARM64_MTE)		+= cmppages.o mteswap.o
 KASAN_SANITIZE_physaddr.o	+= n
 
 obj-$(CONFIG_KASAN)		+= kasan_init.o
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
new file mode 100644
index 000000000000..3f8ab5a6d33b
--- /dev/null
+++ b/arch/arm64/mm/mteswap.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/pagemap.h>
+#include <linux/xarray.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
+#include <asm/mte.h>
+
+static DEFINE_XARRAY(mte_pages);
+
+void *mte_allocate_tag_storage(void)
+{
+	/* tags granule is 16 bytes, 2 tags stored per byte */
+	return kmalloc(PAGE_SIZE / 16 / 2, GFP_KERNEL);
+}
+
+void mte_free_tag_storage(char *storage)
+{
+	kfree(storage);
+}
+
+int mte_save_tags(struct page *page)
+{
+	void *tag_storage, *ret;
+
+	if (!test_bit(PG_mte_tagged, &page->flags))
+		return 0;
+
+	tag_storage = mte_allocate_tag_storage();
+	if (!tag_storage)
+		return -ENOMEM;
+
+	mte_save_page_tags(page_address(page), tag_storage);
+
+	ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
+	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
+		mte_free_tag_storage(tag_storage);
+		return xa_err(ret);
+	} else if (ret) {
+		mte_free_tag_storage(ret);
+	}
+
+	return 0;
+}
+
+void mte_restore_tags(swp_entry_t entry, struct page *page)
+{
+	void *tags = xa_load(&mte_pages, entry.val);
+
+	if (!tags)
+		return;
+
+	mte_restore_page_tags(page_address(page), tags);
+
+	set_bit(PG_mte_tagged, &page->flags);
+}
+
+void mte_invalidate_tags(int type, pgoff_t offset)
+{
+	swp_entry_t entry = swp_entry(type, offset);
+	void *tags = xa_erase(&mte_pages, entry.val);
+
+	mte_free_tag_storage(tags);
+}
+
+void mte_invalidate_tags_area(int type)
+{
+	swp_entry_t entry = swp_entry(type, 0);
+	swp_entry_t last_entry = swp_entry(type + 1, 0);
+	void *tags;
+
+	XA_STATE(xa_state, &mte_pages, entry.val);
+
+	xa_lock(&mte_pages);
+	xas_for_each(&xa_state, tags, last_entry.val - 1) {
+		__xa_erase(&mte_pages, xa_state.xa_index);
+		mte_free_tag_storage(tags);
+	}
+	xa_unlock(&mte_pages);
+}
+
+void mte_sync_tags(pte_t *ptep, pte_t pte)
+{
+	struct page *page = pte_page(pte);
+	pte_t old_pte = READ_ONCE(*ptep);
+	swp_entry_t entry;
+
+	set_bit(PG_mte_tagged, &page->flags);
+
+	if (!is_swap_pte(old_pte))
+		return;
+
+	entry = pte_to_swp_entry(old_pte);
+	if (non_swap_entry(entry))
+		return;
+
+	mte_restore_tags(entry, page);
+}
-- 
2.20.1




* [PATCH 4/4] arm64: mte: Save tags when hibernating
  2020-04-22 14:25 [PATCH 0/4] arm64: MTE swap and hibernation support Steven Price
                   ` (2 preceding siblings ...)
  2020-04-22 14:25 ` [PATCH 3/4] arm64: mte: Enable swap of tagged pages Steven Price
@ 2020-04-22 14:25 ` Steven Price
  3 siblings, 0 replies; 12+ messages in thread
From: Steven Price @ 2020-04-22 14:25 UTC (permalink / raw)
  To: linux-arm-kernel, linux-mm
  Cc: Steven Price, linux-arch, Andrew Morton, Arnd Bergmann,
	Catalin Marinas, Hugh Dickins, Vincenzo Frascino, Will Deacon

When hibernating the contents of all pages in the system are written to
disk, however the MTE tags are not visible to the generic hibernation
code. So just before the hibernation image is created copy the tags out
of the physical tag storage into standard memory so they will be
included in the hibernation image. After hibernation apply the tags back
into the physical tag storage.

Signed-off-by: Steven Price <steven.price@arm.com>
---
 arch/arm64/kernel/hibernate.c | 116 ++++++++++++++++++++++++++++++++++
 1 file changed, 116 insertions(+)

diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 5b73e92c99e3..b89429778b5d 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -31,6 +31,7 @@
 #include <asm/kexec.h>
 #include <asm/memory.h>
 #include <asm/mmu_context.h>
+#include <asm/mte.h>
 #include <asm/pgalloc.h>
 #include <asm/pgtable.h>
 #include <asm/pgtable-hwdef.h>
@@ -277,6 +278,115 @@ static int create_safe_exec_page(void *src_start, size_t length,
 
 #define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))
 
+#ifdef CONFIG_ARM64_MTE
+
+static DEFINE_XARRAY(mte_pages);
+
+static int save_tags(struct page *page, unsigned long pfn)
+{
+	void *tag_storage, *ret;
+
+	tag_storage = mte_allocate_tag_storage();
+	if (!tag_storage)
+		return -ENOMEM;
+
+	mte_save_page_tags(page_address(page), tag_storage);
+
+	ret = xa_store(&mte_pages, pfn, tag_storage, GFP_KERNEL);
+	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
+		mte_free_tag_storage(tag_storage);
+		return xa_err(ret);
+	} else if (WARN(ret, "swsusp: %s: Duplicate entry", __func__)) {
+		mte_free_tag_storage(ret);
+	}
+
+	return 0;
+}
+
+static void swsusp_mte_free_storage(void)
+{
+	XA_STATE(xa_state, &mte_pages, 0);
+	void *tags;
+
+	xa_lock(&mte_pages);
+	xas_for_each(&xa_state, tags, ULONG_MAX) {
+		mte_free_tag_storage(tags);
+	}
+	xa_unlock(&mte_pages);
+
+	xa_destroy(&mte_pages);
+}
+
+static int swsusp_mte_save_tags(void)
+{
+	struct zone *zone;
+	unsigned long pfn, max_zone_pfn;
+	int ret = 0;
+	int n = 0;
+
+	if (!system_supports_mte())
+		return 0;
+
+	for_each_populated_zone(zone) {
+		max_zone_pfn = zone_end_pfn(zone);
+		for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) {
+			struct page *page = pfn_to_online_page(pfn);
+
+			if (!page)
+				continue;
+
+			if (!test_bit(PG_mte_tagged, &page->flags))
+				continue;
+
+			ret = save_tags(page, pfn);
+			n++;
+
+			if (ret) {
+				swsusp_mte_free_storage();
+				goto out;
+			}
+		}
+	}
+
+	pr_info("Saved %d MTE pages\n", n);
+
+out:
+	return ret;
+}
+
+static void swsusp_mte_restore_tags(void)
+{
+	XA_STATE(xa_state, &mte_pages, 0);
+	int n = 0;
+	void *tags;
+
+	xa_lock(&mte_pages);
+	xas_for_each(&xa_state, tags, ULONG_MAX) {
+		unsigned long pfn = xa_state.xa_index;
+		struct page *page = pfn_to_online_page(pfn);
+
+		mte_restore_page_tags(page_address(page), tags);
+
+		mte_free_tag_storage(tags);
+		n++;
+	}
+	xa_unlock(&mte_pages);
+
+	pr_info("Restored %d MTE pages\n", n);
+
+	xa_destroy(&mte_pages);
+}
+#else
+static int swsusp_mte_save_tags(void)
+{
+	return 0;
+}
+
+static void swsusp_mte_restore_tags(void)
+{
+}
+#endif
+
 int swsusp_arch_suspend(void)
 {
 	int ret = 0;
@@ -294,6 +404,10 @@ int swsusp_arch_suspend(void)
 		/* make the crash dump kernel image visible/saveable */
 		crash_prepare_suspend();
 
+		ret = swsusp_mte_save_tags();
+		if (ret)
+			return ret;
+
 		sleep_cpu = smp_processor_id();
 		ret = swsusp_save();
 	} else {
@@ -307,6 +421,8 @@ int swsusp_arch_suspend(void)
 			dcache_clean_range(__hyp_text_start, __hyp_text_end);
 		}
 
+		swsusp_mte_restore_tags();
+
 		/* make the crash dump kernel image protected again */
 		crash_post_resume();
 
-- 
2.20.1




* Re: [PATCH 2/4] mm: Add arch hooks for saving/restoring tags
  2020-04-22 14:25 ` [PATCH 2/4] mm: Add arch hooks for saving/restoring tags Steven Price
@ 2020-04-22 18:08   ` Dave Hansen
  2020-04-23  9:09     ` Catalin Marinas
  0 siblings, 1 reply; 12+ messages in thread
From: Dave Hansen @ 2020-04-22 18:08 UTC (permalink / raw)
  To: Steven Price, linux-arm-kernel, linux-mm
  Cc: linux-arch, Andrew Morton, Arnd Bergmann, Catalin Marinas,
	Hugh Dickins, Vincenzo Frascino, Will Deacon

On 4/22/20 7:25 AM, Steven Price wrote:
> Three new hooks are added to the swap code:
>  * arch_prepare_to_swap() and
>  * arch_swap_invalidate_page() / arch_swap_invalidate_area().
> One new hook is added to shmem:
>  * arch_swap_restore_tags()

How do the tags get restored outside of the shmem path?  I was expecting
to see more arch_swap_restore_tags() sites.



* Re: [PATCH 3/4] arm64: mte: Enable swap of tagged pages
  2020-04-22 14:25 ` [PATCH 3/4] arm64: mte: Enable swap of tagged pages Steven Price
@ 2020-04-22 18:34   ` Dave Hansen
  2020-04-23 13:51     ` Steven Price
  2020-05-03 15:29   ` Catalin Marinas
  1 sibling, 1 reply; 12+ messages in thread
From: Dave Hansen @ 2020-04-22 18:34 UTC (permalink / raw)
  To: Steven Price, linux-arm-kernel, linux-mm
  Cc: linux-arch, Andrew Morton, Arnd Bergmann, Catalin Marinas,
	Hugh Dickins, Vincenzo Frascino, Will Deacon

On 4/22/20 7:25 AM, Steven Price wrote:
> Because shmem can swap pages back in without restoring
> the userspace PTE it is also necessary to add a hook for shmem.

I think the swap readahead code does this as well.  It pulls the page
into the swap cache, but doesn't map it in.

...
> +static DEFINE_XARRAY(mte_pages);
> +
> +void *mte_allocate_tag_storage(void)
> +{
> +	/* tags granule is 16 bytes, 2 tags stored per byte */
> +	return kmalloc(PAGE_SIZE / 16 / 2, GFP_KERNEL);
> +}

Yikes, so this eats 2k of unmovable kernel memory per 64k of swap?  This
is *probably* worth having its own slab just so the memory that's used
for it is less opaque.  It could be pretty large.  But, I guess if
you're worried about how much kernel memory this can eat, there's always
the swap cgroup controller to enforce limits.

This also *increases* the footprint of a page while it's in the swap
cache.  That's at least temporarily a _bit_ counterproductive.

I guess there aren't any nice alternatives, though.  I would imagine
that it would be substantially more complicated to rig the swap code up
to write the tag along with the data.  Or, to store the tag data
somewhere *it* can be reclaimed, like in a kernel-internal shmem file or
something.

> +void mte_free_tag_storage(char *storage)
> +{
> +	kfree(storage);
> +}
> +
> +int mte_save_tags(struct page *page)
> +{
> +	void *tag_storage, *ret;
> +
> +	if (!test_bit(PG_mte_tagged, &page->flags))
> +		return 0;
> +
> +	tag_storage = mte_allocate_tag_storage();
> +	if (!tag_storage)
> +		return -ENOMEM;
> +
> +	mte_save_page_tags(page_address(page), tag_storage);
> +
> +	ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);

This is indexing into the xarray with the swap entry.val established in
do_swap_page()?  Might be nice to make a note where it came from.

> +	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
> +		mte_free_tag_storage(tag_storage);
> +		return xa_err(ret);
> +	} else if (ret) {
> +		mte_free_tag_storage(ret);

Is there a missing "return ret;" here?  Otherwise, it seems like this
might silently fail to save the page's tags.  I'm not sure what
non-xa_is_err() codes get returned, but if there is one, it could end up
here.

> +	}
> +
> +	return 0;
> +}
...

> +void mte_sync_tags(pte_t *ptep, pte_t pte)
> +{
> +	struct page *page = pte_page(pte);
> +	pte_t old_pte = READ_ONCE(*ptep);
> +	swp_entry_t entry;
> +
> +	set_bit(PG_mte_tagged, &page->flags);
> +
> +	if (!is_swap_pte(old_pte))
> +		return;
> +
> +	entry = pte_to_swp_entry(old_pte);
> +	if (non_swap_entry(entry))
> +		return;
> +
> +	mte_restore_tags(entry, page);
> +}

Oh, here it is!  This gets called when replacing a swap PTE with a
present PTE and restores the tags on swap-in.

Does this work for swap PTEs which were copied at fork()?  I *think*
those might end up in here twice, once for the parent and another for
the child.  If both read the page, both will fault and both will do a
set_pte() and end up in here.  I don't think it will do any harm since
it will just set_bit(PG_mte_tagged) twice and restore the same tags
twice.  But, it might be nice to call that out.

This function is a bit light on comments.  It might make sense, for
instance to note that 'pte' is always a tagged PTE at this point.



* Re: [PATCH 2/4] mm: Add arch hooks for saving/restoring tags
  2020-04-22 18:08   ` Dave Hansen
@ 2020-04-23  9:09     ` Catalin Marinas
  2020-04-23 12:37       ` Steven Price
  0 siblings, 1 reply; 12+ messages in thread
From: Catalin Marinas @ 2020-04-23  9:09 UTC (permalink / raw)
  To: Dave Hansen
  Cc: Steven Price, linux-arm-kernel, linux-mm, linux-arch,
	Andrew Morton, Arnd Bergmann, Hugh Dickins, Vincenzo Frascino,
	Will Deacon

On Wed, Apr 22, 2020 at 11:08:10AM -0700, Dave Hansen wrote:
> On 4/22/20 7:25 AM, Steven Price wrote:
> > Three new hooks are added to the swap code:
> >  * arch_prepare_to_swap() and
> >  * arch_swap_invalidate_page() / arch_swap_invalidate_area().
> > One new hook is added to shmem:
> >  * arch_swap_restore_tags()
> 
> How do the tags get restored outside of the shmem path?  I was expecting
> to see more arch_swap_restore_tags() sites.

The restoring is done via set_pte_at() -> mte_sync_tags() ->
mte_restore_tags() in the arch code (see patch 3).
arch_swap_restore_tags() just calls mte_restore_tags() directly.

shmem is slightly problematic as it moves the page from the swap cache
to the shmem one and I think arch_swap_invalidate_page() would have
already been called by the time we get to set_pte_at() (Steven can
correct me if I got this wrong).

-- 
Catalin



* Re: [PATCH 2/4] mm: Add arch hooks for saving/restoring tags
  2020-04-23  9:09     ` Catalin Marinas
@ 2020-04-23 12:37       ` Steven Price
  0 siblings, 0 replies; 12+ messages in thread
From: Steven Price @ 2020-04-23 12:37 UTC (permalink / raw)
  To: Catalin Marinas, Dave Hansen
  Cc: linux-arm-kernel, linux-mm, linux-arch, Andrew Morton,
	Arnd Bergmann, Hugh Dickins, Vincenzo Frascino, Will Deacon

On 23/04/2020 10:09, Catalin Marinas wrote:
> On Wed, Apr 22, 2020 at 11:08:10AM -0700, Dave Hansen wrote:
>> On 4/22/20 7:25 AM, Steven Price wrote:
>>> Three new hooks are added to the swap code:
>>>   * arch_prepare_to_swap() and
>>>   * arch_swap_invalidate_page() / arch_swap_invalidate_area().
>>> One new hook is added to shmem:
>>>   * arch_swap_restore_tags()
>>
>> How do the tags get restored outside of the shmem path?  I was expecting
>> to see more arch_swap_restore_tags() sites.
> 
> The restoring is done via set_pte_at() -> mte_sync_tags() ->
> mte_restore_tags() in the arch code (see patch 3).
> arch_swap_restore_tags() just calls mte_restore_tags() directly.
> 
> shmem is slightly problematic as it moves the page from the swap cache
> to the shmem one and I think arch_swap_invalidate_page() would have
> already been called by the time we get to set_pte_at() (Steven can
> correct me if I got this wrong).

That's correct - shmem can pull in pages (into its own cache) and 
invalidate the swap entries without any process having a PTE restored. 
So we need to hook shmem to restore the tags even though there's no PTE 
restored yet.

The set_pte_at() 'trick' enables delaying the restoring of the tags (in 
the usual case) until the I/O for the page has completed, which might be 
necessary in some cases if the I/O can clobber the tags in memory. I 
couldn't find a better way of hooking this.

Steve



* Re: [PATCH 3/4] arm64: mte: Enable swap of tagged pages
  2020-04-22 18:34   ` Dave Hansen
@ 2020-04-23 13:51     ` Steven Price
  0 siblings, 0 replies; 12+ messages in thread
From: Steven Price @ 2020-04-23 13:51 UTC (permalink / raw)
  To: Dave Hansen, linux-arm-kernel, linux-mm
  Cc: linux-arch, Andrew Morton, Arnd Bergmann, Catalin Marinas,
	Hugh Dickins, Vincenzo Frascino, Will Deacon

On 22/04/2020 19:34, Dave Hansen wrote:
> On 4/22/20 7:25 AM, Steven Price wrote:
>> Because shmem can swap pages back in without restoring
>> the userspace PTE it is also necessary to add a hook for shmem.
> 
> I think the swap readahead code does this as well.  It pulls the page
> into the swap cache, but doesn't map it in.

The swap cache isn't a problem because the swap entry is still valid - 
so the parallel tag cache is still associated with the page. shmem has 
its own cache so the tag data has to be transferred between the caches 
(in this case stored back in the physical tag memory).

> ...
>> +static DEFINE_XARRAY(mte_pages);
>> +
>> +void *mte_allocate_tag_storage(void)
>> +{
>> +	/* tags granule is 16 bytes, 2 tags stored per byte */
>> +	return kmalloc(PAGE_SIZE / 16 / 2, GFP_KERNEL);
>> +}
> 
> Yikes, so this eats 2k of unmovable kernel memory per 64k of swap?  This
> is *probably* worth having its own slab just so the memory that's used
> for it is less opaque.  It could be pretty large.  But, I guess if
> you're worried about how much kernel memory this can eat, there's always
> the swap cgroup controller to enforce limits.

Yes, this is probably something that will need tuning in the future. At 
the moment we don't have much of an idea how much memory will have tags. 
It's 'only' 2k per 64k of memory which has been mapped with PROT_MTE. 
Obvious avenues for potential improvement are:

  * A dedicated slab (as you suggest; rough sketch below)
  * Enabling swapping of the tags themselves
  * Compressing the tags (there might be large runs of duplicated tag 
values)
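
For the slab variant, something along these lines is what I have in mind
(rough, untested sketch - the cache name and the init hook are made up
and not part of this series):

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/slab.h>

static struct kmem_cache *mte_tag_cache;

static int __init mte_tag_cache_init(void)
{
	/* one object holds a page's worth of tags: PAGE_SIZE / 16 / 2 bytes */
	mte_tag_cache = kmem_cache_create("mte_tags", PAGE_SIZE / 16 / 2,
					  0, 0, NULL);
	return mte_tag_cache ? 0 : -ENOMEM;
}
core_initcall(mte_tag_cache_init);

void *mte_allocate_tag_storage(void)
{
	return kmem_cache_alloc(mte_tag_cache, GFP_KERNEL);
}

void mte_free_tag_storage(char *storage)
{
	kmem_cache_free(mte_tag_cache, storage);
}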

> This also *increases* the footprint of a page while it's in the swap
> cache.  That's at least temporarily a _bit_ counterproductive.

Indeed. It *may* be possible to store the tags back in the physical tag 
memory while the page is in the swap cache, but this would require 
hooking into the path where the swap cache is freed without write-back 
of the page data.

> I guess there aren't any nice alternatives, though.  I would imagine
> that it would be substantially more complicated to rig the swap code up
> to write the tag along with the data.  Or, to store the tag data
> somewhere *it* can be reclaimed, like in a kernel-internal shmem file or
> something.

Yes, I'm attempting a simple (hopefully 'obviously right') 
implementation. When we actually have some real applications making use 
of this then we can look at optimisations - that will also allow 
meaningful benchmarking of the optimisations.

>> +void mte_free_tag_storage(char *storage)
>> +{
>> +	kfree(storage);
>> +}
>> +
>> +int mte_save_tags(struct page *page)
>> +{
>> +	void *tag_storage, *ret;
>> +
>> +	if (!test_bit(PG_mte_tagged, &page->flags))
>> +		return 0;
>> +
>> +	tag_storage = mte_allocate_tag_storage();
>> +	if (!tag_storage)
>> +		return -ENOMEM;
>> +
>> +	mte_save_page_tags(page_address(page), tag_storage);
>> +
>> +	ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
> 
> This is indexing into the xarray with the swap entry.val established in
> do_swap_page()?  Might be nice to make a note where it came from.

Good point, I'll put a comment in

>> +	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
>> +		mte_free_tag_storage(tag_storage);
>> +		return xa_err(ret);
>> +	} else if (ret) {
>> +		mte_free_tag_storage(ret);
> 
> Is there a missing "return ret;" here?  Otherwise, it seems like this
> might silently fail to save the page's tags.  I'm not sure what
> non-xa_is_err() codes get returned, but if there is one, it could end up
> here.

No, perhaps a comment is needed! xa_store() will return the value that 
used to be at that index (or NULL if there wasn't an entry). So if the 
swap index is being reused, ret != NULL (and !xa_is_err(ret)), and in this 
case we need to free the old storage to prevent a memory leak. In that 
path the tags have still been successfully saved.
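
Annotated, that hunk reads (same code as above, with the intent spelled
out in comments):

	/* xa_store() returns the previous entry at this index, or NULL */
	ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		/* the store itself failed: free the new buffer, report error */
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (ret) {
		/* index reused: store succeeded, free the *previous* buffer */
		mte_free_tag_storage(ret);
	}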

>> +	}
>> +
>> +	return 0;
>> +}
> ...
> 
>> +void mte_sync_tags(pte_t *ptep, pte_t pte)
>> +{
>> +	struct page *page = pte_page(pte);
>> +	pte_t old_pte = READ_ONCE(*ptep);
>> +	swp_entry_t entry;
>> +
>> +	set_bit(PG_mte_tagged, &page->flags);
>> +
>> +	if (!is_swap_pte(old_pte))
>> +		return;
>> +
>> +	entry = pte_to_swp_entry(old_pte);
>> +	if (non_swap_entry(entry))
>> +		return;
>> +
>> +	mte_restore_tags(entry, page);
>> +}
> 
> Oh, here it is!  This gets called when replacing a swap PTE with a
> present PTE and restores the tags on swap-in.
> 
> Does this work for swap PTEs which were copied at fork()?  I *think*
> those might end up in here twice, once for the parent and another for
> the child.  If both read the page, both will fault and both will do a
> set_pte() and end up in here.  I don't think it will do any harm since
> it will just set_bit(PG_mte_tagged) twice and restore the same tags
> twice.  But, it might be nice to call that out.

Yes, that's what should happen. Beyond adding another page flag (to 
track whether the tags have been restored or not) I couldn't think of a 
neat way of preventing this. And I believe it should be harmless to 
restore the same tags twice.

> This function is a bit light on comments.  It might make sense, for
> instance to note that 'pte' is always a tagged PTE at this point.

Yes, I'll add some more comments here too.

Thanks for the review!

Steve



* Re: [PATCH 3/4] arm64: mte: Enable swap of tagged pages
  2020-04-22 14:25 ` [PATCH 3/4] arm64: mte: Enable swap of tagged pages Steven Price
  2020-04-22 18:34   ` Dave Hansen
@ 2020-05-03 15:29   ` Catalin Marinas
  2020-05-04 12:53     ` Steven Price
  1 sibling, 1 reply; 12+ messages in thread
From: Catalin Marinas @ 2020-05-03 15:29 UTC (permalink / raw)
  To: Steven Price
  Cc: linux-arm-kernel, linux-mm, linux-arch, Andrew Morton,
	Arnd Bergmann, Hugh Dickins, Vincenzo Frascino, Will Deacon

On Wed, Apr 22, 2020 at 03:25:29PM +0100, Steven Price wrote:
> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
> index 39a372bf8afc..a4ad1b75a1a7 100644
> --- a/arch/arm64/include/asm/pgtable.h
> +++ b/arch/arm64/include/asm/pgtable.h
> @@ -80,6 +80,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
>  #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
>  #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
>  #define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
> +#define pte_tagged(pte)		(!!((pte_val(pte) & PTE_ATTRINDX_MASK) == \
> +				    PTE_ATTRINDX(MT_NORMAL_TAGGED)))
>  
>  #define pte_cont_addr_end(addr, end)						\
>  ({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
> @@ -268,12 +270,17 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
>  		     __func__, pte_val(old_pte), pte_val(pte));
>  }
>  
> +void mte_sync_tags(pte_t *ptep, pte_t pte);
> +
>  static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
>  			      pte_t *ptep, pte_t pte)
>  {
>  	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
>  		__sync_icache_dcache(pte);
>  
> +	if (system_supports_mte() && pte_tagged(pte))
> +		mte_sync_tags(ptep, pte);

I think this needs a pte_present() check as well, otherwise pte_tagged()
could match some random swap entry.
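
i.e. something along the lines of (untested):

	if (system_supports_mte() && pte_present(pte) && pte_tagged(pte))
		mte_sync_tags(ptep, pte);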

-- 
Catalin



* Re: [PATCH 3/4] arm64: mte: Enable swap of tagged pages
  2020-05-03 15:29   ` Catalin Marinas
@ 2020-05-04 12:53     ` Steven Price
  0 siblings, 0 replies; 12+ messages in thread
From: Steven Price @ 2020-05-04 12:53 UTC (permalink / raw)
  To: Catalin Marinas
  Cc: linux-arm-kernel, linux-mm, linux-arch, Andrew Morton,
	Arnd Bergmann, Hugh Dickins, Vincenzo Frascino, Will Deacon

On 03/05/2020 16:29, Catalin Marinas wrote:
> On Wed, Apr 22, 2020 at 03:25:29PM +0100, Steven Price wrote:
>> diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
>> index 39a372bf8afc..a4ad1b75a1a7 100644
>> --- a/arch/arm64/include/asm/pgtable.h
>> +++ b/arch/arm64/include/asm/pgtable.h
>> @@ -80,6 +80,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
>>   #define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
>>   #define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))
>>   #define pte_devmap(pte)		(!!(pte_val(pte) & PTE_DEVMAP))
>> +#define pte_tagged(pte)		(!!((pte_val(pte) & PTE_ATTRINDX_MASK) == \
>> +				    PTE_ATTRINDX(MT_NORMAL_TAGGED)))
>>   
>>   #define pte_cont_addr_end(addr, end)						\
>>   ({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
>> @@ -268,12 +270,17 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
>>   		     __func__, pte_val(old_pte), pte_val(pte));
>>   }
>>   
>> +void mte_sync_tags(pte_t *ptep, pte_t pte);
>> +
>>   static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
>>   			      pte_t *ptep, pte_t pte)
>>   {
>>   	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
>>   		__sync_icache_dcache(pte);
>>   
>> +	if (system_supports_mte() && pte_tagged(pte))
>> +		mte_sync_tags(ptep, pte);
> 
> I think this needs a pte_present() check as well, otherwise pte_tagged()
> could match some random swap entry.

Good spot - mte_sync_tags() bails out fairly early in this case (which 
explains why I didn't see any problems). But that happens *after* 
PG_mte_tagged is set, which will lead to pages being incorrectly flagged.

Thanks,

Steve


