From: Liam Howlett <liam.howlett@oracle.com>
To: "maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>
Cc: Song Liu <songliubraving@fb.com>,
	Davidlohr Bueso <dave@stgolabs.net>,
	"Paul E . McKenney" <paulmck@kernel.org>,
	Matthew Wilcox <willy@infradead.org>,
	Laurent Dufour <ldufour@linux.ibm.com>,
	David Rientjes <rientjes@google.com>,
	Axel Rasmussen <axelrasmussen@google.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Vlastimil Babka <vbabka@suse.cz>, Rik van Riel <riel@surriel.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Michel Lespinasse <walken.cr@gmail.com>,
	Liam Howlett <liam.howlett@oracle.com>
Subject: [PATCH v2 17/61] mm/mmap: Use advanced maple tree API for mmap_region()
Date: Tue, 17 Aug 2021 15:47:13 +0000
Message-ID: <20210817154651.1570984-18-Liam.Howlett@oracle.com>
In-Reply-To: <20210817154651.1570984-1-Liam.Howlett@oracle.com>

From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Changing mmap_region() to use the maple tree state and the advanced
maple tree interface greatly reduces the amount of tree walking
required.

This change removes the last caller of munmap_vma_range(), so drop this
unused function.

Add vma_expand() to expand a VMA when possible: it performs the
necessary hugepage check and uprobe_munmap of file-backed VMAs, flushes
the dcache, detaches the VMA from its trees, applies the modifications,
and then re-attaches it.

Add a vma_mas_link() helper to insert a VMA into both the maple tree
and the linked list; the list manipulation is only needed until the
linked list is removed later in this series.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
 mm/mmap.c | 258 +++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 206 insertions(+), 52 deletions(-)
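
[ Editor's note, not part of the submitted patch: the hunks below lean
  on the advanced maple tree API introduced earlier in this series.
  As a reviewing aid, here is a minimal sketch of the neighbour-lookup
  pattern the new mmap_region() uses; find_neighbours() is an invented
  name, and the sketch assumes [addr, end) has already been unmapped. ]

#include <linux/mm.h>
#include <linux/maple_tree.h>
#include <linux/rcupdate.h>

static void find_neighbours(struct mm_struct *mm, unsigned long addr,
			    unsigned long end,
			    struct vm_area_struct **pprev,
			    struct vm_area_struct **pnext)
{
	/* The maple tree indexes inclusive ranges, hence end - 1. */
	MA_STATE(mas, &mm->mm_mt, addr, end - 1);

	rcu_read_lock();
	/* First VMA above the range, if any. */
	*pnext = mas_next(&mas, ULONG_MAX);
	/* Walk back from there to the closest VMA below the range. */
	*pprev = mas_prev(&mas, 0);
	rcu_read_unlock();
}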

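[ Editor's note, also not part of the patch: a condensed sketch of how
  the two new helpers combine in mmap_region(): expand an adjacent VMA
  when possible, otherwise allocate a new one and link it in a single
  pass.  expand_or_link() is an invented name; error paths, file setup
  and accounting are elided, and the code assumes it lives in
  mm/mmap.c where vma_expand() and vma_mas_link() are visible. ]

#include <linux/mm.h>
#include <linux/khugepaged.h>
#include <linux/maple_tree.h>

static int expand_or_link(struct mm_struct *mm, struct ma_state *mas,
			  struct vm_area_struct *merge,	/* candidate or NULL */
			  struct vm_area_struct *prev,
			  struct vm_area_struct *next,
			  unsigned long addr, unsigned long end,
			  unsigned long merge_start,
			  unsigned long merge_end,
			  pgoff_t vm_pgoff, unsigned long vm_flags)
{
	struct vm_area_struct *vma;

	/* Fast path: grow an existing neighbour over [addr, end). */
	if (merge && !vma_expand(mas, merge, merge_start, merge_end,
				 vm_pgoff, next)) {
		khugepaged_enter_vma_merge(prev, vm_flags);
		return 0;
	}

	/* Slow path: allocate a fresh VMA and link it in one pass. */
	vma = vm_area_alloc(mm);
	if (!vma)
		return -ENOMEM;

	vma->vm_start = addr;
	vma->vm_end = end;
	vma->vm_flags = vm_flags;
	/* vm_page_prot, vm_pgoff, file hookup, etc. elided. */

	mas_set(mas, addr);		/* point the maple state at addr */
	vma_mas_link(mm, vma, mas, prev); /* maple tree + list + file rmap */
	return 0;
}
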
diff --git a/mm/mmap.c b/mm/mmap.c
index 9b86f47f7387..060a372fc812 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -503,28 +503,6 @@ static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
 	return vma->vm_next;
 }
 
-/*
- * munmap_vma_range() - munmap VMAs that overlap a range.
- * @mm: The mm struct
- * @start: The start of the range.
- * @len: The length of the range.
- * @pprev: pointer to the pointer that will be set to previous vm_area_struct
- *
- * Find all the vm_area_struct that overlap from @start to
- * @end and munmap them.  Set @pprev to the previous vm_area_struct.
- *
- * Returns: -ENOMEM on munmap failure or 0 on success.
- */
-static inline int
-munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
-		 struct vm_area_struct **pprev, struct list_head *uf)
-{
-	// Needs optimization.
-	while (range_has_overlap(mm, start, start + len, pprev))
-		if (do_munmap(mm, start, len, uf))
-			return -ENOMEM;
-	return 0;
-}
 static unsigned long count_vma_pages_range(struct mm_struct *mm,
 		unsigned long addr, unsigned long end)
 {
@@ -604,6 +582,35 @@ void vma_mt_store(struct mm_struct *mm, struct vm_area_struct *vma)
 		GFP_KERNEL);
 }
 
+/*
+ * vma_mas_link() - Link a VMA into an mm
+ * @mm: The mm struct
+ * @vma: The VMA to link in
+ * @mas: The maple state
+ *
+ * Must hold the @mas lock.
+ */
+static void vma_mas_link(struct mm_struct *mm, struct vm_area_struct *vma,
+			 struct ma_state *mas, struct vm_area_struct *prev)
+{
+	struct address_space *mapping = NULL;
+
+	if (vma->vm_file) {
+		mapping = vma->vm_file->f_mapping;
+		i_mmap_lock_write(mapping);
+	}
+
+	vma_mas_store(vma, mas);
+	__vma_link_list(mm, vma, prev);
+	__vma_link_file(vma);
+
+	if (mapping)
+		i_mmap_unlock_write(mapping);
+
+	mm->map_count++;
+	validate_mm(mm);
+}
+
 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct vm_area_struct *prev)
 {
@@ -639,6 +646,108 @@ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 	mm->map_count++;
 }
 
+/*
+ * vma_expand - Expand an existing VMA
+ * @mas: The maple state
+ * @vma: The vma to expand
+ * @start: The start of the vma
+ * @end: The exclusive end of the vma
+ *
+ * @mas must be locked
+ */
+inline int vma_expand(struct ma_state *mas, struct vm_area_struct *vma,
+		      unsigned long start, unsigned long end, pgoff_t pgoff,
+		      struct vm_area_struct *next)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct address_space *mapping = NULL;
+	struct rb_root_cached *root = NULL;
+	struct anon_vma *anon_vma = vma->anon_vma;
+	struct file *file = vma->vm_file;
+	bool remove_next = false;
+	int error;
+
+	if (next && (vma != next) && (end == next->vm_end)) {
+		remove_next = true;
+		if (next->anon_vma && !vma->anon_vma) {
+			vma->anon_vma = next->anon_vma;
+			error = anon_vma_clone(vma, next);
+			if (error)
+				return error;
+		}
+	}
+
+	vma_adjust_trans_huge(vma, start, end, 0);
+
+	if (file) {
+		mapping = file->f_mapping;
+		root = &mapping->i_mmap;
+		uprobe_munmap(vma, vma->vm_start, vma->vm_end);
+		i_mmap_lock_write(mapping);
+	}
+
+	if (anon_vma) {
+		anon_vma_lock_write(anon_vma);
+		anon_vma_interval_tree_pre_update_vma(vma);
+	}
+
+	if (file) {
+		flush_dcache_mmap_lock(mapping);
+		vma_interval_tree_remove(vma, root);
+	}
+
+	vma->vm_start = start;
+	vma->vm_end = end;
+	vma->vm_pgoff = pgoff;
+	/* Note: mas must be pointing to the expanding VMA */
+	vma_mas_store(vma, mas);
+
+	if (file) {
+		vma_interval_tree_insert(vma, root);
+		flush_dcache_mmap_unlock(mapping);
+	}
+
+	/* Expanding over the next vma */
+	if (remove_next) {
+		/* Remove from mm linked list - also updates highest_vm_end */
+		__vma_unlink_list(mm, next);
+
+		/* Kill the cache */
+		vmacache_invalidate(mm);
+
+		if (file)
+			__remove_shared_vm_struct(next, file, mapping);
+
+	} else if (!next) {
+		mm->highest_vm_end = vm_end_gap(vma);
+	}
+
+	if (anon_vma) {
+		anon_vma_interval_tree_post_update_vma(vma);
+		anon_vma_unlock_write(anon_vma);
+	}
+
+	if (file) {
+		i_mmap_unlock_write(mapping);
+		uprobe_mmap(vma);
+	}
+
+	if (remove_next) {
+		if (file) {
+			uprobe_munmap(next, next->vm_start, next->vm_end);
+			fput(file);
+		}
+		if (next->anon_vma)
+			anon_vma_merge(vma, next);
+		mm->map_count--;
+		mpol_put(vma_policy(next));
+		vm_area_free(next);
+	}
+
+	validate_mm(mm);
+	return 0;
+}
+
 /*
  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
  * is already present in an i_mmap tree without adjusting the tree.
@@ -1622,9 +1731,15 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		struct list_head *uf)
 {
 	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma, *prev, *merge;
-	int error;
+	struct vm_area_struct *vma = NULL;
+	struct vm_area_struct *prev, *next;
+	pgoff_t pglen = len >> PAGE_SHIFT;
 	unsigned long charged = 0;
+	unsigned long end = addr + len;
+	unsigned long merge_start = addr, merge_end = end;
+	pgoff_t vm_pgoff;
+	int error;
+	MA_STATE(mas, &mm->mm_mt, addr, end - 1);
 
 	/* Check against address space limit. */
 	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
@@ -1634,16 +1749,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		 * MAP_FIXED may remove pages of mappings that intersects with
 		 * requested mapping. Account for the pages it would unmap.
 		 */
-		nr_pages = count_vma_pages_range(mm, addr, addr + len);
+		nr_pages = count_vma_pages_range(mm, addr, end);
 
 		if (!may_expand_vm(mm, vm_flags,
 					(len >> PAGE_SHIFT) - nr_pages))
 			return -ENOMEM;
 	}
 
-	/* Clear old maps, set up prev and uf */
-	if (munmap_vma_range(mm, addr, len, &prev, uf))
+	/* Unmap any existing mapping in the area */
+	if (do_munmap(mm, addr, len, uf))
 		return -ENOMEM;
+
 	/*
 	 * Private writable mapping: check memory availability
 	 */
@@ -1654,14 +1770,50 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		vm_flags |= VM_ACCOUNT;
 	}
 
-	/*
-	 * Can we just expand an old mapping?
-	 */
-	vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-			NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
-	if (vma)
-		goto out;
 
+	if (vm_flags & VM_SPECIAL) {
+		rcu_read_lock();
+		prev = mas_prev(&mas, 0);
+		rcu_read_unlock();
+		goto cannot_expand;
+	}
+
+	/* Attempt to expand an old mapping */
+
+	/* Check next */
+	rcu_read_lock();
+	next = mas_next(&mas, ULONG_MAX);
+	rcu_read_unlock();
+	if (next && next->vm_start == end && vma_policy(next) &&
+	    can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
+				 NULL_VM_UFFD_CTX)) {
+		merge_end = next->vm_end;
+		vma = next;
+		vm_pgoff = next->vm_pgoff - pglen;
+	}
+
+	/* Check prev */
+	rcu_read_lock();
+	prev = mas_prev(&mas, 0);
+	rcu_read_unlock();
+	if (prev && prev->vm_end == addr && !vma_policy(prev) &&
+	    can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
+				NULL_VM_UFFD_CTX)) {
+		merge_start = prev->vm_start;
+		vma = prev;
+		vm_pgoff = prev->vm_pgoff;
+	}
+
+
+	/* Actually expand, if possible */
+	if (vma &&
+	    !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) {
+		khugepaged_enter_vma_merge(prev, vm_flags);
+		goto expanded;
+	}
+
+	mas_set_range(&mas, addr, end - 1);
+cannot_expand:
 	/*
 	 * Determine the object being mapped and call the appropriate
 	 * specific mapper. the address has already been validated, but
@@ -1674,7 +1826,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	}
 
 	vma->vm_start = addr;
-	vma->vm_end = addr + len;
+	vma->vm_end = end;
 	vma->vm_flags = vm_flags;
 	vma->vm_page_prot = vm_get_page_prot(vm_flags);
 	vma->vm_pgoff = pgoff;
@@ -1706,8 +1858,6 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		 *
 		 * Answer: Yes, several device drivers can do it in their
 		 *         f_op->mmap method. -DaveM
-		 * Bug: If addr is changed, prev, rb_link, rb_parent should
-		 *      be updated for vma_link()
 		 */
 		WARN_ON_ONCE(addr != vma->vm_start);
 
@@ -1716,18 +1866,25 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		/* If vm_flags changed after call_mmap(), we should try merge vma again
 		 * as we may succeed this time.
 		 */
-		if (unlikely(vm_flags != vma->vm_flags && prev)) {
-			merge = vma_merge(mm, prev, vma->vm_start, vma->vm_end, vma->vm_flags,
-				NULL, vma->vm_file, vma->vm_pgoff, NULL, NULL_VM_UFFD_CTX);
-			if (merge) {
+		if (unlikely(vm_flags != vma->vm_flags && prev &&
+			     prev->vm_end == addr && !vma_policy(prev) &&
+			     can_vma_merge_after(prev, vm_flags, NULL, file,
+						 pgoff, NULL_VM_UFFD_CTX))) {
+			merge_start = prev->vm_start;
+			vm_pgoff = prev->vm_pgoff;
+			if (!vma_expand(&mas, prev, merge_start, merge_end,
+					vm_pgoff, next)) {
 				/* ->mmap() can change vma->vm_file and fput the original file. So
 				 * fput the vma->vm_file here or we would add an extra fput for file
 				 * and cause general protection fault ultimately.
 				 */
 				fput(vma->vm_file);
 				vm_area_free(vma);
-				vma = merge;
-				/* Update vm_flags to pick up the change. */
+				vma = prev;
+				/* Update vm_flags and possibly addr to pick up the change. We don't
+				 * warn here if addr changed as the vma is not linked by vma_link().
+				 */
+				addr = vma->vm_start;
 				vm_flags = vma->vm_flags;
 				goto unmap_writable;
 			}
@@ -1751,7 +1908,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 			goto free_vma;
 	}
 
-	vma_link(mm, vma, prev);
+	mas_set(&mas, addr);
+	vma_mas_link(mm, vma, &mas, prev);
 	/* Once vma denies write, undo our temporary denial count */
 	if (file) {
 unmap_writable:
@@ -1760,14 +1918,14 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		if (vm_flags & VM_DENYWRITE)
 			allow_write_access(file);
 	}
-out:
+expanded:
 	perf_event_mmap(vma);
 
 	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
 	if (vm_flags & VM_LOCKED) {
 		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
-					is_vm_hugetlb_page(vma) ||
-					vma == get_gate_vma(current->mm))
+		    is_vm_hugetlb_page(vma) ||
+		    vma == get_gate_vma(current->mm))
 			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
 		else
 			mm->locked_vm += (len >> PAGE_SHIFT);
@@ -2605,13 +2763,10 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	vma = find_vma_intersection(mm, start, end);
 	if (!vma)
 		return 0;
+
 	prev = vma->vm_prev;
 	/* we have start < vma->vm_end  */
 
-	/* if it doesn't overlap, we have nothing.. */
-	if (vma->vm_start >= end)
-		return 0;
-
 	/*
 	 * If we need to split any vma, do it now to save pain later.
 	 *
@@ -2621,7 +2776,6 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	 */
 	if (start > vma->vm_start) {
 		int error;
-
 		/*
 		 * Make sure that map_count on return from munmap() will
 		 * not exceed its limit; but let map_count go just above
-- 
2.30.2
