linux-mm.kvack.org archive mirror
From: Andrew Morton <akpm@linux-foundation.org>
To: akpm@linux-foundation.org, darrick.wong@oracle.com,
	dchinner@redhat.com, ebiggers@google.com, gaoxiang25@huawei.com,
	hch@lst.de, jaegeuk@kernel.org, jhubbard@nvidia.com,
	johannes.thumshirn@wdc.com, joseph.qi@linux.alibaba.com,
	junxiao.bi@oracle.com, linux-mm@kvack.org, mhocko@suse.com,
	mm-commits@vger.kernel.org, mszeredi@redhat.com,
	torvalds@linux-foundation.org, william.kucharski@oracle.com,
	willy@infradead.org, xiyou.wangcong@gmail.com,
	yuchao0@huawei.com, ziy@nvidia.com
Subject: [patch 026/128] mm: add page_cache_readahead_unbounded
Date: Mon, 01 Jun 2020 21:46:51 -0700
Message-ID: <20200602044651.7OHjejp61%akpm@linux-foundation.org>
In-Reply-To: <20200601214457.919c35648e96a2b46b573fe1@linux-foundation.org>

From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: mm: add page_cache_readahead_unbounded

ext4 and f2fs have duplicated the guts of the readahead code so they can
read past i_size.  Instead, separate out the guts of the readahead code so
they can call it directly.
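
As an illustration only (not part of the diff itself), the caller side ends
up looking roughly like the sketch below, modelled on the ext4/f2fs hunks
further down; fs_read_merkle_tree_page() and the enclosing details are
hypothetical:

	/*
	 * Sketch: prefetch num_ra_pages pages of a file's Merkle tree, which
	 * lives beyond i_size, then read the single page actually requested.
	 * page_cache_readahead_unbounded() performs no i_size clamping, so
	 * the caller must pass a sensible range.  Passing NULL for the file
	 * and 0 for the lookahead size matches the ext4/f2fs callers below.
	 */
	static struct page *fs_read_merkle_tree_page(struct inode *inode,
						     pgoff_t index,
						     unsigned long num_ra_pages)
	{
		struct page *page;

		page = find_get_page_flags(inode->i_mapping, index, FGP_ACCESSED);
		if (!page || !PageUptodate(page)) {
			if (page)
				put_page(page);
			else if (num_ra_pages > 1)
				page_cache_readahead_unbounded(inode->i_mapping,
						NULL, index, num_ra_pages, 0);
			page = read_mapping_page(inode->i_mapping, index, NULL);
		}
		return page;
	}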

Link: http://lkml.kernel.org/r/20200414150233.24495-14-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: Eric Biggers <ebiggers@google.com>
Tested-by: Eric Biggers <ebiggers@google.com>
Cc: Chao Yu <yuchao0@huawei.com>
Cc: Cong Wang <xiyou.wangcong@gmail.com>
Cc: Darrick J. Wong <darrick.wong@oracle.com>
Cc: Dave Chinner <dchinner@redhat.com>
Cc: Gao Xiang <gaoxiang25@huawei.com>
Cc: Jaegeuk Kim <jaegeuk@kernel.org>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Joseph Qi <joseph.qi@linux.alibaba.com>
Cc: Junxiao Bi <junxiao.bi@oracle.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Cc: Miklos Szeredi <mszeredi@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/ext4/verity.c        |   35 +------------------
 fs/f2fs/data.c          |    2 -
 fs/f2fs/f2fs.h          |    3 -
 fs/f2fs/verity.c        |   35 +------------------
 include/linux/pagemap.h |    3 +
 mm/readahead.c          |   68 ++++++++++++++++++++++++++------------
 6 files changed, 55 insertions(+), 91 deletions(-)

--- a/fs/ext4/verity.c~mm-add-page_cache_readahead_unbounded
+++ a/fs/ext4/verity.c
@@ -342,37 +342,6 @@ static int ext4_get_verity_descriptor(st
 	return desc_size;
 }
 
-/*
- * Prefetch some pages from the file's Merkle tree.
- *
- * This is basically a stripped-down version of __do_page_cache_readahead()
- * which works on pages past i_size.
- */
-static void ext4_merkle_tree_readahead(struct address_space *mapping,
-				       pgoff_t start_index, unsigned long count)
-{
-	LIST_HEAD(pages);
-	unsigned int nr_pages = 0;
-	struct page *page;
-	pgoff_t index;
-	struct blk_plug plug;
-
-	for (index = start_index; index < start_index + count; index++) {
-		page = xa_load(&mapping->i_pages, index);
-		if (!page || xa_is_value(page)) {
-			page = __page_cache_alloc(readahead_gfp_mask(mapping));
-			if (!page)
-				break;
-			page->index = index;
-			list_add(&page->lru, &pages);
-			nr_pages++;
-		}
-	}
-	blk_start_plug(&plug);
-	ext4_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
-	blk_finish_plug(&plug);
-}
-
 static struct page *ext4_read_merkle_tree_page(struct inode *inode,
 					       pgoff_t index,
 					       unsigned long num_ra_pages)
@@ -386,8 +355,8 @@ static struct page *ext4_read_merkle_tre
 		if (page)
 			put_page(page);
 		else if (num_ra_pages > 1)
-			ext4_merkle_tree_readahead(inode->i_mapping, index,
-						   num_ra_pages);
+			page_cache_readahead_unbounded(inode->i_mapping, NULL,
+					index, num_ra_pages, 0);
 		page = read_mapping_page(inode->i_mapping, index, NULL);
 	}
 	return page;
--- a/fs/f2fs/data.c~mm-add-page_cache_readahead_unbounded
+++ a/fs/f2fs/data.c
@@ -2177,7 +2177,7 @@ out:
  * use ->readpage() or do the necessary surgery to decouple ->readpages()
  * from read-ahead.
  */
-int f2fs_mpage_readpages(struct address_space *mapping,
+static int f2fs_mpage_readpages(struct address_space *mapping,
 			struct list_head *pages, struct page *page,
 			unsigned nr_pages, bool is_readahead)
 {
--- a/fs/f2fs/f2fs.h~mm-add-page_cache_readahead_unbounded
+++ a/fs/f2fs/f2fs.h
@@ -3373,9 +3373,6 @@ int f2fs_reserve_new_block(struct dnode_
 int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index);
 int f2fs_preallocate_blocks(struct kiocb *iocb, struct iov_iter *from);
 int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index);
-int f2fs_mpage_readpages(struct address_space *mapping,
-			struct list_head *pages, struct page *page,
-			unsigned nr_pages, bool is_readahead);
 struct page *f2fs_get_read_data_page(struct inode *inode, pgoff_t index,
 			int op_flags, bool for_write);
 struct page *f2fs_find_data_page(struct inode *inode, pgoff_t index);
--- a/fs/f2fs/verity.c~mm-add-page_cache_readahead_unbounded
+++ a/fs/f2fs/verity.c
@@ -222,37 +222,6 @@ static int f2fs_get_verity_descriptor(st
 	return size;
 }
 
-/*
- * Prefetch some pages from the file's Merkle tree.
- *
- * This is basically a stripped-down version of __do_page_cache_readahead()
- * which works on pages past i_size.
- */
-static void f2fs_merkle_tree_readahead(struct address_space *mapping,
-				       pgoff_t start_index, unsigned long count)
-{
-	LIST_HEAD(pages);
-	unsigned int nr_pages = 0;
-	struct page *page;
-	pgoff_t index;
-	struct blk_plug plug;
-
-	for (index = start_index; index < start_index + count; index++) {
-		page = xa_load(&mapping->i_pages, index);
-		if (!page || xa_is_value(page)) {
-			page = __page_cache_alloc(readahead_gfp_mask(mapping));
-			if (!page)
-				break;
-			page->index = index;
-			list_add(&page->lru, &pages);
-			nr_pages++;
-		}
-	}
-	blk_start_plug(&plug);
-	f2fs_mpage_readpages(mapping, &pages, NULL, nr_pages, true);
-	blk_finish_plug(&plug);
-}
-
 static struct page *f2fs_read_merkle_tree_page(struct inode *inode,
 					       pgoff_t index,
 					       unsigned long num_ra_pages)
@@ -266,8 +235,8 @@ static struct page *f2fs_read_merkle_tre
 		if (page)
 			put_page(page);
 		else if (num_ra_pages > 1)
-			f2fs_merkle_tree_readahead(inode->i_mapping, index,
-						   num_ra_pages);
+			page_cache_readahead_unbounded(inode->i_mapping, NULL,
+					index, num_ra_pages, 0);
 		page = read_mapping_page(inode->i_mapping, index, NULL);
 	}
 	return page;
--- a/include/linux/pagemap.h~mm-add-page_cache_readahead_unbounded
+++ a/include/linux/pagemap.h
@@ -625,6 +625,9 @@ void page_cache_sync_readahead(struct ad
 void page_cache_async_readahead(struct address_space *, struct file_ra_state *,
 		struct file *, struct page *, pgoff_t index,
 		unsigned long req_count);
+void page_cache_readahead_unbounded(struct address_space *, struct file *,
+		pgoff_t index, unsigned long nr_to_read,
+		unsigned long lookahead_count);
 
 /*
  * Like add_to_page_cache_locked, but used to add newly allocated pages:
--- a/mm/readahead.c~mm-add-page_cache_readahead_unbounded
+++ a/mm/readahead.c
@@ -156,37 +156,34 @@ out:
 		rac->_index++;
 }
 
-/*
- * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
- * the pages first, then submits them for I/O. This avoids the very bad
- * behaviour which would occur if page allocations are causing VM writeback.
- * We really don't want to intermingle reads and writes like that.
+/**
+ * page_cache_readahead_unbounded - Start unchecked readahead.
+ * @mapping: File address space.
+ * @file: This instance of the open file; used for authentication.
+ * @index: First page index to read.
+ * @nr_to_read: The number of pages to read.
+ * @lookahead_size: Where to start the next readahead.
+ *
+ * This function is for filesystems to call when they want to start
+ * readahead beyond a file's stated i_size.  This is almost certainly
+ * not the function you want to call.  Use page_cache_async_readahead()
+ * or page_cache_sync_readahead() instead.
+ *
+ * Context: File is referenced by caller.  Mutexes may be held by caller.
+ * May sleep, but will not reenter filesystem to reclaim memory.
  */
-void __do_page_cache_readahead(struct address_space *mapping,
-		struct file *filp, pgoff_t index, unsigned long nr_to_read,
+void page_cache_readahead_unbounded(struct address_space *mapping,
+		struct file *file, pgoff_t index, unsigned long nr_to_read,
 		unsigned long lookahead_size)
 {
-	struct inode *inode = mapping->host;
 	LIST_HEAD(page_pool);
-	loff_t isize = i_size_read(inode);
 	gfp_t gfp_mask = readahead_gfp_mask(mapping);
 	struct readahead_control rac = {
 		.mapping = mapping,
-		.file = filp,
+		.file = file,
 		._index = index,
 	};
 	unsigned long i;
-	pgoff_t end_index;	/* The last page we want to read */
-
-	if (isize == 0)
-		return;
-
-	end_index = (isize - 1) >> PAGE_SHIFT;
-	if (index > end_index)
-		return;
-	/* Don't read past the page containing the last byte of the file */
-	if (nr_to_read > end_index - index)
-		nr_to_read = end_index - index + 1;
 
 	/*
 	 * Preallocate as many pages as we will need.
@@ -230,6 +227,35 @@ void __do_page_cache_readahead(struct ad
 	 */
 	read_pages(&rac, &page_pool, false);
 }
+EXPORT_SYMBOL_GPL(page_cache_readahead_unbounded);
+
+/*
+ * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
+ * the pages first, then submits them for I/O. This avoids the very bad
+ * behaviour which would occur if page allocations are causing VM writeback.
+ * We really don't want to intermingle reads and writes like that.
+ */
+void __do_page_cache_readahead(struct address_space *mapping,
+		struct file *file, pgoff_t index, unsigned long nr_to_read,
+		unsigned long lookahead_size)
+{
+	struct inode *inode = mapping->host;
+	loff_t isize = i_size_read(inode);
+	pgoff_t end_index;	/* The last page we want to read */
+
+	if (isize == 0)
+		return;
+
+	end_index = (isize - 1) >> PAGE_SHIFT;
+	if (index > end_index)
+		return;
+	/* Don't read past the page containing the last byte of the file */
+	if (nr_to_read > end_index - index)
+		nr_to_read = end_index - index + 1;
+
+	page_cache_readahead_unbounded(mapping, file, index, nr_to_read,
+			lookahead_size);
+}
 
 /*
  * Chunk the readahead into 2 megabyte units, so that we don't pin too much
_



Thread overview: 137+ messages
2020-06-02  4:44 incoming Andrew Morton
2020-06-02  4:45 ` [patch 001/128] squashfs: migrate from ll_rw_block usage to BIO Andrew Morton
2020-06-02  4:45 ` [patch 002/128] ocfs2: add missing annotation for dlm_empty_lockres() Andrew Morton
2020-06-02  4:45 ` [patch 003/128] ocfs2: mount shared volume without ha stack Andrew Morton
2020-06-02  4:45 ` [patch 004/128] arch/parisc/include/asm/pgtable.h: remove unused `old_pte' Andrew Morton
2020-06-02  4:45 ` [patch 005/128] vfs: track per-sb writeback errors and report them to syncfs Andrew Morton
2020-06-02  4:45 ` [patch 006/128] fs/buffer.c: record blockdev write errors in super_block that it backs Andrew Morton
2020-06-02  4:45 ` [patch 007/128] usercopy: mark dma-kmalloc caches as usercopy caches Andrew Morton
2020-06-02  4:45 ` [patch 008/128] mm/slub.c: fix corrupted freechain in deactivate_slab() Andrew Morton
2020-06-02  4:45 ` [patch 009/128] slub: Remove userspace notifier for cache add/remove Andrew Morton
2020-06-02  4:45 ` [patch 010/128] slub: remove kmalloc under list_lock from list_slab_objects() V2 Andrew Morton
2020-06-02  4:45 ` [patch 011/128] mm/slub: fix stack overruns with SLUB_STATS Andrew Morton
2020-06-02  4:46 ` [patch 012/128] Documentation/vm/slub.rst: s/Toggle/Enable/ Andrew Morton
2020-06-02 13:10   ` Rafael Aquini
2020-06-02  4:46 ` [patch 013/128] mm, dump_page(): do not crash with invalid mapping pointer Andrew Morton
2020-06-02  4:46 ` [patch 014/128] mm: move readahead prototypes from mm.h Andrew Morton
2020-06-02  4:46 ` [patch 015/128] mm: return void from various readahead functions Andrew Morton
2020-06-02  4:46 ` [patch 016/128] mm: ignore return value of ->readpages Andrew Morton
2020-06-02  4:46 ` [patch 017/128] mm: move readahead nr_pages check into read_pages Andrew Morton
2020-06-02  4:46 ` [patch 018/128] mm: add new readahead_control API Andrew Morton
2020-06-02  4:46 ` [patch 019/128] mm: use readahead_control to pass arguments Andrew Morton
2020-06-02  4:46 ` [patch 020/128] mm: rename various 'offset' parameters to 'index' Andrew Morton
2020-06-02  4:46 ` [patch 021/128] mm: rename readahead loop variable to 'i' Andrew Morton
2020-06-02  4:46 ` [patch 022/128] mm: remove 'page_offset' from readahead loop Andrew Morton
2020-06-02  4:46 ` [patch 023/128] mm: put readahead pages in cache earlier Andrew Morton
2020-06-02  4:46 ` [patch 024/128] mm: add readahead address space operation Andrew Morton
2020-06-02  4:46 ` [patch 025/128] mm: move end_index check out of readahead loop Andrew Morton
2020-06-02  4:46 ` Andrew Morton [this message]
2020-06-02  4:46 ` [patch 027/128] mm: document why we don't set PageReadahead Andrew Morton
2020-06-02  4:46 ` [patch 028/128] mm: use memalloc_nofs_save in readahead path Andrew Morton
2020-06-02  4:47 ` [patch 029/128] fs: convert mpage_readpages to mpage_readahead Andrew Morton
2020-06-02  4:47 ` [patch 030/128] btrfs: convert from readpages to readahead Andrew Morton
2020-06-02  4:47 ` [patch 031/128] erofs: convert uncompressed files " Andrew Morton
2020-06-02  4:47 ` [patch 032/128] erofs: convert compressed " Andrew Morton
2020-06-02  4:47 ` [patch 033/128] ext4: convert " Andrew Morton
2020-06-02  4:47 ` [patch 034/128] ext4: pass the inode to ext4_mpage_readpages Andrew Morton
2020-06-02  4:47 ` [patch 035/128] f2fs: convert from readpages to readahead Andrew Morton
2020-06-02  4:47 ` [patch 036/128] f2fs: pass the inode to f2fs_mpage_readpages Andrew Morton
2020-06-02  4:47 ` [patch 037/128] fuse: convert from readpages to readahead Andrew Morton
2020-06-02  4:47 ` [patch 038/128] iomap: " Andrew Morton
2020-06-02  4:47 ` [patch 039/128] include/linux/pagemap.h: introduce attach/detach_page_private Andrew Morton
2020-06-02  4:47 ` [patch 040/128] md: remove __clear_page_buffers and use attach/detach_page_private Andrew Morton
2020-06-02  4:47 ` [patch 041/128] btrfs: " Andrew Morton
2020-06-02 14:19   ` David Sterba
2020-06-02  4:47 ` [patch 042/128] fs/buffer.c: " Andrew Morton
2020-06-02  4:47 ` [patch 043/128] f2fs: " Andrew Morton
2020-06-02  4:47 ` [patch 044/128] iomap: " Andrew Morton
2020-06-02 16:23   ` Darrick J. Wong
2020-06-02  4:47 ` [patch 045/128] ntfs: replace attach_page_buffers with attach_page_private Andrew Morton
2020-06-02  4:48 ` [patch 046/128] orangefs: use attach/detach_page_private Andrew Morton
2020-06-02  4:48 ` [patch 047/128] buffer_head.h: remove attach_page_buffers Andrew Morton
2020-06-02  4:48 ` [patch 048/128] mm/migrate.c: call detach_page_private to cleanup code Andrew Morton
2020-06-02  4:48 ` [patch 049/128] mm_types.h: change set_page_private to inline function Andrew Morton
2020-06-02  4:48 ` [patch 050/128] mm/filemap.c: remove misleading comment Andrew Morton
2020-06-02  4:48 ` [patch 051/128] mm/page-writeback.c: remove unused variable Andrew Morton
2020-06-02  4:48 ` [patch 052/128] mm/writeback: replace PF_LESS_THROTTLE with PF_LOCAL_THROTTLE Andrew Morton
2020-06-02  4:48 ` [patch 053/128] mm/writeback: discard NR_UNSTABLE_NFS, use NR_WRITEBACK instead Andrew Morton
2020-06-02  4:48 ` [patch 054/128] mm/gup.c: update the documentation Andrew Morton
2020-06-02  4:48 ` [patch 055/128] mm/gup: introduce pin_user_pages_unlocked Andrew Morton
2020-06-02  4:48 ` [patch 056/128] ivtv: convert get_user_pages() --> pin_user_pages() Andrew Morton
2020-06-02  4:48 ` [patch 057/128] mm/gup.c: further document vma_permits_fault() Andrew Morton
2020-06-02  4:48 ` [patch 058/128] mm/swapfile: use list_{prev,next}_entry() instead of open-coding Andrew Morton
2020-06-02  4:48 ` [patch 059/128] mm/swap_state: fix a data race in swapin_nr_pages Andrew Morton
2020-06-02  4:48 ` [patch 060/128] mm: swap: properly update readahead statistics in unuse_pte_range() Andrew Morton
2020-06-02  4:48 ` [patch 061/128] mm/swapfile.c: offset is only used when there is more slots Andrew Morton
2020-06-02  4:48 ` [patch 062/128] mm/swapfile.c: explicitly show ssd/non-ssd is handled mutually exclusive Andrew Morton
2020-06-02  4:48 ` [patch 063/128] mm/swapfile.c: remove the unnecessary goto for SSD case Andrew Morton
2020-06-02  4:48 ` [patch 064/128] mm/swapfile.c: simplify the calculation of n_goal Andrew Morton
2020-06-02  4:48 ` [patch 065/128] mm/swapfile.c: remove the extra check in scan_swap_map_slots() Andrew Morton
2020-06-02  4:49 ` [patch 066/128] mm/swapfile.c: found_free could be represented by (tmp < max) Andrew Morton
2020-06-02  4:49 ` [patch 067/128] mm/swapfile.c: tmp is always smaller than max Andrew Morton
2020-06-02  4:49 ` [patch 068/128] mm/swapfile.c: omit a duplicate code by compare tmp and max first Andrew Morton
2020-06-02  4:49 ` [patch 069/128] swap: try to scan more free slots even when fragmented Andrew Morton
2020-06-02  4:49 ` [patch 070/128] mm/swapfile.c: classify SWAP_MAP_XXX to make it more readable Andrew Morton
2020-06-02  4:49 ` [patch 071/128] mm/swapfile.c: __swap_entry_free() always free 1 entry Andrew Morton
2020-06-02  4:49 ` [patch 072/128] mm/swapfile.c: use prandom_u32_max() Andrew Morton
2020-06-02  4:49 ` [patch 073/128] swap: reduce lock contention on swap cache from swap slots allocation Andrew Morton
2020-06-02  4:49 ` [patch 074/128] mm: swapfile: fix /proc/swaps heading and Size/Used/Priority alignment Andrew Morton
2020-06-02  4:49 ` [patch 075/128] include/linux/swap.h: delete meaningless __add_to_swap_cache() declaration Andrew Morton
2020-06-02  4:49 ` [patch 076/128] mm, memcg: add workingset_restore in memory.stat Andrew Morton
2020-06-02  4:49 ` [patch 077/128] mm: memcontrol: simplify value comparison between count and limit Andrew Morton
2020-06-02  4:49 ` [patch 078/128] memcg: expose root cgroup's memory.stat Andrew Morton
2020-06-02  4:49 ` [patch 079/128] mm/memcg: prepare for swap over-high accounting and penalty calculation Andrew Morton
2020-06-02  4:49 ` [patch 080/128] mm/memcg: move penalty delay clamping out of calculate_high_delay() Andrew Morton
2020-06-02  4:49 ` [patch 081/128] mm/memcg: move cgroup high memory limit setting into struct page_counter Andrew Morton
2020-06-02  4:49 ` [patch 082/128] mm/memcg: automatically penalize tasks with high swap use Andrew Morton
2020-06-02  4:49 ` [patch 083/128] memcg: fix memcg_kmem_bypass() for remote memcg charging Andrew Morton
2020-06-02  4:49 ` [patch 084/128] x86: mm: ptdump: calculate effective permissions correctly Andrew Morton
2020-06-02  4:50 ` [patch 085/128] mm: ptdump: expand type of 'val' in note_page() Andrew Morton
2020-06-02  4:50 ` [patch 086/128] /proc/PID/smaps: Add PMD migration entry parsing Andrew Morton
2020-06-02  4:50 ` [patch 087/128] mm/memory: remove unnecessary pte_devmap case in copy_one_pte() Andrew Morton
2020-06-02  4:50 ` [patch 088/128] mm, memory_failure: don't send BUS_MCEERR_AO for action required error Andrew Morton
2020-06-02  4:50 ` [patch 089/128] x86/hyperv: use vmalloc_exec for the hypercall page Andrew Morton
2020-06-02  4:50 ` [patch 090/128] x86: fix vmap arguments in map_irq_stack Andrew Morton
2020-06-02  4:50 ` [patch 091/128] staging: android: ion: use vmap instead of vm_map_ram Andrew Morton
2020-06-02  4:50 ` [patch 092/128] staging: media: ipu3: use vmap instead of reimplementing it Andrew Morton
2020-06-02  4:50 ` [patch 093/128] dma-mapping: use vmap insted " Andrew Morton
2020-06-02  4:50 ` [patch 094/128] powerpc: add an ioremap_phb helper Andrew Morton
2020-06-02  4:50 ` [patch 095/128] powerpc: remove __ioremap_at and __iounmap_at Andrew Morton
2020-06-02  4:50 ` [patch 096/128] mm: remove __get_vm_area Andrew Morton
2020-06-02  4:50 ` [patch 097/128] mm: unexport unmap_kernel_range_noflush Andrew Morton
2020-06-02  4:50 ` [patch 098/128] mm: rename CONFIG_PGTABLE_MAPPING to CONFIG_ZSMALLOC_PGTABLE_MAPPING Andrew Morton
2020-06-02  4:50 ` [patch 099/128] mm: only allow page table mappings for built-in zsmalloc Andrew Morton
2020-06-02  4:51 ` [patch 100/128] mm: pass addr as unsigned long to vb_free Andrew Morton
2020-06-02  4:51 ` [patch 101/128] mm: remove vmap_page_range_noflush and vunmap_page_range Andrew Morton
2020-06-02  4:51 ` [patch 102/128] mm: rename vmap_page_range to map_kernel_range Andrew Morton
2020-06-02  4:51 ` [patch 103/128] mm: don't return the number of pages from map_kernel_range{,_noflush} Andrew Morton
2020-06-02  4:51 ` [patch 104/128] mm: remove map_vm_range Andrew Morton
2020-06-02  4:51 ` [patch 105/128] mm: remove unmap_vmap_area Andrew Morton
2020-06-02  4:51 ` [patch 106/128] mm: remove the prot argument from vm_map_ram Andrew Morton
2020-06-02  4:51 ` [patch 107/128] mm: enforce that vmap can't map pages executable Andrew Morton
2020-06-02  4:51 ` [patch 108/128] gpu/drm: remove the powerpc hack in drm_legacy_sg_alloc Andrew Morton
2020-06-02  4:51 ` [patch 109/128] mm: remove the pgprot argument to __vmalloc Andrew Morton
2020-06-02  4:51 ` [patch 110/128] mm: remove the prot argument to __vmalloc_node Andrew Morton
2020-06-02  4:51 ` [patch 111/128] mm: remove both instances of __vmalloc_node_flags Andrew Morton
2020-06-02  4:51 ` [patch 112/128] mm: remove __vmalloc_node_flags_caller Andrew Morton
2020-06-02  4:51 ` [patch 113/128] mm: switch the test_vmalloc module to use __vmalloc_node Andrew Morton
2020-06-02  4:52 ` [patch 114/128] mm: remove vmalloc_user_node_flags Andrew Morton
2020-06-02  4:52 ` [patch 115/128] arm64: use __vmalloc_node in arch_alloc_vmap_stack Andrew Morton
2020-06-02  4:52 ` [patch 116/128] powerpc: use __vmalloc_node in alloc_vm_stack Andrew Morton
2020-06-02  4:52 ` [patch 117/128] s390: use __vmalloc_node in stack_alloc Andrew Morton
2020-06-02  4:52 ` [patch 118/128] mm: add functions to track page directory modifications Andrew Morton
2020-06-02  4:52 ` [patch 119/128] mm/vmalloc: track which page-table levels were modified Andrew Morton
2020-06-02  4:52 ` [patch 120/128] mm/ioremap: " Andrew Morton
2020-06-02  4:52 ` [patch 121/128] x86/mm/64: implement arch_sync_kernel_mappings() Andrew Morton
2020-06-02  4:52 ` [patch 122/128] x86/mm/32: " Andrew Morton
2020-06-02  4:52 ` [patch 123/128] mm: remove vmalloc_sync_(un)mappings() Andrew Morton
2020-06-02  4:52 ` [patch 124/128] x86/mm: remove vmalloc faulting Andrew Morton
2020-06-02  4:52 ` [patch 125/128] kasan: fix clang compilation warning due to stack protector Andrew Morton
2020-06-02  4:52 ` [patch 126/128] ubsan: entirely disable alignment checks under UBSAN_TRAP Andrew Morton
2020-06-02  4:52 ` [patch 127/128] mm/mm_init.c: report kasan-tag information stored in page->flags Andrew Morton
2020-06-02  4:52 ` [patch 128/128] kasan: move kasan_report() into report.c Andrew Morton
2020-06-02 20:08 ` incoming Andrew Morton
2020-06-02 20:45   ` incoming Linus Torvalds
2020-06-02 21:38     ` incoming Andrew Morton
2020-06-02 22:18       ` incoming Linus Torvalds
2020-06-02 20:09 incoming Andrew Morton
2020-06-02 20:11 ` [patch 026/128] mm: add page_cache_readahead_unbounded Andrew Morton
