* [PATCH RESEND 0/2] swap readahead clean up
@ 2018-02-20  8:52 ` minchan
  0 siblings, 0 replies; 16+ messages in thread
From: minchan @ 2018-02-20  8:52 UTC (permalink / raw)
  To: Andrew Morton; +Cc: lkml, linux-mm, Minchan Kim

From: Minchan Kim <minchan@kernel.org>

This patchset cleans up the recently added vma-based readahead code by
unifying it with the cluster-based readahead.

Resent based on mmotm-2018-02-06-16-41.

Minchan Kim (2):
  mm: swap: clean up swap readahead
  mm: swap: unify cluster-based and vma-based swap readahead

 include/linux/swap.h |  38 +++---------
 mm/memory.c          |  33 +++--------
 mm/shmem.c           |   5 +-
 mm/swap_state.c      | 137 +++++++++++++++++++++++++++----------------
 4 files changed, 105 insertions(+), 108 deletions(-)

-- 
2.16.1.291.g4437f3f132-goog

^ permalink raw reply	[flat|nested] 16+ messages in thread

* [PATCH RESEND 1/2] mm: swap: clean up swap readahead
  2018-02-20  8:52 ` minchan
@ 2018-02-20  8:52   ` minchan
  -1 siblings, 0 replies; 16+ messages in thread
From: minchan @ 2018-02-20  8:52 UTC (permalink / raw)
  To: Andrew Morton; +Cc: lkml, linux-mm, Minchan Kim, Hugh Dickins, Huang Ying

From: Minchan Kim <minchan@kernel.org>

Looking at the recent changes to swap readahead, I am unhappy with the
current code structure, which branches between two swap readahead
algorithms inside do_swap_page().  This patch cleans that up.

The main motivation is that the fault handler shouldn't need to be
aware of the readahead algorithms; it should just call
swapin_readahead().

As a first step, this patch does a partial cleanup, not a complete one
(split out to make review easier); the next patch finishes the job.
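
To illustrate the intended end state (a sketch for illustration only;
the real change lands in patch 2/2, and the vmf-taking
swapin_readahead() signature only exists after it), the fault handler
boils down to a single call with no policy knowledge:

/*
 * Illustration only: fault_swapin() is a made-up helper name, not
 * something this patch adds.
 */
static struct page *fault_swapin(struct vm_fault *vmf, swp_entry_t entry)
{
	/* the readahead policy choice is hidden behind this call */
	return swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, vmf);
}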

Link: http://lkml.kernel.org/r/1509520520-32367-2-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Huang Ying <ying.huang@intel.com>
---
 include/linux/swap.h | 17 ++-------
 mm/memory.c          | 26 +++----------
 mm/swap_state.c      | 89 ++++++++++++++++++++++++--------------------
 3 files changed, 57 insertions(+), 75 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index a1a3f4ed94ce..fa92177d863e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -424,12 +424,8 @@ extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
 			bool *new_page_allocated);
 extern struct page *swapin_readahead(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr);
-
-extern struct page *swap_readahead_detect(struct vm_fault *vmf,
-					  struct vma_swap_readahead *swap_ra);
 extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
-					   struct vm_fault *vmf,
-					   struct vma_swap_readahead *swap_ra);
+					   struct vm_fault *vmf);
 
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
@@ -548,15 +544,8 @@ static inline bool swap_use_vma_readahead(void)
 	return false;
 }
 
-static inline struct page *swap_readahead_detect(
-	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
-{
-	return NULL;
-}
-
-static inline struct page *do_swap_page_readahead(
-	swp_entry_t fentry, gfp_t gfp_mask,
-	struct vm_fault *vmf, struct vma_swap_readahead *swap_ra)
+static inline struct page *do_swap_page_readahead(swp_entry_t fentry,
+				gfp_t gfp_mask, struct vm_fault *vmf)
 {
 	return NULL;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 5ec6433d6a5c..5b6e29d927c8 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2883,26 +2883,16 @@ EXPORT_SYMBOL(unmap_mapping_range);
 int do_swap_page(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	struct page *page = NULL, *swapcache = NULL;
+	struct page *page = NULL, *swapcache;
 	struct mem_cgroup *memcg;
-	struct vma_swap_readahead swap_ra;
 	swp_entry_t entry;
 	pte_t pte;
 	int locked;
 	int exclusive = 0;
 	int ret = 0;
-	bool vma_readahead = swap_use_vma_readahead();
 
-	if (vma_readahead) {
-		page = swap_readahead_detect(vmf, &swap_ra);
-		swapcache = page;
-	}
-
-	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte)) {
-		if (page)
-			put_page(page);
+	if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
 		goto out;
-	}
 
 	entry = pte_to_swp_entry(vmf->orig_pte);
 	if (unlikely(non_swap_entry(entry))) {
@@ -2928,11 +2918,8 @@ int do_swap_page(struct vm_fault *vmf)
 
 
 	delayacct_set_flag(DELAYACCT_PF_SWAPIN);
-	if (!page) {
-		page = lookup_swap_cache(entry, vma_readahead ? vma : NULL,
-					 vmf->address);
-		swapcache = page;
-	}
+	page = lookup_swap_cache(entry, vma, vmf->address);
+	swapcache = page;
 
 	if (!page) {
 		struct swap_info_struct *si = swp_swap_info(entry);
@@ -2949,9 +2936,9 @@ int do_swap_page(struct vm_fault *vmf)
 				swap_readpage(page, true);
 			}
 		} else {
-			if (vma_readahead)
+			if (swap_use_vma_readahead())
 				page = do_swap_page_readahead(entry,
-					GFP_HIGHUSER_MOVABLE, vmf, &swap_ra);
+					GFP_HIGHUSER_MOVABLE, vmf);
 			else
 				page = swapin_readahead(entry,
 				       GFP_HIGHUSER_MOVABLE, vma, vmf->address);
@@ -2982,7 +2969,6 @@ int do_swap_page(struct vm_fault *vmf)
 		 */
 		ret = VM_FAULT_HWPOISON;
 		delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
-		swapcache = page;
 		goto out_release;
 	}
 
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 39ae7cfad90f..c56cce64b2c3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -332,32 +332,38 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
 			       unsigned long addr)
 {
 	struct page *page;
-	unsigned long ra_info;
-	int win, hits, readahead;
 
 	page = find_get_page(swap_address_space(entry), swp_offset(entry));
 
 	INC_CACHE_INFO(find_total);
 	if (page) {
+		bool vma_ra = swap_use_vma_readahead();
+		bool readahead = TestClearPageReadahead(page);
+
 		INC_CACHE_INFO(find_success);
 		if (unlikely(PageTransCompound(page)))
 			return page;
-		readahead = TestClearPageReadahead(page);
-		if (vma) {
-			ra_info = GET_SWAP_RA_VAL(vma);
-			win = SWAP_RA_WIN(ra_info);
-			hits = SWAP_RA_HITS(ra_info);
+
+		if (vma && vma_ra) {
+			unsigned long ra_val;
+			int win, hits;
+
+			ra_val = GET_SWAP_RA_VAL(vma);
+			win = SWAP_RA_WIN(ra_val);
+			hits = SWAP_RA_HITS(ra_val);
 			if (readahead)
 				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
 			atomic_long_set(&vma->swap_readahead_info,
 					SWAP_RA_VAL(addr, win, hits));
 		}
+
 		if (readahead) {
 			count_vm_event(SWAP_RA_HIT);
-			if (!vma)
+			if (!vma || !vma_ra)
 				atomic_inc(&swapin_readahead_hits);
 		}
 	}
+
 	return page;
 }
 
@@ -649,16 +655,15 @@ static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
 		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
 }
 
-struct page *swap_readahead_detect(struct vm_fault *vmf,
-				   struct vma_swap_readahead *swap_ra)
+static void swap_ra_info(struct vm_fault *vmf,
+			struct vma_swap_readahead *ra_info)
 {
 	struct vm_area_struct *vma = vmf->vma;
-	unsigned long swap_ra_info;
-	struct page *page;
+	unsigned long ra_val;
 	swp_entry_t entry;
 	unsigned long faddr, pfn, fpfn;
 	unsigned long start, end;
-	pte_t *pte;
+	pte_t *pte, *orig_pte;
 	unsigned int max_win, hits, prev_win, win, left;
 #ifndef CONFIG_64BIT
 	pte_t *tpte;
@@ -667,30 +672,32 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
 	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
 			     SWAP_RA_ORDER_CEILING);
 	if (max_win == 1) {
-		swap_ra->win = 1;
-		return NULL;
+		ra_info->win = 1;
+		return;
 	}
 
 	faddr = vmf->address;
-	entry = pte_to_swp_entry(vmf->orig_pte);
-	if ((unlikely(non_swap_entry(entry))))
-		return NULL;
-	page = lookup_swap_cache(entry, vma, faddr);
-	if (page)
-		return page;
+	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
+	entry = pte_to_swp_entry(*pte);
+	if ((unlikely(non_swap_entry(entry)))) {
+		pte_unmap(orig_pte);
+		return;
+	}
 
 	fpfn = PFN_DOWN(faddr);
-	swap_ra_info = GET_SWAP_RA_VAL(vma);
-	pfn = PFN_DOWN(SWAP_RA_ADDR(swap_ra_info));
-	prev_win = SWAP_RA_WIN(swap_ra_info);
-	hits = SWAP_RA_HITS(swap_ra_info);
-	swap_ra->win = win = __swapin_nr_pages(pfn, fpfn, hits,
+	ra_val = GET_SWAP_RA_VAL(vma);
+	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
+	prev_win = SWAP_RA_WIN(ra_val);
+	hits = SWAP_RA_HITS(ra_val);
+	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
 					       max_win, prev_win);
 	atomic_long_set(&vma->swap_readahead_info,
 			SWAP_RA_VAL(faddr, win, 0));
 
-	if (win == 1)
-		return NULL;
+	if (win == 1) {
+		pte_unmap(orig_pte);
+		return;
+	}
 
 	/* Copy the PTEs because the page table may be unmapped */
 	if (fpfn == pfn + 1)
@@ -703,23 +710,21 @@ struct page *swap_readahead_detect(struct vm_fault *vmf,
 		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
 				  &start, &end);
 	}
-	swap_ra->nr_pte = end - start;
-	swap_ra->offset = fpfn - start;
-	pte = vmf->pte - swap_ra->offset;
+	ra_info->nr_pte = end - start;
+	ra_info->offset = fpfn - start;
+	pte -= ra_info->offset;
 #ifdef CONFIG_64BIT
-	swap_ra->ptes = pte;
+	ra_info->ptes = pte;
 #else
-	tpte = swap_ra->ptes;
+	tpte = ra_info->ptes;
 	for (pfn = start; pfn != end; pfn++)
 		*tpte++ = *pte++;
 #endif
-
-	return NULL;
+	pte_unmap(orig_pte);
 }
 
 struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
-				    struct vm_fault *vmf,
-				    struct vma_swap_readahead *swap_ra)
+				    struct vm_fault *vmf)
 {
 	struct blk_plug plug;
 	struct vm_area_struct *vma = vmf->vma;
@@ -728,12 +733,14 @@ struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 	swp_entry_t entry;
 	unsigned int i;
 	bool page_allocated;
+	struct vma_swap_readahead ra_info = {0,};
 
-	if (swap_ra->win == 1)
+	swap_ra_info(vmf, &ra_info);
+	if (ra_info.win == 1)
 		goto skip;
 
 	blk_start_plug(&plug);
-	for (i = 0, pte = swap_ra->ptes; i < swap_ra->nr_pte;
+	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
 	     i++, pte++) {
 		pentry = *pte;
 		if (pte_none(pentry))
@@ -749,7 +756,7 @@ struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 			continue;
 		if (page_allocated) {
 			swap_readpage(page, false);
-			if (i != swap_ra->offset &&
+			if (i != ra_info.offset &&
 			    likely(!PageTransCompound(page))) {
 				SetPageReadahead(page);
 				count_vm_event(SWAP_RA);
@@ -761,7 +768,7 @@ struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 	lru_add_drain();
 skip:
 	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
-				     swap_ra->win == 1);
+				     ra_info.win == 1);
 }
 
 #ifdef CONFIG_SYSFS
-- 
2.16.1.291.g4437f3f132-goog

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* [PATCH RESEND 2/2] mm: swap: unify cluster-based and vma-based swap readahead
  2018-02-20  8:52 ` minchan
@ 2018-02-20  8:52   ` minchan
  -1 siblings, 0 replies; 16+ messages in thread
From: minchan @ 2018-02-20  8:52 UTC (permalink / raw)
  To: Andrew Morton; +Cc: lkml, linux-mm, Minchan Kim, Hugh Dickins, Huang Ying

From: Minchan Kim <minchan@kernel.org>

This patch makes do_swap_page() unaware of the two different swap
readahead algorithms by unifying the cluster-based and vma-based
readahead calls behind a single function.
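
Concretely, the unified entry point reduces to a one-line dispatch.
This sketch mirrors the mm/swap_state.c hunk below (illustration only);
callers that want a specific policy, such as shmem, call
swap_cluster_readahead() directly:

/* sketch of the dispatch added below, for illustration only */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}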

Link: http://lkml.kernel.org/r/1509520520-32367-3-git-send-email-minchan@kernel.org
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Huang Ying <ying.huang@intel.com>
---
 include/linux/swap.h | 27 ++++++++-----------------
 mm/memory.c          | 11 ++++------
 mm/shmem.c           |  5 ++++-
 mm/swap_state.c      | 48 ++++++++++++++++++++++++++++++++++----------
 4 files changed, 53 insertions(+), 38 deletions(-)

diff --git a/include/linux/swap.h b/include/linux/swap.h
index fa92177d863e..2417d288e016 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -400,7 +400,6 @@ int generic_swapfile_activate(struct swap_info_struct *, struct file *,
 #define SWAP_ADDRESS_SPACE_SHIFT	14
 #define SWAP_ADDRESS_SPACE_PAGES	(1 << SWAP_ADDRESS_SPACE_SHIFT)
 extern struct address_space *swapper_spaces[];
-extern bool swap_vma_readahead;
 #define swap_address_space(entry)			    \
 	(&swapper_spaces[swp_type(entry)][swp_offset(entry) \
 		>> SWAP_ADDRESS_SPACE_SHIFT])
@@ -422,10 +421,10 @@ extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
 extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
 			struct vm_area_struct *vma, unsigned long addr,
 			bool *new_page_allocated);
-extern struct page *swapin_readahead(swp_entry_t, gfp_t,
-			struct vm_area_struct *vma, unsigned long addr);
-extern struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
-					   struct vm_fault *vmf);
+extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+				struct vm_fault *vmf);
+extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
+				struct vm_fault *vmf);
 
 /* linux/mm/swapfile.c */
 extern atomic_long_t nr_swap_pages;
@@ -433,11 +432,6 @@ extern long total_swap_pages;
 extern atomic_t nr_rotate_swap;
 extern bool has_usable_swap(void);
 
-static inline bool swap_use_vma_readahead(void)
-{
-	return READ_ONCE(swap_vma_readahead) && !atomic_read(&nr_rotate_swap);
-}
-
 /* Swap 50% full? Release swapcache more aggressively.. */
 static inline bool vm_swap_full(void)
 {
@@ -533,19 +527,14 @@ static inline void put_swap_page(struct page *page, swp_entry_t swp)
 {
 }
 
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+				gfp_t gfp_mask, struct vm_fault *vmf)
 {
 	return NULL;
 }
 
-static inline bool swap_use_vma_readahead(void)
-{
-	return false;
-}
-
-static inline struct page *do_swap_page_readahead(swp_entry_t fentry,
-				gfp_t gfp_mask, struct vm_fault *vmf)
+static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
+			struct vm_fault *vmf)
 {
 	return NULL;
 }
diff --git a/mm/memory.c b/mm/memory.c
index 5b6e29d927c8..394078c97a98 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2927,7 +2927,8 @@ int do_swap_page(struct vm_fault *vmf)
 		if (si->flags & SWP_SYNCHRONOUS_IO &&
 				__swap_count(si, entry) == 1) {
 			/* skip swapcache */
-			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+			page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
+							vmf->address);
 			if (page) {
 				__SetPageLocked(page);
 				__SetPageSwapBacked(page);
@@ -2936,12 +2937,8 @@ int do_swap_page(struct vm_fault *vmf)
 				swap_readpage(page, true);
 			}
 		} else {
-			if (swap_use_vma_readahead())
-				page = do_swap_page_readahead(entry,
-					GFP_HIGHUSER_MOVABLE, vmf);
-			else
-				page = swapin_readahead(entry,
-				       GFP_HIGHUSER_MOVABLE, vma, vmf->address);
+			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
+						vmf);
 			swapcache = page;
 		}
 
diff --git a/mm/shmem.c b/mm/shmem.c
index 1907688b75ee..e493c5095b5f 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1413,9 +1413,12 @@ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
 {
 	struct vm_area_struct pvma;
 	struct page *page;
+	struct vm_fault vmf;
 
 	shmem_pseudo_vma_init(&pvma, info, index);
-	page = swapin_readahead(swap, gfp, &pvma, 0);
+	vmf.vma = &pvma;
+	vmf.address = 0;
+	page = swap_cluster_readahead(swap, gfp, &vmf);
 	shmem_pseudo_vma_destroy(&pvma);
 
 	return page;
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c56cce64b2c3..0b8ae361981f 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -38,7 +38,7 @@ static const struct address_space_operations swap_aops = {
 
 struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
 static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
-bool swap_vma_readahead __read_mostly = true;
+bool enable_vma_readahead __read_mostly = true;
 
 #define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
 #define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
@@ -322,6 +322,11 @@ void free_pages_and_swap_cache(struct page **pages, int nr)
 	release_pages(pagep, nr);
 }
 
+static inline bool swap_use_vma_readahead(void)
+{
+	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
+}
+
 /*
  * Lookup a swap entry in the swap cache. A found page will be returned
  * unlocked and with its refcount incremented - we rely on the kernel
@@ -539,11 +544,10 @@ static unsigned long swapin_nr_pages(unsigned long offset)
 }
 
 /**
- * swapin_readahead - swap in pages in hope we need them soon
+ * swap_cluster_readahead - swap in pages in hope we need them soon
  * @entry: swap entry of this memory
  * @gfp_mask: memory allocation flags
- * @vma: user vma this address belongs to
- * @addr: target address for mempolicy
+ * @vmf: fault information
  *
  * Returns the struct page for entry and addr, after queueing swapin.
  *
@@ -555,10 +559,10 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * This has been extended to use the NUMA policies from the mm triggering
  * the readahead.
  *
- * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
+ * Caller must hold down_read on the vma->vm_mm if vmf->vma is not NULL.
  */
-struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
-			struct vm_area_struct *vma, unsigned long addr)
+struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
+				struct vm_fault *vmf)
 {
 	struct page *page;
 	unsigned long entry_offset = swp_offset(entry);
@@ -568,6 +572,8 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	struct swap_info_struct *si = swp_swap_info(entry);
 	struct blk_plug plug;
 	bool do_poll = true, page_allocated;
+	struct vm_area_struct *vma = vmf->vma;
+	unsigned long addr = vmf->address;
 
 	mask = swapin_nr_pages(offset) - 1;
 	if (!mask)
@@ -723,7 +729,7 @@ static void swap_ra_info(struct vm_fault *vmf,
 	pte_unmap(orig_pte);
 }
 
-struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
+struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 				    struct vm_fault *vmf)
 {
 	struct blk_plug plug;
@@ -771,20 +777,40 @@ struct page *do_swap_page_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 				     ra_info.win == 1);
 }
 
+/**
+ * swapin_readahead - swap in pages in hope we need them soon
+ * @entry: swap entry of this memory
+ * @gfp_mask: memory allocation flags
+ * @vmf: fault information
+ *
+ * Returns the struct page for entry and addr, after queueing swapin.
+ *
+ * It's the main entry function for swap readahead. Depending on the
+ * configuration, it reads ahead in a cluster-based (i.e., physical
+ * disk based) or vma-based (i.e., faulting address based) fashion.
+ */
+struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
+				struct vm_fault *vmf)
+{
+	return swap_use_vma_readahead() ?
+			swap_vma_readahead(entry, gfp_mask, vmf) :
+			swap_cluster_readahead(entry, gfp_mask, vmf);
+}
+
 #ifdef CONFIG_SYSFS
 static ssize_t vma_ra_enabled_show(struct kobject *kobj,
 				     struct kobj_attribute *attr, char *buf)
 {
-	return sprintf(buf, "%s\n", swap_vma_readahead ? "true" : "false");
+	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
 }
 static ssize_t vma_ra_enabled_store(struct kobject *kobj,
 				      struct kobj_attribute *attr,
 				      const char *buf, size_t count)
 {
 	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
-		swap_vma_readahead = true;
+		enable_vma_readahead = true;
 	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
-		swap_vma_readahead = false;
+		enable_vma_readahead = false;
 	else
 		return -EINVAL;
 
-- 
2.16.1.291.g4437f3f132-goog

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH RESEND 1/2] mm: swap: clean up swap readahead
  2018-02-20  8:52   ` minchan
@ 2018-02-23  8:02     ` Huang, Ying
  -1 siblings, 0 replies; 16+ messages in thread
From: Huang, Ying @ 2018-02-23  8:02 UTC (permalink / raw)
  To: minchan; +Cc: Andrew Morton, lkml, linux-mm, Hugh Dickins

<minchan@kernel.org> writes:
[snip]

> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index 39ae7cfad90f..c56cce64b2c3 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -332,32 +332,38 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
>  			       unsigned long addr)
>  {
>  	struct page *page;
> -	unsigned long ra_info;
> -	int win, hits, readahead;
>  
>  	page = find_get_page(swap_address_space(entry), swp_offset(entry));
>  
>  	INC_CACHE_INFO(find_total);
>  	if (page) {
> +		bool vma_ra = swap_use_vma_readahead();
> +		bool readahead = TestClearPageReadahead(page);
> +

TestClearPageReadahead() cannot be called on a compound page, as its
flag policy shows:

PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)

>  		INC_CACHE_INFO(find_success);
>  		if (unlikely(PageTransCompound(page)))
>  			return page;
> -		readahead = TestClearPageReadahead(page);

So we can only call it here, after checking whether the page is compound.
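
(For reference, a rough paraphrase of the policy involved -- the real
macro in include/linux/page-flags.h also routes through
PF_POISONED_CHECK(): with CONFIG_DEBUG_VM_PGFLAGS enabled, any flag
operation declared PF_NO_COMPOUND BUGs when handed a compound page, so
calling TestClearPageReadahead() on a THP would trip the assertion.)

/* paraphrased sketch, not the verbatim kernel macro */
#define PF_NO_COMPOUND(page, enforce) ({				\
	VM_BUG_ON_PGFLAGS(enforce && PageCompound(page), page);	\
	(page); })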

Best Regards,
Huang, Ying

> -		if (vma) {
> -			ra_info = GET_SWAP_RA_VAL(vma);
> -			win = SWAP_RA_WIN(ra_info);
> -			hits = SWAP_RA_HITS(ra_info);
> +
> +		if (vma && vma_ra) {
> +			unsigned long ra_val;
> +			int win, hits;
> +
> +			ra_val = GET_SWAP_RA_VAL(vma);
> +			win = SWAP_RA_WIN(ra_val);
> +			hits = SWAP_RA_HITS(ra_val);
>  			if (readahead)
>  				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
>  			atomic_long_set(&vma->swap_readahead_info,
>  					SWAP_RA_VAL(addr, win, hits));
>  		}
> +
>  		if (readahead) {
>  			count_vm_event(SWAP_RA_HIT);
> -			if (!vma)
> +			if (!vma || !vma_ra)
>  				atomic_inc(&swapin_readahead_hits);
>  		}
>  	}
> +
>  	return page;
>  }
>

[snip]

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH RESEND 1/2] mm: swap: clean up swap readahead
  2018-02-23  8:02     ` Huang, Ying
@ 2018-02-26  4:56       ` Minchan Kim
  -1 siblings, 0 replies; 16+ messages in thread
From: Minchan Kim @ 2018-02-26  4:56 UTC (permalink / raw)
  To: Huang, Ying
  Cc: Andrew Morton, lkml, linux-mm, Hugh Dickins, Kirill A. Shutemov

On Fri, Feb 23, 2018 at 04:02:27PM +0800, Huang, Ying wrote:
> <minchan@kernel.org> writes:
> [snip]
> 
> > diff --git a/mm/swap_state.c b/mm/swap_state.c
> > index 39ae7cfad90f..c56cce64b2c3 100644
> > --- a/mm/swap_state.c
> > +++ b/mm/swap_state.c
> > @@ -332,32 +332,38 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
> >  			       unsigned long addr)
> >  {
> >  	struct page *page;
> > -	unsigned long ra_info;
> > -	int win, hits, readahead;
> >  
> >  	page = find_get_page(swap_address_space(entry), swp_offset(entry));
> >  
> >  	INC_CACHE_INFO(find_total);
> >  	if (page) {
> > +		bool vma_ra = swap_use_vma_readahead();
> > +		bool readahead = TestClearPageReadahead(page);
> > +
> 
> TestClearPageReadahead() cannot be called for compound page.  As in
> 
> PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
> 	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
> 
> >  		INC_CACHE_INFO(find_success);
> >  		if (unlikely(PageTransCompound(page)))
> >  			return page;
> > -		readahead = TestClearPageReadahead(page);
> 
> So we can only call it here after checking whether page is compound.

Hi Huang,

Thanks for catching this.
However, I don't see a reason we should rule out THP pages for the
readahead marker.  Couldn't we relax the rule?

I hope we can, so that we could remove the PageTransCompound() check
around the readahead marker, which makes the code ugly.

From 748b084d5c3960ec2418d8c51a678aada30f1072 Mon Sep 17 00:00:00 2001
From: Minchan Kim <minchan@kernel.org>
Date: Mon, 26 Feb 2018 13:46:43 +0900
Subject: [PATCH] mm: relax policy for PG_readahead

This flag is in use for anon THP pages, so let's relax the policy.

Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
---
 include/linux/page-flags.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e34a27727b9a..f12d4dfae580 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -318,8 +318,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
 /* PG_readahead is only used for reads; PG_reclaim is only for writes */
 PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
 	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
-PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
-	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
+PAGEFLAG(Readahead, reclaim, PF_NO_TAIL)
+	TESTCLEARFLAG(Readahead, reclaim, PF_NO_TAIL)
 
 #ifdef CONFIG_HIGHMEM
 /*
-- 
2.16.1.291.g4437f3f132-goog

^ permalink raw reply related	[flat|nested] 16+ messages in thread

* Re: [PATCH RESEND 1/2] mm: swap: clean up swap readahead
  2018-02-26  4:56       ` Minchan Kim
@ 2018-02-26  5:18         ` Huang, Ying
  -1 siblings, 0 replies; 16+ messages in thread
From: Huang, Ying @ 2018-02-26  5:18 UTC (permalink / raw)
  To: Minchan Kim
  Cc: Andrew Morton, lkml, linux-mm, Hugh Dickins, Kirill A. Shutemov

Minchan Kim <minchan@kernel.org> writes:

> On Fri, Feb 23, 2018 at 04:02:27PM +0800, Huang, Ying wrote:
>> <minchan@kernel.org> writes:
>> [snip]
>> 
>> > diff --git a/mm/swap_state.c b/mm/swap_state.c
>> > index 39ae7cfad90f..c56cce64b2c3 100644
>> > --- a/mm/swap_state.c
>> > +++ b/mm/swap_state.c
>> > @@ -332,32 +332,38 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
>> >  			       unsigned long addr)
>> >  {
>> >  	struct page *page;
>> > -	unsigned long ra_info;
>> > -	int win, hits, readahead;
>> >  
>> >  	page = find_get_page(swap_address_space(entry), swp_offset(entry));
>> >  
>> >  	INC_CACHE_INFO(find_total);
>> >  	if (page) {
>> > +		bool vma_ra = swap_use_vma_readahead();
>> > +		bool readahead = TestClearPageReadahead(page);
>> > +
>> 
>> TestClearPageReadahead() cannot be called for compound page.  As in
>> 
>> PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
>> 	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
>> 
>> >  		INC_CACHE_INFO(find_success);
>> >  		if (unlikely(PageTransCompound(page)))
>> >  			return page;
>> > -		readahead = TestClearPageReadahead(page);
>> 
>> So we can only call it here after checking whether page is compound.
>
> Hi Huang,
>
> Thanks for cathing this.
> However, I don't see the reason we should rule out THP page for
> readahead marker. Could't we relax the rule?
>
> I hope we can do so that we could remove PageTransCompound check
> for readahead marker, which makes code ugly.
>
> From 748b084d5c3960ec2418d8c51a678aada30f1072 Mon Sep 17 00:00:00 2001
> From: Minchan Kim <minchan@kernel.org>
> Date: Mon, 26 Feb 2018 13:46:43 +0900
> Subject: [PATCH] mm: relax policy for PG_readahead
>
> This flag is in use for anon THP page so let's relax it.
>
> Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> Signed-off-by: Minchan Kim <minchan@kernel.org>
> ---
>  include/linux/page-flags.h | 4 ++--
>  1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> index e34a27727b9a..f12d4dfae580 100644
> --- a/include/linux/page-flags.h
> +++ b/include/linux/page-flags.h
> @@ -318,8 +318,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
>  /* PG_readahead is only used for reads; PG_reclaim is only for writes */
>  PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
>  	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
> -PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
> -	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
> +PAGEFLAG(Readahead, reclaim, PF_NO_TAIL)
> +	TESTCLEARFLAG(Readahead, reclaim, PF_NO_TAIL)
>  
>  #ifdef CONFIG_HIGHMEM
>  /*

In reality we never set the Readahead bit for THP.  The original code
documents that fact.  I don't think it is a good idea to change this
without a good reason.

Best Regards,
Huang, Ying

^ permalink raw reply	[flat|nested] 16+ messages in thread

* Re: [PATCH RESEND 1/2] mm: swap: clean up swap readahead
  2018-02-26  5:18         ` Huang, Ying
@ 2018-02-26  5:41           ` Minchan Kim
  -1 siblings, 0 replies; 16+ messages in thread
From: Minchan Kim @ 2018-02-26  5:41 UTC (permalink / raw)
  To: Huang, Ying
  Cc: Andrew Morton, lkml, linux-mm, Hugh Dickins, Kirill A. Shutemov

On Mon, Feb 26, 2018 at 01:18:50PM +0800, Huang, Ying wrote:
> Minchan Kim <minchan@kernel.org> writes:
> 
> > On Fri, Feb 23, 2018 at 04:02:27PM +0800, Huang, Ying wrote:
> >> <minchan@kernel.org> writes:
> >> [snip]
> >> 
> >> > diff --git a/mm/swap_state.c b/mm/swap_state.c
> >> > index 39ae7cfad90f..c56cce64b2c3 100644
> >> > --- a/mm/swap_state.c
> >> > +++ b/mm/swap_state.c
> >> > @@ -332,32 +332,38 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
> >> >  			       unsigned long addr)
> >> >  {
> >> >  	struct page *page;
> >> > -	unsigned long ra_info;
> >> > -	int win, hits, readahead;
> >> >  
> >> >  	page = find_get_page(swap_address_space(entry), swp_offset(entry));
> >> >  
> >> >  	INC_CACHE_INFO(find_total);
> >> >  	if (page) {
> >> > +		bool vma_ra = swap_use_vma_readahead();
> >> > +		bool readahead = TestClearPageReadahead(page);
> >> > +
> >> 
> >> TestClearPageReadahead() cannot be called for a compound page.  As in
> >> 
> >> PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
> >> 	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
> >> 
> >> >  		INC_CACHE_INFO(find_success);
> >> >  		if (unlikely(PageTransCompound(page)))
> >> >  			return page;
> >> > -		readahead = TestClearPageReadahead(page);
> >> 
> >> So we can only call it here after checking whether the page is compound.
> >
> > Hi Huang,
> >
> > Thanks for catching this.
> > However, I don't see a reason we should rule out THP pages for the
> > readahead marker. Couldn't we relax the rule?
> >
> > I hope we can, so that we could remove the PageTransCompound check
> > for the readahead marker, which makes the code ugly.
> >
> > From 748b084d5c3960ec2418d8c51a678aada30f1072 Mon Sep 17 00:00:00 2001
> > From: Minchan Kim <minchan@kernel.org>
> > Date: Mon, 26 Feb 2018 13:46:43 +0900
> > Subject: [PATCH] mm: relax policy for PG_readahead
> >
> > This flag is in use for anon THP pages, so let's relax it.
> >
> > Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
> > Signed-off-by: Minchan Kim <minchan@kernel.org>
> > ---
> >  include/linux/page-flags.h | 4 ++--
> >  1 file changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
> > index e34a27727b9a..f12d4dfae580 100644
> > --- a/include/linux/page-flags.h
> > +++ b/include/linux/page-flags.h
> > @@ -318,8 +318,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
> >  /* PG_readahead is only used for reads; PG_reclaim is only for writes */
> >  PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
> >  	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
> > -PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
> > -	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
> > +PAGEFLAG(Readahead, reclaim, PF_NO_TAIL)
> > +	TESTCLEARFLAG(Readahead, reclaim, PF_NO_TAIL)
> >  
> >  #ifdef CONFIG_HIGHMEM
> >  /*
> 
> We never set the Readahead bit for THP in reality.  The original code acts
> as documentation for this.  I don't think it is a good idea to change this
> without a good reason.

I don't like such divergence; ideally we wouldn't need to care about
whether the page is THP or not. However, it is also pointless to confuse
the readahead stat counters. How about this?

diff --git a/mm/swap_state.c b/mm/swap_state.c
index 8dde719e973c..e169d137d27c 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -348,12 +348,17 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
 	INC_CACHE_INFO(find_total);
 	if (page) {
 		bool vma_ra = swap_use_vma_readahead();
-		bool readahead = TestClearPageReadahead(page);
+		bool readahead;
 
 		INC_CACHE_INFO(find_success);
+		/*
+		 * At the moment, we don't support PG_readahead for anon THP,
+		 * so let's bail out rather than confusing the readahead stat.
+		 */
 		if (unlikely(PageTransCompound(page)))
 			return page;
 
+		readahead = TestClearPageReadahead(page);
 		if (vma && vma_ra) {
 			unsigned long ra_val;
 			int win, hits;
@@ -608,8 +613,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 			continue;
 		if (page_allocated) {
 			swap_readpage(page, false);
-			if (offset != entry_offset &&
-			    likely(!PageTransCompound(page))) {
+			if (offset != entry_offset) {
 				SetPageReadahead(page);
 				count_vm_event(SWAP_RA);
 			}
@@ -772,8 +776,7 @@ struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 			continue;
 		if (page_allocated) {
 			swap_readpage(page, false);
-			if (i != ra_info.offset &&
-			    likely(!PageTransCompound(page))) {
+			if (i != ra_info.offset) {
 				SetPageReadahead(page);
 				count_vm_event(SWAP_RA);
 			}

^ permalink raw reply related	[flat|nested] 16+ messages in thread
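
A note on why dropping the PageTransCompound() checks from the two
readahead loops above is safe: SetPageReadahead() only runs under
"if (page_allocated)", and __read_swap_cache_async() allocates order-0
pages only, so a freshly allocated page on that path can never be
compound. A minimal sketch of the resulting loop (abridged from the
hunks above; locking and error paths omitted, surrounding details
approximate):

	for (offset = start_offset; offset <= end_offset; offset++) {
		/* may return an existing swap-cache page, or allocate
		 * a new order-0 page and start I/O on it */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			/* new order-0 page: PG_readahead ops are legal */
			swap_readpage(page, false);
			if (offset != entry_offset) {
				/* speculative page, not the faulting one */
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}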

* Re: [PATCH RESEND 1/2] mm: swap: clean up swap readahead
  2018-02-26  5:41           ` Minchan Kim
@ 2018-02-26  8:22             ` Huang, Ying
  -1 siblings, 0 replies; 16+ messages in thread
From: Huang, Ying @ 2018-02-26  8:22 UTC (permalink / raw)
  To: Minchan Kim
  Cc: Andrew Morton, lkml, linux-mm, Hugh Dickins, Kirill A. Shutemov

Minchan Kim <minchan@kernel.org> writes:

> On Mon, Feb 26, 2018 at 01:18:50PM +0800, Huang, Ying wrote:
>> Minchan Kim <minchan@kernel.org> writes:
>> 
>> > On Fri, Feb 23, 2018 at 04:02:27PM +0800, Huang, Ying wrote:
>> >> <minchan@kernel.org> writes:
>> >> [snip]
>> >> 
>> >> > diff --git a/mm/swap_state.c b/mm/swap_state.c
>> >> > index 39ae7cfad90f..c56cce64b2c3 100644
>> >> > --- a/mm/swap_state.c
>> >> > +++ b/mm/swap_state.c
>> >> > @@ -332,32 +332,38 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
>> >> >  			       unsigned long addr)
>> >> >  {
>> >> >  	struct page *page;
>> >> > -	unsigned long ra_info;
>> >> > -	int win, hits, readahead;
>> >> >  
>> >> >  	page = find_get_page(swap_address_space(entry), swp_offset(entry));
>> >> >  
>> >> >  	INC_CACHE_INFO(find_total);
>> >> >  	if (page) {
>> >> > +		bool vma_ra = swap_use_vma_readahead();
>> >> > +		bool readahead = TestClearPageReadahead(page);
>> >> > +
>> >> 
>> >> TestClearPageReadahead() cannot be called for a compound page.  As in
>> >> 
>> >> PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
>> >> 	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
>> >> 
>> >> >  		INC_CACHE_INFO(find_success);
>> >> >  		if (unlikely(PageTransCompound(page)))
>> >> >  			return page;
>> >> > -		readahead = TestClearPageReadahead(page);
>> >> 
>> >> So we can only call it here after checking whether the page is compound.
>> >
>> > Hi Huang,
>> >
>> > Thanks for catching this.
>> > However, I don't see a reason we should rule out THP pages for the
>> > readahead marker. Couldn't we relax the rule?
>> >
>> > I hope we can, so that we could remove the PageTransCompound check
>> > for the readahead marker, which makes the code ugly.
>> >
>> > From 748b084d5c3960ec2418d8c51a678aada30f1072 Mon Sep 17 00:00:00 2001
>> > From: Minchan Kim <minchan@kernel.org>
>> > Date: Mon, 26 Feb 2018 13:46:43 +0900
>> > Subject: [PATCH] mm: relax policy for PG_readahead
>> >
>> > This flag is in use for anon THP pages, so let's relax it.
>> >
>> > Cc: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
>> > Signed-off-by: Minchan Kim <minchan@kernel.org>
>> > ---
>> >  include/linux/page-flags.h | 4 ++--
>> >  1 file changed, 2 insertions(+), 2 deletions(-)
>> >
>> > diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
>> > index e34a27727b9a..f12d4dfae580 100644
>> > --- a/include/linux/page-flags.h
>> > +++ b/include/linux/page-flags.h
>> > @@ -318,8 +318,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk, PF_NO_TAIL)
>> >  /* PG_readahead is only used for reads; PG_reclaim is only for writes */
>> >  PAGEFLAG(Reclaim, reclaim, PF_NO_TAIL)
>> >  	TESTCLEARFLAG(Reclaim, reclaim, PF_NO_TAIL)
>> > -PAGEFLAG(Readahead, reclaim, PF_NO_COMPOUND)
>> > -	TESTCLEARFLAG(Readahead, reclaim, PF_NO_COMPOUND)
>> > +PAGEFLAG(Readahead, reclaim, PF_NO_TAIL)
>> > +	TESTCLEARFLAG(Readahead, reclaim, PF_NO_TAIL)
>> >  
>> >  #ifdef CONFIG_HIGHMEM
>> >  /*
>> 
>> We never set the Readahead bit for THP in reality.  The original code acts
>> as documentation for this.  I don't think it is a good idea to change this
>> without a good reason.
>
> I don't like such divergence; ideally we wouldn't need to care about
> whether the page is THP or not. However, it is also pointless to confuse
> the readahead stat counters. How about this?
>
> diff --git a/mm/swap_state.c b/mm/swap_state.c
> index 8dde719e973c..e169d137d27c 100644
> --- a/mm/swap_state.c
> +++ b/mm/swap_state.c
> @@ -348,12 +348,17 @@ struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
>  	INC_CACHE_INFO(find_total);
>  	if (page) {
>  		bool vma_ra = swap_use_vma_readahead();
> -		bool readahead = TestClearPageReadahead(page);
> +		bool readahead;
>  
>  		INC_CACHE_INFO(find_success);
> +		/*
> +		 * At the moment, we don't support PG_readahead for anon THP,
> +		 * so let's bail out rather than confusing the readahead stat.
> +		 */
>  		if (unlikely(PageTransCompound(page)))
>  			return page;
>  
> +		readahead = TestClearPageReadahead(page);
>  		if (vma && vma_ra) {
>  			unsigned long ra_val;
>  			int win, hits;
> @@ -608,8 +613,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
>  			continue;
>  		if (page_allocated) {
>  			swap_readpage(page, false);
> -			if (offset != entry_offset &&
> -			    likely(!PageTransCompound(page))) {
> +			if (offset != entry_offset) {
>  				SetPageReadahead(page);
>  				count_vm_event(SWAP_RA);
>  			}
> @@ -772,8 +776,7 @@ struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
>  			continue;
>  		if (page_allocated) {
>  			swap_readpage(page, false);
> -			if (i != ra_info.offset &&
> -			    likely(!PageTransCompound(page))) {
> +			if (i != ra_info.offset) {
>  				SetPageReadahead(page);
>  				count_vm_event(SWAP_RA);
>  			}

This looks good to me.  Thanks!

Best Regards,
Huang, Ying

^ permalink raw reply	[flat|nested] 16+ messages in thread
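
For completeness, the consumer side this settles on in lookup_swap_cache():
the THP bail-out happens before any PG_readahead access, and only then is
the flag consumed and fed into the readahead statistics. A sketch stitched
together from the hunks in this thread; the window/hit bookkeeping below is
reconstructed from the v4.16-era mm/swap_state.c, so treat those details
(GET_SWAP_RA_VAL() and friends) as approximate:

		INC_CACHE_INFO(find_success);
		/* THP never carries PG_readahead here, so bail out
		 * before the flag op and leave the stats untouched */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			/* credit the hit to this VMA's readahead window */
			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}
		if (readahead)
			count_vm_event(SWAP_RA_HIT);	/* readahead page was used */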

end of thread, other threads:[~2018-02-26  8:22 UTC | newest]

Thread overview: 16+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-02-20  8:52 [PATCH RESEND 0/2] swap readahead clean up minchan
2018-02-20  8:52 ` minchan
2018-02-20  8:52 ` [PATCH RESEND 1/2] mm: swap: clean up swap readahead minchan
2018-02-20  8:52   ` minchan
2018-02-23  8:02   ` Huang, Ying
2018-02-23  8:02     ` Huang, Ying
2018-02-26  4:56     ` Minchan Kim
2018-02-26  4:56       ` Minchan Kim
2018-02-26  5:18       ` Huang, Ying
2018-02-26  5:18         ` Huang, Ying
2018-02-26  5:41         ` Minchan Kim
2018-02-26  5:41           ` Minchan Kim
2018-02-26  8:22           ` Huang, Ying
2018-02-26  8:22             ` Huang, Ying
2018-02-20  8:52 ` [PATCH RESEND 2/2] mm: swap: unify cluster-based and vma-based " minchan
2018-02-20  8:52   ` minchan
