From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
To: Hugh Dickins <hughd@google.com>,
	Andrea Arcangeli <aarcange@redhat.com>,
	Andrew Morton <akpm@linux-foundation.org>
Cc: Dave Hansen <dave.hansen@intel.com>,
	Vlastimil Babka <vbabka@suse.cz>,
	Christoph Lameter <cl@gentwo.org>,
	Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>,
	Jerome Marchand <jmarchan@redhat.com>,
	Yang Shi <yang.shi@linaro.org>,
	Sasha Levin <sasha.levin@oracle.com>,
	Andres Lagar-Cavilla <andreslc@google.com>,
	Ning Qu <quning@gmail.com>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org,
	Ebru Akagunduz <ebru.akagunduz@gmail.com>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Subject: [PATCHv9-rebased2 36/37] shmem: split huge pages beyond i_size under memory pressure
Date: Wed, 15 Jun 2016 23:06:41 +0300
Message-ID: <1466021202-61880-37-git-send-email-kirill.shutemov@linux.intel.com>
In-Reply-To: <1466021202-61880-1-git-send-email-kirill.shutemov@linux.intel.com>

Even if the user asked to always allocate huge pages (huge=always), we
should be able to free up some memory by splitting huge pages which are
partly beyond i_size when memory pressure comes or once we hit the limit
on filesystem size (-o size=).
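
For context, a minimal userspace sketch (not part of this patch) of the
kind of mount this targets; the mount point and size cap are made-up
values, and huge= is only accepted with CONFIG_TRANSPARENT_HUGE_PAGECACHE:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* illustrative only: huge pages always on, plus a small size cap */
	if (mount("tmpfs", "/mnt/test", "tmpfs", 0, "huge=always,size=64M"))
		perror("mount");	/* needs CAP_SYS_ADMIN */
	return 0;
}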

In order to do this we maintain a per-superblock list of inodes which
potentially have huge pages partly beyond i_size.

A per-fs shrinker can then reclaim memory by splitting such pages.
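
To make the "anything to gain" test concrete, here is a small standalone
sketch (not part of this patch) of the check the shrinker applies, assuming
4k pages and 2M PMD-sized huge pages:

#include <stdio.h>

#define MY_PAGE_SIZE      4096UL
#define MY_HPAGE_PMD_SIZE (512 * MY_PAGE_SIZE)	/* assumed 2M huge pages */

/* round up x to a multiple of a (a must be a power of two) */
static unsigned long my_round_up(unsigned long x, unsigned long a)
{
	return (x + a - 1) & ~(a - 1);
}

int main(void)
{
	unsigned long sizes[] = { 4096, 1UL << 20, 2UL << 20, (2UL << 20) + 1 };
	int i;

	for (i = 0; i < 4; i++) {
		unsigned long sz = sizes[i];
		/* splitting only helps when small pages of the last huge
		 * page lie entirely beyond i_size */
		int candidate = my_round_up(sz, MY_PAGE_SIZE) !=
				my_round_up(sz, MY_HPAGE_PMD_SIZE);
		printf("i_size=%lu: %s\n", sz,
		       candidate ? "split candidate" : "nothing to gain");
	}
	return 0;
}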

If we hit -ENOSPC during shmem_getpage_gfp(), we try to split a huge page
to free up space on the filesystem and retry the allocation if the split
succeeds.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 include/linux/shmem_fs.h |   6 +-
 mm/shmem.c               | 175 +++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 180 insertions(+), 1 deletion(-)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 54fa28dfbd89..ff078e7043b6 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -16,8 +16,9 @@ struct shmem_inode_info {
 	unsigned long		flags;
 	unsigned long		alloced;	/* data pages alloced to file */
 	unsigned long		swapped;	/* subtotal assigned to swap */
-	struct shared_policy	policy;		/* NUMA memory alloc policy */
+	struct list_head        shrinklist;     /* shrinkable hpage inodes */
 	struct list_head	swaplist;	/* chain of maybes on swap */
+	struct shared_policy	policy;		/* NUMA memory alloc policy */
 	struct simple_xattrs	xattrs;		/* list of xattrs */
 	struct inode		vfs_inode;
 };
@@ -33,6 +34,9 @@ struct shmem_sb_info {
 	kuid_t uid;		    /* Mount uid for root directory */
 	kgid_t gid;		    /* Mount gid for root directory */
 	struct mempolicy *mpol;     /* default memory policy for mappings */
+	spinlock_t shrinklist_lock;   /* Protects shrinklist */
+	struct list_head shrinklist;  /* List of shrinkable inodes */
+	unsigned long shrinklist_len; /* Length of shrinklist */
 };
 
 static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
diff --git a/mm/shmem.c b/mm/shmem.c
index 6beeb98b3592..bfaa007ccb58 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -188,6 +188,7 @@ static const struct inode_operations shmem_inode_operations;
 static const struct inode_operations shmem_dir_inode_operations;
 static const struct inode_operations shmem_special_inode_operations;
 static const struct vm_operations_struct shmem_vm_ops;
+static struct file_system_type shmem_fs_type;
 
 static LIST_HEAD(shmem_swaplist);
 static DEFINE_MUTEX(shmem_swaplist_mutex);
@@ -406,10 +407,122 @@ static const char *shmem_format_huge(int huge)
 	}
 }
 
+static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
+		struct shrink_control *sc, unsigned long nr_to_split)
+{
+	LIST_HEAD(list), *pos, *next;
+	struct inode *inode;
+	struct shmem_inode_info *info;
+	struct page *page;
+	unsigned long batch = sc ? sc->nr_to_scan : 128;
+	int removed = 0, split = 0;
+
+	if (list_empty(&sbinfo->shrinklist))
+		return SHRINK_STOP;
+
+	spin_lock(&sbinfo->shrinklist_lock);
+	list_for_each_safe(pos, next, &sbinfo->shrinklist) {
+		info = list_entry(pos, struct shmem_inode_info, shrinklist);
+
+		/* pin the inode */
+		inode = igrab(&info->vfs_inode);
+
+		/* inode is about to be evicted */
+		if (!inode) {
+			list_del_init(&info->shrinklist);
+			removed++;
+			goto next;
+		}
+
+		/* Check if there's anything to gain */
+		if (round_up(inode->i_size, PAGE_SIZE) ==
+				round_up(inode->i_size, HPAGE_PMD_SIZE)) {
+			list_del_init(&info->shrinklist);
+			removed++;
+			iput(inode);
+			goto next;
+		}
+
+		list_move(&info->shrinklist, &list);
+next:
+		if (!--batch)
+			break;
+	}
+	spin_unlock(&sbinfo->shrinklist_lock);
+
+	list_for_each_safe(pos, next, &list) {
+		int ret;
+
+		info = list_entry(pos, struct shmem_inode_info, shrinklist);
+		inode = &info->vfs_inode;
+
+		if (nr_to_split && split >= nr_to_split) {
+			iput(inode);
+			continue;
+		}
+
+		page = find_lock_page(inode->i_mapping,
+				(inode->i_size & HPAGE_PMD_MASK) >> PAGE_SHIFT);
+		if (!page)
+			goto drop;
+
+		if (!PageTransHuge(page)) {
+			unlock_page(page);
+			put_page(page);
+			goto drop;
+		}
+
+		ret = split_huge_page(page);
+		unlock_page(page);
+		put_page(page);
+
+		if (ret) {
+			/* split failed: leave it on the list */
+			iput(inode);
+			continue;
+		}
+
+		split++;
+drop:
+		list_del_init(&info->shrinklist);
+		removed++;
+		iput(inode);
+	}
+
+	spin_lock(&sbinfo->shrinklist_lock);
+	list_splice_tail(&list, &sbinfo->shrinklist);
+	sbinfo->shrinklist_len -= removed;
+	spin_unlock(&sbinfo->shrinklist_lock);
+
+	return split;
+}
+
+static long shmem_unused_huge_scan(struct super_block *sb,
+		struct shrink_control *sc)
+{
+	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+
+	if (!READ_ONCE(sbinfo->shrinklist_len))
+		return SHRINK_STOP;
+
+	return shmem_unused_huge_shrink(sbinfo, sc, 0);
+}
+
+static long shmem_unused_huge_count(struct super_block *sb,
+		struct shrink_control *sc)
+{
+	struct shmem_sb_info *sbinfo = SHMEM_SB(sb);
+	return READ_ONCE(sbinfo->shrinklist_len);
+}
 #else /* !CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 
 #define shmem_huge SHMEM_HUGE_DENY
 
+static unsigned long shmem_unused_huge_shrink(struct shmem_sb_info *sbinfo,
+		struct shrink_control *sc, unsigned long nr_to_split)
+{
+	return 0;
+}
 #endif /* CONFIG_TRANSPARENT_HUGE_PAGECACHE */
 
 /*
@@ -843,6 +956,7 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = d_inode(dentry);
 	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 	int error;
 
 	error = inode_change_ok(inode, attr);
@@ -878,6 +992,20 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 			if (oldsize > holebegin)
 				unmap_mapping_range(inode->i_mapping,
 							holebegin, 0, 1);
+
+			/*
+			 * Part of the huge page can be beyond i_size: subject
+			 * to shrink under memory pressure.
+			 */
+			if (IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
+				spin_lock(&sbinfo->shrinklist_lock);
+				if (list_empty(&info->shrinklist)) {
+					list_add_tail(&info->shrinklist,
+							&sbinfo->shrinklist);
+					sbinfo->shrinklist_len++;
+				}
+				spin_unlock(&sbinfo->shrinklist_lock);
+			}
 		}
 	}
 
@@ -890,11 +1018,20 @@ static int shmem_setattr(struct dentry *dentry, struct iattr *attr)
 static void shmem_evict_inode(struct inode *inode)
 {
 	struct shmem_inode_info *info = SHMEM_I(inode);
+	struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
 
 	if (inode->i_mapping->a_ops == &shmem_aops) {
 		shmem_unacct_size(info->flags, inode->i_size);
 		inode->i_size = 0;
 		shmem_truncate_range(inode, 0, (loff_t)-1);
+		if (!list_empty(&info->shrinklist)) {
+			spin_lock(&sbinfo->shrinklist_lock);
+			if (!list_empty(&info->shrinklist)) {
+				list_del_init(&info->shrinklist);
+				sbinfo->shrinklist_len--;
+			}
+			spin_unlock(&sbinfo->shrinklist_lock);
+		}
 		if (!list_empty(&info->swaplist)) {
 			mutex_lock(&shmem_swaplist_mutex);
 			list_del_init(&info->swaplist);
@@ -1563,8 +1700,23 @@ alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
 					index, false);
 		}
 		if (IS_ERR(page)) {
+			int retry = 5;
 			error = PTR_ERR(page);
 			page = NULL;
+			if (error != -ENOSPC)
+				goto failed;
+			/*
+			 * Try to reclaim some space by splitting a huge page
+			 * beyond i_size on the filesystem.
+			 */
+			while (retry--) {
+				int ret;
+				ret = shmem_unused_huge_shrink(sbinfo, NULL, 1);
+				if (ret == SHRINK_STOP)
+					break;
+				if (ret)
+					goto alloc_nohuge;
+			}
 			goto failed;
 		}
 
@@ -1603,6 +1755,22 @@ alloc_nohuge:		page = shmem_alloc_and_acct_page(gfp, info, sbinfo,
 		spin_unlock_irq(&info->lock);
 		alloced = true;
 
+		if (PageTransHuge(page) &&
+				DIV_ROUND_UP(i_size_read(inode), PAGE_SIZE) <
+				hindex + HPAGE_PMD_NR - 1) {
+			/*
+			 * Part of the huge page is beyond i_size: subject
+			 * to shrink under memory pressure.
+			 */
+			spin_lock(&sbinfo->shrinklist_lock);
+			if (list_empty(&info->shrinklist)) {
+				list_add_tail(&info->shrinklist,
+						&sbinfo->shrinklist);
+				sbinfo->shrinklist_len++;
+			}
+			spin_unlock(&sbinfo->shrinklist_lock);
+		}
+
 		/*
 		 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
 		 */
@@ -1920,6 +2088,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 		spin_lock_init(&info->lock);
 		info->seals = F_SEAL_SEAL;
 		info->flags = flags & VM_NORESERVE;
+		INIT_LIST_HEAD(&info->shrinklist);
 		INIT_LIST_HEAD(&info->swaplist);
 		simple_xattrs_init(&info->xattrs);
 		cache_no_acl(inode);
@@ -3516,6 +3685,8 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 	if (percpu_counter_init(&sbinfo->used_blocks, 0, GFP_KERNEL))
 		goto failed;
 	sbinfo->free_inodes = sbinfo->max_inodes;
+	spin_lock_init(&sbinfo->shrinklist_lock);
+	INIT_LIST_HEAD(&sbinfo->shrinklist);
 
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
 	sb->s_blocksize = PAGE_SIZE;
@@ -3678,6 +3849,10 @@ static const struct super_operations shmem_ops = {
 	.evict_inode	= shmem_evict_inode,
 	.drop_inode	= generic_delete_inode,
 	.put_super	= shmem_put_super,
+#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+	.nr_cached_objects	= shmem_unused_huge_count,
+	.free_cached_objects	= shmem_unused_huge_scan,
+#endif
 };
 
 static const struct vm_operations_struct shmem_vm_ops = {
-- 
2.8.1
