From: Matthew Wilcox <matthew.r.wilcox@intel.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: linux-nvdimm@lists.01.org, x86@kernel.org,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org,
	linux-fsdevel@vger.kernel.org
Subject: [PATCH v5 06/14] procfs: Add support for PUDs to smaps, clear_refs and pagemap
Date: Thu, 10 Mar 2016 18:55:23 -0500
Message-ID: <1457654131-4562-7-git-send-email-matthew.r.wilcox@intel.com>
In-Reply-To: <1457654131-4562-1-git-send-email-matthew.r.wilcox@intel.com>

From: Matthew Wilcox <willy@linux.intel.com>

Because there's no 'struct page' for DAX THPs, a lot of this code is
simpler than the PMD code it mimics.  Extra code would need to be added
to support PUDs of anonymous or page-cache THPs.
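
For illustration only (not part of this patch), here is a minimal userspace
sketch of reading and decoding one /proc/self/pagemap entry, to show what the
new pagemap_pud_range() makes visible from userspace: with this patch, every
page-sized slot inside a PUD-sized DAX mapping reports the present and
soft-dirty bits just as the PMD path does.  The bit layout follows
Documentation/vm/pagemap.txt; the mapping setup is left as a placeholder.

/*
 * Userspace sketch (illustration only, not part of this patch):
 * read the 64-bit /proc/self/pagemap entry for one virtual address
 * and decode it.  Layout per Documentation/vm/pagemap.txt:
 * bit 63 = present, bit 62 = swapped, bit 55 = soft-dirty,
 * bits 0-54 = PFN (zeroed for unprivileged readers on recent kernels).
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int pagemap_entry(const void *addr, uint64_t *ent)
{
	long psize = sysconf(_SC_PAGESIZE);
	/* One 64-bit entry per page, indexed by virtual page number. */
	off_t off = (uintptr_t)addr / psize * sizeof(*ent);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	int ok;

	if (fd < 0)
		return -1;
	ok = pread(fd, ent, sizeof(*ent), off) == sizeof(*ent);
	close(fd);
	return ok ? 0 : -1;
}

int main(void)
{
	/*
	 * Any mapped address works for the demo; a real test would
	 * mmap() a PUD-aligned, PUD-sized range of a DAX file here.
	 */
	uint64_t ent;
	char c = 0;

	if (pagemap_entry(&c, &ent))
		return 1;
	printf("present %llu swapped %llu soft-dirty %llu pfn %#llx\n",
	       (unsigned long long)(ent >> 63 & 1),
	       (unsigned long long)(ent >> 62 & 1),
	       (unsigned long long)(ent >> 55 & 1),
	       (unsigned long long)(ent & ((1ULL << 55) - 1)));
	return 0;
}

The soft-dirty side can be exercised the same way: writing '4' to
/proc/<pid>/clear_refs clears the soft-dirty bits, which this patch now
handles for PUD-sized mappings via clear_refs_pud_range().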

Signed-off-by: Matthew Wilcox <willy@linux.intel.com>
---
 fs/proc/task_mmu.c | 109 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 109 insertions(+)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 9df4316..197f37d 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -586,6 +586,33 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
 }
 #endif
 
+static int smaps_pud_range(pud_t *pud, unsigned long addr, unsigned long end,
+		struct mm_walk *walk)
+{
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	struct vm_area_struct *vma = walk->vma;
+	struct mem_size_stats *mss = walk->private;
+
+	if (is_huge_zero_pud(*pud))
+		return 0;
+
+	mss->resident += HPAGE_PUD_SIZE;
+	if (vma->vm_flags & VM_SHARED) {
+		if (pud_dirty(*pud))
+			mss->shared_dirty += HPAGE_PUD_SIZE;
+		else
+			mss->shared_clean += HPAGE_PUD_SIZE;
+	} else {
+		if (pud_dirty(*pud))
+			mss->private_dirty += HPAGE_PUD_SIZE;
+		else
+			mss->private_clean += HPAGE_PUD_SIZE;
+	}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+	return 0;
+}
+
 static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 			   struct mm_walk *walk)
 {
@@ -720,6 +747,7 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
 	struct vm_area_struct *vma = v;
 	struct mem_size_stats mss;
 	struct mm_walk smaps_walk = {
+		.pud_entry = smaps_pud_range,
 		.pmd_entry = smaps_pte_range,
 #ifdef CONFIG_HUGETLB_PAGE
 		.hugetlb_entry = smaps_hugetlb_range,
@@ -903,13 +931,50 @@ static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 
 	set_pmd_at(vma->vm_mm, addr, pmdp, pmd);
 }
+static inline void clear_soft_dirty_pud(struct vm_area_struct *vma,
+		unsigned long addr, pud_t *pudp)
+{
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	pud_t pud = pudp_huge_get_and_clear(vma->vm_mm, addr, pudp);
+
+	pud = pud_wrprotect(pud);
+	pud = pud_clear_soft_dirty(pud);
+
+	if (vma->vm_flags & VM_SOFTDIRTY)
+		vma->vm_flags &= ~VM_SOFTDIRTY;
+
+	set_pud_at(vma->vm_mm, addr, pudp, pud);
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+}
 #else
 static inline void clear_soft_dirty_pmd(struct vm_area_struct *vma,
 		unsigned long addr, pmd_t *pmdp)
 {
 }
+static inline void clear_soft_dirty_pud(struct vm_area_struct *vma,
+		unsigned long addr, pud_t *pudp)
+{
+}
 #endif
 
+static int clear_refs_pud_range(pud_t *pud, unsigned long addr,
+				unsigned long end, struct mm_walk *walk)
+{
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	struct clear_refs_private *cp = walk->private;
+	struct vm_area_struct *vma = walk->vma;
+
+	if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
+		clear_soft_dirty_pud(vma, addr, pud);
+	} else {
+		/* Clear accessed and referenced bits. */
+		pudp_test_and_clear_young(vma, addr, pud);
+	}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+
+	return 0;
+}
+
 static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 				unsigned long end, struct mm_walk *walk)
 {
@@ -1020,6 +1085,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
 			.type = type,
 		};
 		struct mm_walk clear_refs_walk = {
+			.pud_entry = clear_refs_pud_range,
 			.pmd_entry = clear_refs_pte_range,
 			.test_walk = clear_refs_test_walk,
 			.mm = mm,
@@ -1184,6 +1250,48 @@ static pagemap_entry_t pte_to_pagemap_entry(struct pagemapread *pm,
 	return make_pme(frame, flags);
 }
 
+static int pagemap_pud_range(pud_t *pudp, unsigned long addr, unsigned long end,
+			     struct mm_walk *walk)
+{
+	int err = 0;
+#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
+	struct vm_area_struct *vma = walk->vma;
+	struct pagemapread *pm = walk->private;
+	u64 flags = 0, frame = 0;
+	pud_t pud = *pudp;
+
+	if ((vma->vm_flags & VM_SOFTDIRTY) || pud_soft_dirty(pud))
+		flags |= PM_SOFT_DIRTY;
+
+	/*
+	 * Currently a pud for a thp is always present because a thp
+	 * cannot be swapped out, migrated, or HWPOISONed (it is
+	 * split in such cases instead).
+	 * This if-check just prepares for a future implementation.
+	 */
+	if (pud_present(pud)) {
+		flags |= PM_PRESENT;
+		if (!(vma->vm_flags & VM_SHARED))
+			flags |= PM_MMAP_EXCLUSIVE;
+
+		if (pm->show_pfn)
+			frame = pud_pfn(pud) +
+					((addr & ~PUD_MASK) >> PAGE_SHIFT);
+
+		for (; addr != end; addr += PAGE_SIZE) {
+			pagemap_entry_t pme = make_pme(frame, flags);
+
+			err = add_to_pagemap(addr, &pme, pm);
+			if (err)
+				break;
+			if (pm->show_pfn && (flags & PM_PRESENT))
+				frame++;
+		}
+	}
+#endif /* CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD */
+	return err;
+}
+
 static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 			     struct mm_walk *walk)
 {
@@ -1363,6 +1471,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
 	if (!pm.buffer)
 		goto out_mm;
 
+	pagemap_walk.pud_entry = pagemap_pud_range;
 	pagemap_walk.pmd_entry = pagemap_pmd_range;
 	pagemap_walk.pte_hole = pagemap_pte_hole;
 #ifdef CONFIG_HUGETLB_PAGE
-- 
2.7.0
