From: Christoph Hellwig <hch@lst.de>
To: "Dan Williams" <dan.j.williams@intel.com>,
	"Jérôme Glisse" <jglisse@redhat.com>,
	"Jason Gunthorpe" <jgg@mellanox.com>,
	"Ben Skeggs" <bskeggs@redhat.com>
Cc: linux-nvdimm@lists.01.org, linux-pci@vger.kernel.org,
	linux-kernel@vger.kernel.org, dri-devel@lists.freedesktop.org,
	linux-mm@kvack.org, nouveau@lists.freedesktop.org
Subject: [PATCH 10/22] memremap: add a migrate callback to struct dev_pagemap_ops
Date: Thu, 13 Jun 2019 11:43:13 +0200	[thread overview]
Message-ID: <20190613094326.24093-11-hch@lst.de> (raw)
In-Reply-To: <20190613094326.24093-1-hch@lst.de>

This replaces the hacky ->fault callback, which is currently called
directly from common code through an hmm-specific data structure, as an
exercise in layering violations.
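
For illustration, here is a rough sketch of how a driver would wire up
the new callback (my_devmem_migrate, my_migrate_to_ram, my_page_free
and my_ref_kill are made-up names for this sketch, not part of this
series):

static vm_fault_t my_devmem_migrate(struct vm_fault *vmf)
{
	/*
	 * Migrate the un-addressable device page backing vmf->page to
	 * a CPU accessible page.  Per the contract for this callback,
	 * a failed migration must return VM_FAULT_SIGBUS.
	 */
	if (my_migrate_to_ram(vmf->page, vmf->vma, vmf->address))
		return VM_FAULT_SIGBUS;
	return 0;
}

static const struct dev_pagemap_ops my_pagemap_ops = {
	.page_free	= my_page_free,
	.kill		= my_ref_kill,
	.migrate	= my_devmem_migrate,
};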

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 include/linux/hmm.h      |  6 ------
 include/linux/memremap.h |  6 ++++++
 include/linux/swapops.h  | 15 ---------------
 kernel/memremap.c        | 31 -------------------------------
 mm/hmm.c                 | 13 +++++--------
 mm/memory.c              |  9 ++-------
 6 files changed, 13 insertions(+), 67 deletions(-)

diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 5761a39221a6..3c9a59dbfdb8 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -658,11 +658,6 @@ struct hmm_devmem_ops {
  * chunk, as an optimization. It must, however, prioritize the faulting address
  * over all the others.
  */
-typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma,
-				unsigned long addr,
-				const struct page *page,
-				unsigned int flags,
-				pmd_t *pmdp);
 
 struct hmm_devmem {
 	struct completion		completion;
@@ -673,7 +668,6 @@ struct hmm_devmem {
 	struct dev_pagemap		pagemap;
 	const struct hmm_devmem_ops	*ops;
 	struct percpu_ref		ref;
-	dev_page_fault_t		page_fault;
 };
 
 /*
diff --git a/include/linux/memremap.h b/include/linux/memremap.h
index 96a3a6d564ad..03a4099be701 100644
--- a/include/linux/memremap.h
+++ b/include/linux/memremap.h
@@ -75,6 +75,12 @@ struct dev_pagemap_ops {
 	 * Transition the percpu_ref in struct dev_pagemap to the dead state.
 	 */
 	void (*kill)(struct dev_pagemap *pgmap);
+
+	/*
+	 * Used for private (un-addressable) device memory only.  Must migrate
+	 * the page back to a CPU accessible page.
+	 */
+	vm_fault_t (*migrate)(struct vm_fault *vmf);
 };
 
 /**
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 4d961668e5fc..15bdb6fe71e5 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -129,12 +129,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
 	return pfn_to_page(swp_offset(entry));
 }
-
-vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
-		       unsigned long addr,
-		       swp_entry_t entry,
-		       unsigned int flags,
-		       pmd_t *pmdp);
 #else /* CONFIG_DEVICE_PRIVATE */
 static inline swp_entry_t make_device_private_entry(struct page *page, bool write)
 {
@@ -164,15 +158,6 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
 {
 	return NULL;
 }
-
-static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
-				     unsigned long addr,
-				     swp_entry_t entry,
-				     unsigned int flags,
-				     pmd_t *pmdp)
-{
-	return VM_FAULT_SIGBUS;
-}
 #endif /* CONFIG_DEVICE_PRIVATE */
 
 #ifdef CONFIG_MIGRATION
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 6a3183cac764..7167e717647d 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -11,7 +11,6 @@
 #include <linux/types.h>
 #include <linux/wait_bit.h>
 #include <linux/xarray.h>
-#include <linux/hmm.h>
 
 static DEFINE_XARRAY(pgmap_array);
 #define SECTION_MASK ~((1UL << PA_SECTION_SHIFT) - 1)
@@ -48,36 +47,6 @@ static inline int dev_pagemap_enable(struct device *dev)
 }
 #endif /* CONFIG_DEV_PAGEMAP_OPS */
 
-#if IS_ENABLED(CONFIG_DEVICE_PRIVATE)
-vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
-		       unsigned long addr,
-		       swp_entry_t entry,
-		       unsigned int flags,
-		       pmd_t *pmdp)
-{
-	struct page *page = device_private_entry_to_page(entry);
-	struct hmm_devmem *devmem;
-
-	devmem = container_of(page->pgmap, typeof(*devmem), pagemap);
-
-	/*
-	 * The page_fault() callback must migrate page back to system memory
-	 * so that CPU can access it. This might fail for various reasons
-	 * (device issue, device was unsafely unplugged, ...). When such
-	 * error conditions happen, the callback must return VM_FAULT_SIGBUS.
-	 *
-	 * Note that because memory cgroup charges are accounted to the device
-	 * memory, this should never fail because of memory restrictions (but
-	 * allocation of regular system page might still fail because we are
-	 * out of memory).
-	 *
-	 * There is a more in-depth description of what that callback can and
-	 * cannot do, in include/linux/memremap.h
-	 */
-	return devmem->page_fault(vma, addr, page, flags, pmdp);
-}
-#endif /* CONFIG_DEVICE_PRIVATE */
-
 static void pgmap_array_delete(struct resource *res)
 {
 	xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
diff --git a/mm/hmm.c b/mm/hmm.c
index 6dc769feb2e1..aab799677c7d 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -1330,15 +1330,12 @@ static void hmm_devmem_ref_kill(struct dev_pagemap *pgmap)
 	percpu_ref_kill(pgmap->ref);
 }
 
-static vm_fault_t hmm_devmem_fault(struct vm_area_struct *vma,
-			    unsigned long addr,
-			    const struct page *page,
-			    unsigned int flags,
-			    pmd_t *pmdp)
+static vm_fault_t hmm_devmem_migrate(struct vm_fault *vmf)
 {
-	struct hmm_devmem *devmem = page->pgmap->data;
+	struct hmm_devmem *devmem = vmf->page->pgmap->data;
 
-	return devmem->ops->fault(devmem, vma, addr, page, flags, pmdp);
+	return devmem->ops->fault(devmem, vmf->vma, vmf->address, vmf->page,
+			vmf->flags, vmf->pmd);
 }
 
 static void hmm_devmem_free(struct page *page, void *data)
@@ -1351,6 +1348,7 @@ static void hmm_devmem_free(struct page *page, void *data)
 static const struct dev_pagemap_ops hmm_pagemap_ops = {
 	.page_free		= hmm_devmem_free,
 	.kill			= hmm_devmem_ref_kill,
+	.migrate		= hmm_devmem_migrate,
 };
 
 /*
@@ -1405,7 +1403,6 @@ struct hmm_devmem *hmm_devmem_add(const struct hmm_devmem_ops *ops,
 	devmem->pfn_first = devmem->resource->start >> PAGE_SHIFT;
 	devmem->pfn_last = devmem->pfn_first +
 			   (resource_size(devmem->resource) >> PAGE_SHIFT);
-	devmem->page_fault = hmm_devmem_fault;
 
 	devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
 	devmem->pagemap.res = *devmem->resource;
diff --git a/mm/memory.c b/mm/memory.c
index ddf20bd0c317..cbf3cb598436 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2782,13 +2782,8 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
 			migration_entry_wait(vma->vm_mm, vmf->pmd,
 					     vmf->address);
 		} else if (is_device_private_entry(entry)) {
-			/*
-			 * For un-addressable device memory we call the pgmap
-			 * fault handler callback. The callback must migrate
-			 * the page back to some CPU accessible page.
-			 */
-			ret = device_private_entry_fault(vma, vmf->address, entry,
-						 vmf->flags, vmf->pmd);
+			vmf->page = device_private_entry_to_page(entry);
+			ret = vmf->page->pgmap->ops->migrate(vmf);
 		} else if (is_hwpoison_entry(entry)) {
 			ret = VM_FAULT_HWPOISON;
 		} else {
-- 
2.20.1

_______________________________________________
Linux-nvdimm mailing list
Linux-nvdimm@lists.01.org
https://lists.01.org/mailman/listinfo/linux-nvdimm
