linux-mm.kvack.org archive mirror
From: <ankita@nvidia.com>
To: <ankita@nvidia.com>, <jgg@nvidia.com>,
	<alex.williamson@redhat.com>, <naoya.horiguchi@nec.com>,
	<maz@kernel.org>, <oliver.upton@linux.dev>
Cc: <aniketa@nvidia.com>, <cjia@nvidia.com>, <kwankhede@nvidia.com>,
	<targupta@nvidia.com>, <vsethi@nvidia.com>, <acurrid@nvidia.com>,
	<apopple@nvidia.com>, <jhubbard@nvidia.com>, <danw@nvidia.com>,
	<kvm@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
	<linux-arm-kernel@lists.infradead.org>, <linux-mm@kvack.org>
Subject: [PATCH v3 6/6] vfio/nvgpu: register device memory for poison handling
Date: Wed, 5 Apr 2023 11:01:34 -0700	[thread overview]
Message-ID: <20230405180134.16932-7-ankita@nvidia.com> (raw)
In-Reply-To: <20230405180134.16932-1-ankita@nvidia.com>

From: Ankit Agrawal <ankita@nvidia.com>

The nvgpu-vfio-pci module maps the QEMU VMA to device memory through
remap_pfn_range(). Leverage the new mechanism, introduced earlier in this
series, to handle poison on memory that is not backed by struct page.

nvgpu-vfio-pci defines a pfn_memory_failure() callback to receive the PFN
with the ECC error from the kernel MM. The callback is registered with the
MM, along with the address space and the PFN range, through
register_pfn_address_space().
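
For context, register_pfn_address_space() and struct pfn_address_space are
introduced in patch 3/6 of this series. Based purely on how they are used
in the diff below, the interface is assumed to look roughly like the sketch
here (field names, the interval tree node, and the unregister prototype are
inferred from usage, not the authoritative definition):

struct pfn_address_space;

struct pfn_address_space_ops {
	/* Called by the MM's memory_failure() path for a poisoned PFN. */
	void (*failure)(struct pfn_address_space *pfn_space,
			unsigned long pfn);
};

struct pfn_address_space {
	struct interval_tree_node node;	/* assumed; PFN range [start, last] */
	struct pfn_address_space_ops *ops;
	struct address_space *mapping;	/* vma->vm_file->f_mapping */
};

int register_pfn_address_space(struct pfn_address_space *pfn_space);
void unregister_pfn_address_space(struct pfn_address_space *pfn_space);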

Track poisoned PFNs in the nvgpu-vfio-pci module as a bitmap with one bit
per PFN. The kernel MM reports a poisoned PFN to the module through the
failure callback, which sets the corresponding bit in the bitmap.
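
To make the bitmap sizing used in the diff concrete (the numbers here are
purely illustrative): with one bit per page and 8 bits per byte, the bitmap
needs mem_length >> (PAGE_SHIFT + 3) bytes, which is the expression used
for both the vzalloc() allocation and the memset() on reset.

/* Illustrative only: 16 GiB of device memory with 4 KiB pages. */
#define EXAMPLE_MEM_LENGTH	(16ULL << 30)	/* 16 GiB */
#define EXAMPLE_PAGE_SHIFT	12		/* 4 KiB pages */

/* 16 GiB / 4 KiB = 4M pages to track. */
unsigned long long nr_pages = EXAMPLE_MEM_LENGTH >> EXAMPLE_PAGE_SHIFT;

/* One bit per page: 4M bits / 8 = 512 KiB of bitmap. */
unsigned long long bitmap_bytes =
	EXAMPLE_MEM_LENGTH >> (EXAMPLE_PAGE_SHIFT + 3);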

Register VMA fault ops for the module. The fault handler returns
VM_FAULT_HWPOISON when the bit for the faulting PFN is set in the bitmap.

Clear the bitmap on device reset to reflect the clean state of the device
memory after the reset.
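
The clear is hooked into the standard VFIO device reset path. For
illustration only (not part of this patch), userspace triggers that reset
with the VFIO_DEVICE_RESET ioctl on the device file descriptor, roughly
like this hypothetical helper:

#include <sys/ioctl.h>
#include <linux/vfio.h>

/* Reset the VFIO device; with this driver the reset also clears the
 * poison bitmap. VFIO_DEVICE_RESET takes no argument; returns 0 on
 * success, -1 with errno set on failure. */
static int example_reset_vfio_device(int device_fd)
{
	return ioctl(device_fd, VFIO_DEVICE_RESET);
}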

Signed-off-by: Ankit Agrawal <ankita@nvidia.com>
---
 drivers/vfio/pci/nvgpu/main.c | 116 ++++++++++++++++++++++++++++++++--
 1 file changed, 110 insertions(+), 6 deletions(-)

diff --git a/drivers/vfio/pci/nvgpu/main.c b/drivers/vfio/pci/nvgpu/main.c
index 2dd8cc6e0145..8ccd3fe33a0f 100644
--- a/drivers/vfio/pci/nvgpu/main.c
+++ b/drivers/vfio/pci/nvgpu/main.c
@@ -5,6 +5,8 @@
 
 #include <linux/pci.h>
 #include <linux/vfio_pci_core.h>
+#include <linux/bitmap.h>
+#include <linux/memory-failure.h>
 
 #define DUMMY_PFN \
 	(((nvdev->mem_prop.hpa + nvdev->mem_prop.mem_length) >> PAGE_SHIFT) - 1)
@@ -12,12 +14,78 @@
 struct dev_mem_properties {
 	uint64_t hpa;
 	uint64_t mem_length;
+	unsigned long *pfn_bitmap;
 	int bar1_start_offset;
 };
 
 struct nvgpu_vfio_pci_core_device {
 	struct vfio_pci_core_device core_device;
 	struct dev_mem_properties mem_prop;
+	struct pfn_address_space pfn_address_space;
+};
+
+void nvgpu_vfio_pci_pfn_memory_failure(struct pfn_address_space *pfn_space,
+				       unsigned long pfn)
+{
+	struct nvgpu_vfio_pci_core_device *nvdev = container_of(
+		pfn_space, struct nvgpu_vfio_pci_core_device, pfn_address_space);
+
+	/*
+	 * MM has called to notify a poisoned page. Track that in the bitmap.
+	 */
+	__set_bit(pfn - (pfn_space->node.start), nvdev->mem_prop.pfn_bitmap);
+}
+
+struct pfn_address_space_ops nvgpu_vfio_pci_pas_ops = {
+	.failure = nvgpu_vfio_pci_pfn_memory_failure,
+};
+
+static int
+nvgpu_vfio_pci_register_pfn_range(struct nvgpu_vfio_pci_core_device *nvdev,
+				  struct vm_area_struct *vma)
+{
+	unsigned long nr_pages;
+	int ret = 0;
+
+	nr_pages = nvdev->mem_prop.mem_length >> PAGE_SHIFT;
+
+	nvdev->pfn_address_space.node.start = vma->vm_pgoff;
+	nvdev->pfn_address_space.node.last = vma->vm_pgoff + nr_pages - 1;
+	nvdev->pfn_address_space.ops = &nvgpu_vfio_pci_pas_ops;
+	nvdev->pfn_address_space.mapping = vma->vm_file->f_mapping;
+
+	ret = register_pfn_address_space(&(nvdev->pfn_address_space));
+
+	return ret;
+}
+
+static vm_fault_t nvgpu_vfio_pci_fault(struct vm_fault *vmf)
+{
+	unsigned long mem_offset = vmf->pgoff - vmf->vma->vm_pgoff;
+	struct nvgpu_vfio_pci_core_device *nvdev = container_of(
+		vmf->vma->vm_file->private_data,
+		struct nvgpu_vfio_pci_core_device, core_device.vdev);
+	int ret;
+
+	/*
+	 * Check if the page is poisoned.
+	 */
+	if (mem_offset < (nvdev->mem_prop.mem_length >> PAGE_SHIFT) &&
+		test_bit(mem_offset, nvdev->mem_prop.pfn_bitmap))
+		return VM_FAULT_HWPOISON;
+
+	ret = remap_pfn_range(vmf->vma,
+			vmf->vma->vm_start + (mem_offset << PAGE_SHIFT),
+			DUMMY_PFN, PAGE_SIZE,
+			vmf->vma->vm_page_prot);
+	if (ret)
+		return VM_FAULT_ERROR;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct nvgpu_vfio_pci_mmap_ops = {
+	.fault = nvgpu_vfio_pci_fault,
 };
 
 static int vfio_get_bar1_start_offset(struct vfio_pci_core_device *vdev)
@@ -26,8 +94,9 @@ static int vfio_get_bar1_start_offset(struct vfio_pci_core_device *vdev)
 
 	pci_read_config_byte(vdev->pdev, 0x10, &val);
 	/*
-	 * The BAR1 start offset in the PCI config space depends on the BAR0size.
-	 * Check if the BAR0 is 64b and return the approproiate BAR1 offset.
+	 * The BAR1 start offset in the PCI config space depends on the BAR0
+	 * size. Check if the BAR0 is 64b and return the appropriate BAR1
+	 * offset.
 	 */
 	if (val & PCI_BASE_ADDRESS_MEM_TYPE_64)
 		return VFIO_PCI_BAR2_REGION_INDEX;
@@ -54,6 +123,16 @@ static int nvgpu_vfio_pci_open_device(struct vfio_device *core_vdev)
 	return ret;
 }
 
+void nvgpu_vfio_pci_close_device(struct vfio_device *core_vdev)
+{
+	struct nvgpu_vfio_pci_core_device *nvdev = container_of(
+		core_vdev, struct nvgpu_vfio_pci_core_device, core_device.vdev);
+
+	unregister_pfn_address_space(&(nvdev->pfn_address_space));
+
+	vfio_pci_core_close_device(core_vdev);
+}
+
 int nvgpu_vfio_pci_mmap(struct vfio_device *core_vdev,
 			struct vm_area_struct *vma)
 {
@@ -93,8 +172,11 @@ int nvgpu_vfio_pci_mmap(struct vfio_device *core_vdev,
 		return ret;
 
 	vma->vm_pgoff = start_pfn + pgoff;
+	vma->vm_ops = &nvgpu_vfio_pci_mmap_ops;
 
-	return 0;
+	ret = nvgpu_vfio_pci_register_pfn_range(nvdev, vma);
+
+	return ret;
 }
 
 long nvgpu_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
@@ -140,7 +222,14 @@ long nvgpu_vfio_pci_ioctl(struct vfio_device *core_vdev, unsigned int cmd,
 		}
 
 		return vfio_pci_core_ioctl(core_vdev, cmd, arg);
-
+	case VFIO_DEVICE_RESET:
+		/*
+		 * Resetting the GPU clears the poisoned pages. Clear the
+		 * poisoned page bitmap.
+		 */
+		memset(nvdev->mem_prop.pfn_bitmap, 0,
+		       nvdev->mem_prop.mem_length >> (PAGE_SHIFT + 3));
+		return vfio_pci_core_ioctl(core_vdev, cmd, arg);
 	default:
 		return vfio_pci_core_ioctl(core_vdev, cmd, arg);
 	}
@@ -151,7 +240,7 @@ static const struct vfio_device_ops nvgpu_vfio_pci_ops = {
 	.init = vfio_pci_core_init_dev,
 	.release = vfio_pci_core_release_dev,
 	.open_device = nvgpu_vfio_pci_open_device,
-	.close_device = vfio_pci_core_close_device,
+	.close_device = nvgpu_vfio_pci_close_device,
 	.ioctl = nvgpu_vfio_pci_ioctl,
 	.read = vfio_pci_core_read,
 	.write = vfio_pci_core_write,
@@ -188,7 +277,20 @@ nvgpu_vfio_pci_fetch_memory_property(struct pci_dev *pdev,
 
 	ret = device_property_read_u64(&(pdev->dev), "nvidia,gpu-mem-size",
 				       &(nvdev->mem_prop.mem_length));
-	return ret;
+	if (ret)
+		return ret;
+
+	/*
+	 * A bitmap is maintained to track the pages that are poisoned. Each
+	 * page is represented by one bit. The allocation size in bytes is
+	 * the device memory size shifted right by PAGE_SHIFT to get the
+	 * number of pages, and shifted right by another 3 bits since each
+	 * byte tracks 8 pages.
+	 */
+	nvdev->mem_prop.pfn_bitmap
+		= vzalloc(nvdev->mem_prop.mem_length >> (PAGE_SHIFT + 3));
+
+	return 0;
 }
 
 static int nvgpu_vfio_pci_probe(struct pci_dev *pdev,
@@ -224,6 +326,8 @@ static void nvgpu_vfio_pci_remove(struct pci_dev *pdev)
 	struct nvgpu_vfio_pci_core_device *nvdev = nvgpu_drvdata(pdev);
 	struct vfio_pci_core_device *vdev = &nvdev->core_device;
 
+	vfree(nvdev->mem_prop.pfn_bitmap);
+
 	vfio_pci_core_unregister_device(vdev);
 	vfio_put_device(&vdev->vdev);
 }
-- 
2.17.1




Thread overview: 31+ messages
2023-04-05 18:01 [PATCH v3 0/6] Expose GPU memory as coherently CPU accessible ankita
2023-04-05 18:01 ` [PATCH v3 1/6] kvm: determine memory type from VMA ankita
2023-04-12 12:43   ` Marc Zyngier
2023-04-12 13:01     ` Jason Gunthorpe
2023-05-31 11:35       ` Catalin Marinas
2023-06-14 12:44         ` Jason Gunthorpe
2023-07-14  8:10         ` Benjamin Herrenschmidt
2023-07-16 15:09           ` Catalin Marinas
2023-07-16 22:30             ` Jason Gunthorpe
2023-07-17 18:35               ` Alex Williamson
2023-07-25  6:18                 ` Benjamin Herrenschmidt
2023-04-05 18:01 ` [PATCH v3 2/6] vfio/nvgpu: expose GPU device memory as BAR1 ankita
2023-04-05 21:07   ` kernel test robot
2023-04-05 18:01 ` [PATCH v3 3/6] mm: handle poisoning of pfn without struct pages ankita
2023-04-05 21:07   ` kernel test robot
2023-05-09  9:51   ` HORIGUCHI NAOYA(堀口 直也)
2023-05-15 11:18     ` Ankit Agrawal
2023-05-23  5:43       ` HORIGUCHI NAOYA(堀口 直也)
2023-04-05 18:01 ` [PATCH v3 4/6] mm: Add poison error check in fixup_user_fault() for mapped PFN ankita
2023-04-05 18:01 ` [PATCH v3 5/6] mm: Change ghes code to allow poison of non-struct PFN ankita
2023-04-05 18:01 ` ankita [this message]
2023-04-05 20:24   ` [PATCH v3 6/6] vfio/nvgpu: register device memory for poison handling Zhi Wang
2023-04-05 21:50   ` kernel test robot
2023-05-24  9:53   ` Dan Carpenter
2023-04-06 12:07 ` [PATCH v3 0/6] Expose GPU memory as coherently CPU accessible David Hildenbrand
2023-04-12  8:43   ` Ankit Agrawal
2023-04-12  9:48     ` Marc Zyngier
2023-04-12 12:28 ` Marc Zyngier
2023-04-12 12:53   ` Jason Gunthorpe
2023-04-13  9:52     ` Marc Zyngier
2023-04-13 13:19       ` Jason Gunthorpe
