From: Eric Auger <eric.auger@redhat.com>
To: eric.auger.pro@gmail.com, eric.auger@redhat.com,
	iommu@lists.linux-foundation.org, linux-kernel@vger.kernel.org,
	kvm@vger.kernel.org, kvmarm@lists.cs.columbia.edu,
	will@kernel.org, joro@8bytes.org, maz@kernel.org,
	robin.murphy@arm.com, alex.williamson@redhat.com
Cc: jean-philippe@linaro.org, zhangfei.gao@linaro.org,
	zhangfei.gao@gmail.com, vivek.gautam@arm.com,
	shameerali.kolothum.thodi@huawei.com,
	jacob.jun.pan@linux.intel.com, yi.l.liu@intel.com,
	tn@semihalf.com, nicoleotsuka@gmail.com, yuzenghui@huawei.com
Subject: [PATCH v11 13/13] vfio/pci: Inject page response upon response region fill
Date: Mon, 16 Nov 2020 12:00:30 +0100
Message-ID: <20201116110030.32335-14-eric.auger@redhat.com>
In-Reply-To: <20201116110030.32335-1-eric.auger@redhat.com>

When userspace increments the head of the page response
buffer ring, let's push the response into the iommu layer.
This is done through a workqueue that pops the responses from
the ring buffer and increments the tail.

Signed-off-by: Eric Auger <eric.auger@redhat.com>
---
 drivers/vfio/pci/vfio_pci.c         | 40 +++++++++++++++++++++++++++++
 drivers/vfio/pci/vfio_pci_private.h |  8 ++++++
 drivers/vfio/pci/vfio_pci_rdwr.c    |  1 +
 3 files changed, 49 insertions(+)

diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index e9a904ce3f0d..beea70d70151 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -542,6 +542,32 @@ static int vfio_pci_dma_fault_init(struct vfio_pci_device *vdev)
 	return ret;
 }
 
+static void dma_response_inject(struct work_struct *work)
+{
+	struct vfio_pci_dma_fault_response_work *rwork =
+		container_of(work, struct vfio_pci_dma_fault_response_work, inject);
+	struct vfio_region_dma_fault_response *header = rwork->header;
+	struct vfio_pci_device *vdev = rwork->vdev;
+	struct iommu_page_response *resp;
+	u32 tail, head, size;
+
+	mutex_lock(&vdev->fault_response_queue_lock);
+
+	tail = header->tail;
+	head = header->head;
+	size = header->nb_entries;
+
+	while (CIRC_CNT(head, tail, size) >= 1) {
+		resp = (struct iommu_page_response *)(vdev->fault_response_pages + header->offset +
+						tail * header->entry_size);
+
+		/* TODO: properly handle the return value */
+		iommu_page_response(&vdev->pdev->dev, resp);
+		header->tail = tail = (tail + 1) % size;
+	}
+	mutex_unlock(&vdev->fault_response_queue_lock);
+}
+
 #define DMA_FAULT_RESPONSE_RING_LENGTH 512
 
 static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
@@ -585,8 +611,22 @@ static int vfio_pci_dma_fault_response_init(struct vfio_pci_device *vdev)
 	header->nb_entries = DMA_FAULT_RESPONSE_RING_LENGTH;
 	header->offset = PAGE_SIZE;
 
+	vdev->response_work = kzalloc(sizeof(*vdev->response_work), GFP_KERNEL);
+	if (!vdev->response_work)
+		goto out;
+	vdev->response_work->header = header;
+	vdev->response_work->vdev = vdev;
+
+	/* launch the thread that will extract the response */
+	INIT_WORK(&vdev->response_work->inject, dma_response_inject);
+	vdev->dma_fault_response_wq =
+		create_singlethread_workqueue("vfio-dma-fault-response");
+	if (!vdev->dma_fault_response_wq)
+		return -ENOMEM;
+
 	return 0;
 out:
+	kfree(vdev->fault_response_pages);
 	vdev->fault_response_pages = NULL;
 	return ret;
 }
diff --git a/drivers/vfio/pci/vfio_pci_private.h b/drivers/vfio/pci/vfio_pci_private.h
index 035634521cd0..5944f96ced0c 100644
--- a/drivers/vfio/pci/vfio_pci_private.h
+++ b/drivers/vfio/pci/vfio_pci_private.h
@@ -52,6 +52,12 @@ struct vfio_pci_irq_ctx {
 	struct irq_bypass_producer	producer;
 };
 
+struct vfio_pci_dma_fault_response_work {
+	struct work_struct inject;
+	struct vfio_region_dma_fault_response *header;
+	struct vfio_pci_device *vdev;
+};
+
 struct vfio_pci_device;
 struct vfio_pci_region;
 
@@ -145,6 +151,8 @@ struct vfio_pci_device {
 	struct eventfd_ctx	*req_trigger;
 	u8			*fault_pages;
 	u8			*fault_response_pages;
+	struct workqueue_struct *dma_fault_response_wq;
+	struct vfio_pci_dma_fault_response_work *response_work;
 	struct mutex		fault_queue_lock;
 	struct mutex		fault_response_queue_lock;
 	struct list_head	dummy_resources_list;
diff --git a/drivers/vfio/pci/vfio_pci_rdwr.c b/drivers/vfio/pci/vfio_pci_rdwr.c
index efde0793360b..78c494fe35cc 100644
--- a/drivers/vfio/pci/vfio_pci_rdwr.c
+++ b/drivers/vfio/pci/vfio_pci_rdwr.c
@@ -430,6 +430,7 @@ size_t vfio_pci_dma_fault_response_rw(struct vfio_pci_device *vdev, char __user
 		mutex_lock(&vdev->fault_response_queue_lock);
 		header->head = new_head;
 		mutex_unlock(&vdev->fault_response_queue_lock);
+		queue_work(vdev->dma_fault_response_wq, &vdev->response_work->inject);
 	} else {
 		if (copy_to_user(buf, base + pos, count))
 			return -EFAULT;
-- 
2.21.3

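For context, here is a minimal sketch of the userspace producer side of this ring; it is not part of the patch. It assumes the response region can be mmapped, that its header is the `struct vfio_region_dma_fault_response` used above (head/tail/nb_entries/entry_size/offset fields taken from the diff), that the uAPI struct and region offset come from the rest of the series and VFIO_DEVICE_GET_REGION_INFO, and that writing the new head value at the offset of the header's `head` field is what reaches vfio_pci_dma_fault_response_rw() and queues dma_response_inject(). The field layout and the write semantics are assumptions and would need to be checked against the installed headers.

```c
/*
 * Hypothetical userspace producer for the DMA fault response ring.
 * Assumptions (not guaranteed by this patch alone):
 *  - region_offset/region_size come from VFIO_DEVICE_GET_REGION_INFO
 *    for the DMA fault response region;
 *  - the region is mmappable read/write;
 *  - a pwrite() of the new head at the offset of the header's 'head'
 *    field is what lands in vfio_pci_dma_fault_response_rw() above.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/iommu.h>	/* struct iommu_page_response */
#include <linux/vfio.h>		/* struct vfio_region_dma_fault_response (assumed) */

static int push_page_response(int device_fd, uint64_t region_offset,
			      size_t region_size,
			      const struct iommu_page_response *resp)
{
	struct vfio_region_dma_fault_response *hdr;
	uint32_t head, new_head;
	void *base;

	base = mmap(NULL, region_size, PROT_READ | PROT_WRITE,
		    MAP_SHARED, device_fd, region_offset);
	if (base == MAP_FAILED)
		return -1;

	hdr = base;
	head = hdr->head;
	new_head = (head + 1) % hdr->nb_entries;
	if (new_head == hdr->tail) {	/* ring full, caller should retry later */
		munmap(base, region_size);
		return -1;
	}

	/* Copy the response into the slot currently pointed to by head. */
	memcpy((uint8_t *)base + hdr->offset + head * hdr->entry_size,
	       resp, sizeof(*resp));

	/*
	 * Publish the new head through the region's write path so the
	 * kernel queues dma_response_inject(); the exact writable offset
	 * is an assumption in this sketch.
	 */
	if (pwrite(device_fd, &new_head, sizeof(new_head),
		   region_offset +
		   offsetof(struct vfio_region_dma_fault_response, head))
	    != (ssize_t)sizeof(new_head)) {
		munmap(base, region_size);
		return -1;
	}

	munmap(base, region_size);
	return 0;
}
```
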

Thread overview: 99+ messages

2020-11-16 11:00 [PATCH v11 00/13] SMMUv3 Nested Stage Setup (VFIO part) Eric Auger
2020-11-16 11:00 ` [PATCH v11 01/13] vfio: VFIO_IOMMU_SET_PASID_TABLE Eric Auger
2020-11-17  2:11   ` kernel test robot
2020-11-24 21:31   ` Alex Williamson
2021-02-02 12:34   ` Keqian Zhu
2021-02-22 10:53     ` Auger Eric
2021-02-22 12:20       ` Keqian Zhu
2021-02-22 16:12         ` Auger Eric
2020-11-16 11:00 ` [PATCH v11 02/13] vfio: VFIO_IOMMU_CACHE_INVALIDATE Eric Auger
2020-11-16 11:00 ` [PATCH v11 03/13] vfio: VFIO_IOMMU_SET_MSI_BINDING Eric Auger
2021-02-02 12:08   ` Keqian Zhu
2020-11-16 11:00 ` [PATCH v11 04/13] vfio/pci: Add VFIO_REGION_TYPE_NESTED region type Eric Auger
2020-12-17 11:49   ` Kunkun Jiang
2021-02-23 12:45     ` Shenming Lu
2021-02-23 12:57       ` Auger Eric
2020-11-16 11:00 ` [PATCH v11 05/13] vfio/pci: Register an iommu fault handler Eric Auger
2020-11-16 13:51   ` kernel test robot
2020-11-16 19:30   ` kernel test robot
2020-11-16 11:00 ` [PATCH v11 06/13] vfio/pci: Allow to mmap the fault queue Eric Auger
2020-11-16 11:00 ` [PATCH v11 07/13] vfio: Use capability chains to handle device specific irq Eric Auger
2020-11-16 11:00 ` [PATCH v11 08/13] vfio/pci: Add framework for custom interrupt indices Eric Auger
2020-11-23 12:51   ` Shameerali Kolothum Thodi
2020-11-24  8:35     ` Auger Eric
2020-11-16 11:00 ` [PATCH v11 09/13] vfio: Add new IRQ for DMA fault reporting Eric Auger
2020-11-16 11:00 ` [PATCH v11 10/13] vfio/pci: Register and allow DMA FAULT IRQ signaling Eric Auger
2020-11-16 11:00 ` [PATCH v11 11/13] vfio: Document nested stage control Eric Auger
2020-11-16 11:00 ` [PATCH v11 12/13] vfio/pci: Register a DMA fault response region Eric Auger
2021-01-08 15:19   ` Shameerali Kolothum Thodi
2021-02-18 10:36   ` Shameerali Kolothum Thodi
2021-02-18 10:48     ` Auger Eric
2020-11-16 11:00 ` [PATCH v11 13/13] vfio/pci: Inject page response upon response region fill Eric Auger [this message]
2021-03-15 18:04 ` [PATCH v11 00/13] SMMUv3 Nested Stage Setup (VFIO part) Krishna Reddy
