From: Gautam Dawar <gautam.dawar@xilinx.com>
Cc: <gdawar@xilinx.com>, <martinh@xilinx.com>, <hanand@xilinx.com>,
	<tanujk@xilinx.com>, <eperezma@redhat.com>,
	Jason Wang <jasowang@redhat.com>,
	"Michael S. Tsirkin" <mst@redhat.com>,
	Zhu Lingshan <lingshan.zhu@intel.com>,
	Stefano Garzarella <sgarzare@redhat.com>,
	Xie Yongji <xieyongji@bytedance.com>, Eli Cohen <elic@nvidia.com>,
	Si-Wei Liu <si-wei.liu@oracle.com>,
	Parav Pandit <parav@nvidia.com>, Longpeng <longpeng2@huawei.com>,
	<virtualization@lists.linux-foundation.org>,
	<linux-kernel@vger.kernel.org>, <kvm@vger.kernel.org>,
	<netdev@vger.kernel.org>
Subject: [RFC PATCH v2 03/19] vhost-vdpa: passing iotlb to IOMMU mapping helpers
Date: Fri, 25 Feb 2022 02:52:43 +0530
Message-ID: <20220224212314.1326-4-gdawar@xilinx.com>
In-Reply-To: <20220224212314.1326-1-gdawar@xilinx.com>

To prepare for ASID support in vhost-vdpa, pass the IOTLB object to
the DMA helpers explicitly instead of fetching it from the vhost
device. No functional change; this is just preparation for supporting
multiple IOTLBs.

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Gautam Dawar <gdawar@xilinx.com>
---
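Note for reviewers (below the scissors, so not part of the commit
message): the sketch that follows illustrates where this refactor is
heading. It is a hypothetical example only -- vhost_vdpa_find_iotlb()
does not exist in this series and merely stands in for the per-ASID
IOTLB lookup that later patches in the series introduce.

/*
 * Hypothetical sketch: once the helpers take the IOTLB as an explicit
 * parameter, a later patch can select a per-ASID IOTLB and pass it to
 * the same helpers. vhost_vdpa_find_iotlb() is invented here purely
 * for illustration.
 */
static int vhost_vdpa_process_iotlb_msg_asid(struct vhost_vdpa *v, u32 asid,
					     struct vhost_iotlb_msg *msg)
{
	struct vhost_iotlb *iotlb = vhost_vdpa_find_iotlb(v, asid);

	if (!iotlb)
		return -EINVAL;

	/* The explicit iotlb argument replaces direct reads of dev->iotlb. */
	return vhost_vdpa_process_iotlb_update(v, iotlb, msg);
}
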
 drivers/vhost/vdpa.c | 67 ++++++++++++++++++++++++--------------------
 1 file changed, 37 insertions(+), 30 deletions(-)

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 851539807bc9..146911082514 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -503,10 +503,11 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep,
 	return r;
 }
 
-static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v,
+				struct vhost_iotlb *iotlb,
+				u64 start, u64 last)
 {
 	struct vhost_dev *dev = &v->vdev;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct vhost_iotlb_map *map;
 	struct page *page;
 	unsigned long pfn, pinned;
@@ -525,10 +526,10 @@ static void vhost_vdpa_pa_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 	}
 }
 
-static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_va_unmap(struct vhost_vdpa *v,
+				struct vhost_iotlb *iotlb,
+				u64 start, u64 last)
 {
-	struct vhost_dev *dev = &v->vdev;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 	struct vhost_iotlb_map *map;
 	struct vdpa_map_file *map_file;
 
@@ -540,21 +541,24 @@ static void vhost_vdpa_va_unmap(struct vhost_vdpa *v, u64 start, u64 last)
 	}
 }
 
-static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v, u64 start, u64 last)
+static void vhost_vdpa_iotlb_unmap(struct vhost_vdpa *v,
+				   struct vhost_iotlb *iotlb,
+				   u64 start, u64 last)
 {
 	struct vdpa_device *vdpa = v->vdpa;
 
 	if (vdpa->use_va)
-		return vhost_vdpa_va_unmap(v, start, last);
+		return vhost_vdpa_va_unmap(v, iotlb, start, last);
 
-	return vhost_vdpa_pa_unmap(v, start, last);
+	return vhost_vdpa_pa_unmap(v, iotlb, start, last);
 }
 
 static void vhost_vdpa_iotlb_free(struct vhost_vdpa *v)
 {
 	struct vhost_dev *dev = &v->vdev;
+	struct vhost_iotlb *iotlb = dev->iotlb;
 
-	vhost_vdpa_iotlb_unmap(v, 0ULL, 0ULL - 1);
+	vhost_vdpa_iotlb_unmap(v, iotlb, 0ULL, 0ULL - 1);
 	kfree(dev->iotlb);
 	dev->iotlb = NULL;
 }
@@ -581,15 +585,15 @@ static int perm_to_iommu_flags(u32 perm)
 	return flags | IOMMU_CACHE;
 }
 
-static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
-			  u64 size, u64 pa, u32 perm, void *opaque)
+static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
+			  u64 iova, u64 size, u64 pa, u32 perm, void *opaque)
 {
 	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
 	int r = 0;
 
-	r = vhost_iotlb_add_range_ctx(dev->iotlb, iova, iova + size - 1,
+	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
 				      pa, perm, opaque);
 	if (r)
 		return r;
@@ -598,13 +602,13 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
 		r = ops->dma_map(vdpa, iova, size, pa, perm, opaque);
 	} else if (ops->set_map) {
 		if (!v->in_batch)
-			r = ops->set_map(vdpa, dev->iotlb);
+			r = ops->set_map(vdpa, iotlb);
 	} else {
 		r = iommu_map(v->domain, iova, pa, size,
 			      perm_to_iommu_flags(perm));
 	}
 	if (r) {
-		vhost_iotlb_del_range(dev->iotlb, iova, iova + size - 1);
+		vhost_iotlb_del_range(iotlb, iova, iova + size - 1);
 		return r;
 	}
 
@@ -614,25 +618,27 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, u64 iova,
 	return 0;
 }
 
-static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
+static void vhost_vdpa_unmap(struct vhost_vdpa *v,
+			     struct vhost_iotlb *iotlb,
+			     u64 iova, u64 size)
 {
-	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
 
-	vhost_vdpa_iotlb_unmap(v, iova, iova + size - 1);
+	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
 
 	if (ops->dma_map) {
 		ops->dma_unmap(vdpa, iova, size);
 	} else if (ops->set_map) {
 		if (!v->in_batch)
-			ops->set_map(vdpa, dev->iotlb);
+			ops->set_map(vdpa, iotlb);
 	} else {
 		iommu_unmap(v->domain, iova, size);
 	}
 }
 
 static int vhost_vdpa_va_map(struct vhost_vdpa *v,
+			     struct vhost_iotlb *iotlb,
 			     u64 iova, u64 size, u64 uaddr, u32 perm)
 {
 	struct vhost_dev *dev = &v->vdev;
@@ -662,7 +668,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
 		offset = (vma->vm_pgoff << PAGE_SHIFT) + uaddr - vma->vm_start;
 		map_file->offset = offset;
 		map_file->file = get_file(vma->vm_file);
-		ret = vhost_vdpa_map(v, map_iova, map_size, uaddr,
+		ret = vhost_vdpa_map(v, iotlb, map_iova, map_size, uaddr,
 				     perm, map_file);
 		if (ret) {
 			fput(map_file->file);
@@ -675,7 +681,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
 		map_iova += map_size;
 	}
 	if (ret)
-		vhost_vdpa_unmap(v, iova, map_iova - iova);
+		vhost_vdpa_unmap(v, iotlb, iova, map_iova - iova);
 
 	mmap_read_unlock(dev->mm);
 
@@ -683,6 +689,7 @@ static int vhost_vdpa_va_map(struct vhost_vdpa *v,
 }
 
 static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
+			     struct vhost_iotlb *iotlb,
 			     u64 iova, u64 size, u64 uaddr, u32 perm)
 {
 	struct vhost_dev *dev = &v->vdev;
@@ -746,7 +753,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 			if (last_pfn && (this_pfn != last_pfn + 1)) {
 				/* Pin a contiguous chunk of memory */
 				csize = PFN_PHYS(last_pfn - map_pfn + 1);
-				ret = vhost_vdpa_map(v, iova, csize,
+				ret = vhost_vdpa_map(v, iotlb, iova, csize,
 						     PFN_PHYS(map_pfn),
 						     perm, NULL);
 				if (ret) {
@@ -776,7 +783,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 	}
 
 	/* Pin the rest chunk */
-	ret = vhost_vdpa_map(v, iova, PFN_PHYS(last_pfn - map_pfn + 1),
+	ret = vhost_vdpa_map(v, iotlb, iova, PFN_PHYS(last_pfn - map_pfn + 1),
 			     PFN_PHYS(map_pfn), perm, NULL);
 out:
 	if (ret) {
@@ -796,7 +803,7 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 			for (pfn = map_pfn; pfn <= last_pfn; pfn++)
 				unpin_user_page(pfn_to_page(pfn));
 		}
-		vhost_vdpa_unmap(v, start, size);
+		vhost_vdpa_unmap(v, iotlb, start, size);
 	}
 unlock:
 	mmap_read_unlock(dev->mm);
@@ -807,11 +814,10 @@ static int vhost_vdpa_pa_map(struct vhost_vdpa *v,
 }
 
 static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
+					   struct vhost_iotlb *iotlb,
 					   struct vhost_iotlb_msg *msg)
 {
-	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
-	struct vhost_iotlb *iotlb = dev->iotlb;
 
 	if (msg->iova < v->range.first || !msg->size ||
 	    msg->iova > U64_MAX - msg->size + 1 ||
@@ -823,10 +829,10 @@ static int vhost_vdpa_process_iotlb_update(struct vhost_vdpa *v,
 		return -EEXIST;
 
 	if (vdpa->use_va)
-		return vhost_vdpa_va_map(v, msg->iova, msg->size,
+		return vhost_vdpa_va_map(v, iotlb, msg->iova, msg->size,
 					 msg->uaddr, msg->perm);
 
-	return vhost_vdpa_pa_map(v, msg->iova, msg->size, msg->uaddr,
+	return vhost_vdpa_pa_map(v, iotlb, msg->iova, msg->size, msg->uaddr,
 				 msg->perm);
 }
 
@@ -836,6 +842,7 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
 	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
+	struct vhost_iotlb *iotlb = dev->iotlb;
 	int r = 0;
 
 	mutex_lock(&dev->mutex);
@@ -846,17 +853,17 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
 
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
-		r = vhost_vdpa_process_iotlb_update(v, msg);
+		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
 		break;
 	case VHOST_IOTLB_INVALIDATE:
-		vhost_vdpa_unmap(v, msg->iova, msg->size);
+		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
 		break;
 	case VHOST_IOTLB_BATCH_BEGIN:
 		v->in_batch = true;
 		break;
 	case VHOST_IOTLB_BATCH_END:
 		if (v->in_batch && ops->set_map)
-			ops->set_map(vdpa, dev->iotlb);
+			ops->set_map(vdpa, iotlb);
 		v->in_batch = false;
 		break;
 	default:
-- 
2.25.0

