From: Gautam Dawar <gautam.dawar@xilinx.com>
To: "Michael S. Tsirkin" <mst@redhat.com>,
	Jason Wang <jasowang@redhat.com>, <kvm@vger.kernel.org>,
	<virtualization@lists.linux-foundation.org>,
	<netdev@vger.kernel.org>, <linux-kernel@vger.kernel.org>
Cc: <martinh@xilinx.com>, <hanand@xilinx.com>, <martinpo@xilinx.com>,
	<pabloc@xilinx.com>, <dinang@xilinx.com>, <tanuj.kamde@amd.com>,
	<habetsm.xilinx@gmail.com>, <ecree.xilinx@gmail.com>,
	<eperezma@redhat.com>, Gautam Dawar <gdawar@xilinx.com>,
	Wu Zongyong <wuzongyong@linux.alibaba.com>,
	Christophe JAILLET <christophe.jaillet@wanadoo.fr>,
	Eli Cohen <elic@nvidia.com>,
	Zhu Lingshan <lingshan.zhu@intel.com>,
	Stefano Garzarella <sgarzare@redhat.com>,
	Xie Yongji <xieyongji@bytedance.com>,
	Si-Wei Liu <si-wei.liu@oracle.com>,
	Parav Pandit <parav@nvidia.com>, Longpeng <longpeng2@huawei.com>,
	Dan Carpenter <dan.carpenter@oracle.com>,
	Zhang Min <zhang.min9@zte.com.cn>
Subject: [PATCH v2 15/19] vhost-vdpa: support ASID based IOTLB API
Date: Wed, 30 Mar 2022 23:33:55 +0530
Message-ID: <20220330180436.24644-16-gdawar@xilinx.com>
In-Reply-To: <20220330180436.24644-1-gdawar@xilinx.com>

This patch extends vhost-vdpa to support the ASID based IOTLB API. The
vhost-vdpa device will allocate multiple IOTLBs for a vDPA device that
supports multiple address spaces. The IOTLBs and the vDPA device memory
mappings are determined and maintained through the ASID.

Note that we still don't support a vDPA device with more than one
address space that depends on the platform IOMMU. This work will be
done by moving the IOMMU logic from vhost-vDPA to the vDPA device
driver.
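
For illustration only (not part of this patch), here is a minimal
userspace sketch of how a VMM might map one IOVA range into a specific
address space through the ASID-aware IOTLB path. The fd, ASID and
address values are placeholders, and VHOST_BACKEND_F_IOTLB_ASID is
assumed to have been negotiated beforehand:

#include <linux/vhost_types.h>	/* struct vhost_msg_v2, VHOST_IOTLB_* */
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Sketch only: map one IOVA range into address space 'asid' of a
 * vhost-vdpa device. 'fd' is an open /dev/vhost-vdpa-N descriptor and
 * all values are placeholders.
 */
static int map_range(int fd, uint32_t asid, uint64_t iova, uint64_t size,
		     void *va)
{
	struct vhost_msg_v2 msg;

	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG_V2;
	msg.asid = asid;			/* selects the per-ASID IOTLB */
	msg.iotlb.iova = iova;
	msg.iotlb.size = size;
	msg.iotlb.uaddr = (uint64_t)(uintptr_t)va;
	msg.iotlb.perm = VHOST_ACCESS_RW;
	msg.iotlb.type = VHOST_IOTLB_UPDATE;

	/* vhost_chr_write_iter() reads the asid field and passes it to
	 * vhost_vdpa_process_iotlb_msg(), which looks up (or allocates)
	 * the matching address space.
	 */
	return write(fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}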

Signed-off-by: Jason Wang <jasowang@redhat.com>
Signed-off-by: Gautam Dawar <gdawar@xilinx.com>
---
 drivers/vhost/vdpa.c  | 111 ++++++++++++++++++++++++++++++++++--------
 drivers/vhost/vhost.c |   2 +-
 2 files changed, 93 insertions(+), 20 deletions(-)

diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 6c7ee0f18892..1f1d1c425573 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -28,7 +28,8 @@
 enum {
 	VHOST_VDPA_BACKEND_FEATURES =
 	(1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
-	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
+	(1ULL << VHOST_BACKEND_F_IOTLB_BATCH) |
+	(1ULL << VHOST_BACKEND_F_IOTLB_ASID),
 };
 
 #define VHOST_VDPA_DEV_MAX (1U << MINORBITS)
@@ -57,12 +58,20 @@ struct vhost_vdpa {
 	struct eventfd_ctx *config_ctx;
 	int in_batch;
 	struct vdpa_iova_range range;
+	u32 batch_asid;
 };
 
 static DEFINE_IDA(vhost_vdpa_ida);
 
 static dev_t vhost_vdpa_major;
 
+static inline u32 iotlb_to_asid(struct vhost_iotlb *iotlb)
+{
+	struct vhost_vdpa_as *as = container_of(iotlb, struct
+						vhost_vdpa_as, iotlb);
+	return as->id;
+}
+
 static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
 {
 	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
@@ -75,6 +84,16 @@ static struct vhost_vdpa_as *asid_to_as(struct vhost_vdpa *v, u32 asid)
 	return NULL;
 }
 
+static struct vhost_iotlb *asid_to_iotlb(struct vhost_vdpa *v, u32 asid)
+{
+	struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+	if (!as)
+		return NULL;
+
+	return &as->iotlb;
+}
+
 static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
 {
 	struct hlist_head *head = &v->as[asid % VHOST_VDPA_IOTLB_BUCKETS];
@@ -83,6 +102,9 @@ static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
 	if (asid_to_as(v, asid))
 		return NULL;
 
+	if (asid >= v->vdpa->nas)
+		return NULL;
+
 	as = kmalloc(sizeof(*as), GFP_KERNEL);
 	if (!as)
 		return NULL;
@@ -94,6 +116,17 @@ static struct vhost_vdpa_as *vhost_vdpa_alloc_as(struct vhost_vdpa *v, u32 asid)
 	return as;
 }
 
+static struct vhost_vdpa_as *vhost_vdpa_find_alloc_as(struct vhost_vdpa *v,
+						      u32 asid)
+{
+	struct vhost_vdpa_as *as = asid_to_as(v, asid);
+
+	if (as)
+		return as;
+
+	return vhost_vdpa_alloc_as(v, asid);
+}
+
 static int vhost_vdpa_remove_as(struct vhost_vdpa *v, u32 asid)
 {
 	struct vhost_vdpa_as *as = asid_to_as(v, asid);
@@ -692,6 +725,7 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
 	struct vhost_dev *dev = &v->vdev;
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
+	u32 asid = iotlb_to_asid(iotlb);
 	int r = 0;
 
 	r = vhost_iotlb_add_range_ctx(iotlb, iova, iova + size - 1,
@@ -700,10 +734,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v, struct vhost_iotlb *iotlb,
 		return r;
 
 	if (ops->dma_map) {
-		r = ops->dma_map(vdpa, 0, iova, size, pa, perm, opaque);
+		r = ops->dma_map(vdpa, asid, iova, size, pa, perm, opaque);
 	} else if (ops->set_map) {
 		if (!v->in_batch)
-			r = ops->set_map(vdpa, 0, iotlb);
+			r = ops->set_map(vdpa, asid, iotlb);
 	} else {
 		r = iommu_map(v->domain, iova, pa, size,
 			      perm_to_iommu_flags(perm));
@@ -725,17 +759,24 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v,
 {
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
+	u32 asid = iotlb_to_asid(iotlb);
 
 	vhost_vdpa_iotlb_unmap(v, iotlb, iova, iova + size - 1);
 
 	if (ops->dma_map) {
-		ops->dma_unmap(vdpa, 0, iova, size);
+		ops->dma_unmap(vdpa, asid, iova, size);
 	} else if (ops->set_map) {
 		if (!v->in_batch)
-			ops->set_map(vdpa, 0, iotlb);
+			ops->set_map(vdpa, asid, iotlb);
 	} else {
 		iommu_unmap(v->domain, iova, size);
 	}
+
+	/* If we are in the middle of batch processing, delay the free
+	 * of AS until BATCH_END.
+	 */
+	if (!v->in_batch && !iotlb->nmaps)
+		vhost_vdpa_remove_as(v, asid);
 }
 
 static int vhost_vdpa_va_map(struct vhost_vdpa *v,
@@ -943,19 +984,40 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
 	struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
 	struct vdpa_device *vdpa = v->vdpa;
 	const struct vdpa_config_ops *ops = vdpa->config;
-	struct vhost_vdpa_as *as = asid_to_as(v, 0);
-	struct vhost_iotlb *iotlb = &as->iotlb;
+	struct vhost_iotlb *iotlb = NULL;
+	struct vhost_vdpa_as *as = NULL;
 	int r = 0;
 
-	if (asid != 0)
-		return -EINVAL;
-
 	mutex_lock(&dev->mutex);
 
 	r = vhost_dev_check_owner(dev);
 	if (r)
 		goto unlock;
 
+	if (msg->type == VHOST_IOTLB_UPDATE ||
+	    msg->type == VHOST_IOTLB_BATCH_BEGIN) {
+		as = vhost_vdpa_find_alloc_as(v, asid);
+		if (!as) {
+			dev_err(&v->dev, "can't find and alloc asid %d\n",
+				asid);
+			r = -EINVAL;
+			goto unlock;
+		}
+		iotlb = &as->iotlb;
+	} else
+		iotlb = asid_to_iotlb(v, asid);
+
+	if ((v->in_batch && v->batch_asid != asid) || !iotlb) {
+		if (v->in_batch && v->batch_asid != asid) {
+			dev_info(&v->dev, "batch id %d asid %d\n",
+				 v->batch_asid, asid);
+		}
+		if (!iotlb)
+			dev_err(&v->dev, "no iotlb for asid %d\n", asid);
+		r = -EINVAL;
+		goto unlock;
+	}
+
 	switch (msg->type) {
 	case VHOST_IOTLB_UPDATE:
 		r = vhost_vdpa_process_iotlb_update(v, iotlb, msg);
@@ -964,12 +1026,15 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev, u32 asid,
 		vhost_vdpa_unmap(v, iotlb, msg->iova, msg->size);
 		break;
 	case VHOST_IOTLB_BATCH_BEGIN:
+		v->batch_asid = asid;
 		v->in_batch = true;
 		break;
 	case VHOST_IOTLB_BATCH_END:
 		if (v->in_batch && ops->set_map)
-			ops->set_map(vdpa, 0, iotlb);
+			ops->set_map(vdpa, asid, iotlb);
 		v->in_batch = false;
+		if (!iotlb->nmaps)
+			vhost_vdpa_remove_as(v, asid);
 		break;
 	default:
 		r = -EINVAL;
@@ -1057,9 +1122,17 @@ static void vhost_vdpa_set_iova_range(struct vhost_vdpa *v)
 
 static void vhost_vdpa_cleanup(struct vhost_vdpa *v)
 {
+	struct vhost_vdpa_as *as;
+	u32 asid;
+
 	vhost_dev_cleanup(&v->vdev);
 	kfree(v->vdev.vqs);
-	vhost_vdpa_remove_as(v, 0);
+
+	for (asid = 0; asid < v->vdpa->nas; asid++) {
+		as = asid_to_as(v, asid);
+		if (as)
+			vhost_vdpa_remove_as(v, asid);
+	}
 }
 
 static int vhost_vdpa_open(struct inode *inode, struct file *filep)
@@ -1095,12 +1168,9 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 	vhost_dev_init(dev, vqs, nvqs, 0, 0, 0, false,
 		       vhost_vdpa_process_iotlb_msg);
 
-	if (!vhost_vdpa_alloc_as(v, 0))
-		goto err_alloc_as;
-
 	r = vhost_vdpa_alloc_domain(v);
 	if (r)
-		goto err_alloc_as;
+		goto err_alloc_domain;
 
 	vhost_vdpa_set_iova_range(v);
 
@@ -1108,7 +1178,7 @@ static int vhost_vdpa_open(struct inode *inode, struct file *filep)
 
 	return 0;
 
-err_alloc_as:
+err_alloc_domain:
 	vhost_vdpa_cleanup(v);
 err:
 	atomic_dec(&v->opened);
@@ -1233,8 +1303,11 @@ static int vhost_vdpa_probe(struct vdpa_device *vdpa)
 	int minor;
 	int i, r;
 
-	/* Only support 1 address space and 1 groups */
-	if (vdpa->ngroups != 1 || vdpa->nas != 1)
+	/* We can't support platform IOMMU device with more than 1
+	 * group or as
+	 */
+	if (!ops->set_map && !ops->dma_map &&
+	    (vdpa->ngroups > 1 || vdpa->nas > 1))
 		return -EOPNOTSUPP;
 
 	v = kzalloc(sizeof(*v), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d1e58f976f6e..5022c648d9c0 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -1167,7 +1167,7 @@ ssize_t vhost_chr_write_iter(struct vhost_dev *dev,
 				ret = -EINVAL;
 				goto done;
 			}
-			offset = sizeof(__u16);
+			offset = 0;
 		} else
 			offset = sizeof(__u32);
 		break;
-- 
2.30.1
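
As a further illustration (again not part of the patch), a hedged
sketch of the batching flow that the batch_asid check above enforces:
every message between BATCH_BEGIN and BATCH_END must carry the same
ASID, and for devices using set_map() the mapping is committed once for
that ASID at BATCH_END. The helper name and values are placeholders:

#include <linux/vhost_types.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>

/* Sketch: push several IOTLB updates for one ASID as a single batch.
 * Assumes VHOST_BACKEND_F_IOTLB_BATCH and VHOST_BACKEND_F_IOTLB_ASID
 * were negotiated via VHOST_SET_BACKEND_FEATURES.
 */
static int batch_update(int fd, uint32_t asid,
			const struct vhost_iotlb_msg *updates, int n)
{
	struct vhost_msg_v2 msg;
	int i;

	memset(&msg, 0, sizeof(msg));
	msg.type = VHOST_IOTLB_MSG_V2;
	msg.asid = asid;

	msg.iotlb.type = VHOST_IOTLB_BATCH_BEGIN;	/* records batch_asid */
	if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
		return -1;

	for (i = 0; i < n; i++) {
		msg.iotlb = updates[i];			/* same asid for all */
		msg.iotlb.type = VHOST_IOTLB_UPDATE;
		if (write(fd, &msg, sizeof(msg)) != sizeof(msg))
			return -1;
	}

	memset(&msg.iotlb, 0, sizeof(msg.iotlb));
	msg.iotlb.type = VHOST_IOTLB_BATCH_END;	/* set_map(vdpa, asid, iotlb) */
	return write(fd, &msg, sizeof(msg)) == sizeof(msg) ? 0 : -1;
}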


Thread overview: 36+ messages
2022-03-30 18:03 [PATCH v2 00/19] Control VQ support in vDPA Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 01/19] vhost: move the backend feature bits to vhost_types.h Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 02/19] virtio-vdpa: don't set callback if virtio doesn't need it Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 03/19] vhost-vdpa: passing iotlb to IOMMU mapping helpers Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 04/19] vhost-vdpa: switch to use vhost-vdpa specific IOTLB Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 05/19] vdpa: introduce virtqueue groups Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 06/19] vdpa: multiple address spaces support Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 07/19] vdpa: introduce config operations for associating ASID to a virtqueue group Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 08/19] vhost_iotlb: split out IOTLB initialization Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 09/19] vhost: support ASID in IOTLB API Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 10/19] vhost-vdpa: introduce asid based IOTLB Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 11/19] vhost-vdpa: introduce uAPI to get the number of virtqueue groups Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 12/19] vhost-vdpa: introduce uAPI to get the number of address spaces Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 13/19] vhost-vdpa: uAPI to get virtqueue group id Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 14/19] vhost-vdpa: introduce uAPI to set group ASID Gautam Dawar
2022-03-30 18:03 ` Gautam Dawar [this message]
2022-04-01  4:24   ` [PATCH v2 15/19] vhost-vdpa: support ASID based IOTLB API Jason Wang
2022-04-01  4:24     ` Jason Wang
2022-04-28  6:28     ` Gautam Dawar
2022-05-07 10:24       ` Jason Wang
2022-05-07 10:24         ` Jason Wang
2022-03-30 18:03 ` [PATCH v2 16/19] vdpa_sim: advertise VIRTIO_NET_F_MTU Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 17/19] vdpa_sim: factor out buffer completion logic Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 18/19] vdpa_sim: filter destination mac address Gautam Dawar
2022-03-30 18:03 ` [PATCH v2 19/19] vdpasim: control virtqueue support Gautam Dawar
2022-06-21 15:19   ` Stefano Garzarella
2022-06-21 15:19     ` Stefano Garzarella
2022-06-22 10:21     ` Eugenio Perez Martin
2022-06-22 15:04       ` Eugenio Perez Martin
2022-06-22 15:44         ` Stefano Garzarella
2022-06-22 15:44           ` Stefano Garzarella
2022-05-09  3:42 ` [PATCH v2 00/19] Control VQ support in vDPA Jason Wang
2022-05-09  3:42   ` Jason Wang
2022-05-09  7:07   ` Michael S. Tsirkin
2022-05-09  7:07     ` Michael S. Tsirkin
2022-05-20  8:04 ` Eugenio Perez Martin
