* [PATCH RFC 1/5] vhost-vdpa: refine ioctl pre-processing
2020-06-18 5:56 [PATCH RFC 0/5] support batched IOTLB updating in vhost-vdpa Jason Wang
@ 2020-06-18 5:56 ` Jason Wang
2020-06-18 5:56 ` [PATCH RFC 2/5] vhost: generialize backend features setting/getting Jason Wang
` (3 subsequent siblings)
4 siblings, 0 replies; 10+ messages in thread
From: Jason Wang @ 2020-06-18 5:56 UTC (permalink / raw)
To: mst, jasowang, virtualization, linux-kernel
Cc: rob.miller, lingshan.zhu, eperezma, lulu, shahafs, hanand,
mhabets, gdawar, saugatm, vmireyno, zhangweining, eli
Switch to using 'switch' to make the code easier to extend.
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/vhost/vdpa.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 77a0c9fb6cc3..db4c9cb44c61 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -353,15 +353,16 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
idx = array_index_nospec(idx, v->nvqs);
vq = &v->vqs[idx];
- if (cmd == VHOST_VDPA_SET_VRING_ENABLE) {
+ switch (cmd) {
+ case VHOST_VDPA_SET_VRING_ENABLE:
if (copy_from_user(&s, argp, sizeof(s)))
return -EFAULT;
ops->set_vq_ready(vdpa, idx, s.num);
return 0;
- }
-
- if (cmd == VHOST_GET_VRING_BASE)
+ case VHOST_GET_VRING_BASE:
vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);
+ break;
+ }
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
if (r)
--
2.20.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH RFC 2/5] vhost: generialize backend features setting/getting
2020-06-18 5:56 [PATCH RFC 0/5] support batched IOTLB updating in vhost-vdpa Jason Wang
2020-06-18 5:56 ` [PATCH RFC 1/5] vhost-vdpa: refine ioctl pre-processing Jason Wang
@ 2020-06-18 5:56 ` Jason Wang
2020-06-18 5:56 ` [PATCH RFC 3/5] vhost-vdpa: support get/set backend features Jason Wang
` (2 subsequent siblings)
4 siblings, 0 replies; 10+ messages in thread
From: Jason Wang @ 2020-06-18 5:56 UTC (permalink / raw)
To: mst, jasowang, virtualization, linux-kernel
Cc: rob.miller, lingshan.zhu, eperezma, lulu, shahafs, hanand,
mhabets, gdawar, saugatm, vmireyno, zhangweining, eli
Move the backend features setting/getting from net.c to vhost.c to be
reused by vhost-vdpa.
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/vhost/net.c | 18 ++----------------
drivers/vhost/vhost.c | 15 +++++++++++++++
drivers/vhost/vhost.h | 2 ++
3 files changed, 19 insertions(+), 16 deletions(-)
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 0b509be8d7b1..d88afe3f6060 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1622,21 +1622,6 @@ static long vhost_net_reset_owner(struct vhost_net *n)
return err;
}
-static int vhost_net_set_backend_features(struct vhost_net *n, u64 features)
-{
- int i;
-
- mutex_lock(&n->dev.mutex);
- for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
- mutex_lock(&n->vqs[i].vq.mutex);
- n->vqs[i].vq.acked_backend_features = features;
- mutex_unlock(&n->vqs[i].vq.mutex);
- }
- mutex_unlock(&n->dev.mutex);
-
- return 0;
-}
-
static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
size_t vhost_hlen, sock_hlen, hdr_len;
@@ -1737,7 +1722,8 @@ static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
return -EFAULT;
if (features & ~VHOST_NET_BACKEND_FEATURES)
return -EOPNOTSUPP;
- return vhost_net_set_backend_features(n, features);
+ vhost_set_backend_features(&n->dev, features);
+ return 0;
case VHOST_RESET_OWNER:
return vhost_net_reset_owner(n);
case VHOST_SET_OWNER:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index dc510dc2b1ef..ba61f499d420 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2700,6 +2700,21 @@ struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
}
EXPORT_SYMBOL_GPL(vhost_dequeue_msg);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features)
+{
+ struct vhost_virtqueue *vq;
+ int i;
+
+ mutex_lock(&dev->mutex);
+ for (i = 0; i < dev->nvqs; ++i) {
+ vq = dev->vqs[i];
+ mutex_lock(&vq->mutex);
+ vq->acked_backend_features = features;
+ mutex_unlock(&vq->mutex);
+ }
+ mutex_unlock(&dev->mutex);
+}
+EXPORT_SYMBOL_GPL(vhost_set_backend_features);
static int __init vhost_init(void)
{
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 264a2a2fae97..52753aecd82a 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -223,6 +223,8 @@ void vhost_enqueue_msg(struct vhost_dev *dev,
struct vhost_msg_node *node);
struct vhost_msg_node *vhost_dequeue_msg(struct vhost_dev *dev,
struct list_head *head);
+void vhost_set_backend_features(struct vhost_dev *dev, u64 features);
+
__poll_t vhost_chr_poll(struct file *file, struct vhost_dev *dev,
poll_table *wait);
ssize_t vhost_chr_read_iter(struct vhost_dev *dev, struct iov_iter *to,
--
2.20.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH RFC 3/5] vhost-vdpa: support get/set backend features
2020-06-18 5:56 [PATCH RFC 0/5] support batched IOTLB updating in vhost-vdpa Jason Wang
2020-06-18 5:56 ` [PATCH RFC 1/5] vhost-vdpa: refine ioctl pre-processing Jason Wang
2020-06-18 5:56 ` [PATCH RFC 2/5] vhost: generialize backend features setting/getting Jason Wang
@ 2020-06-18 5:56 ` Jason Wang
2020-06-18 5:56 ` [PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints Jason Wang
2020-06-18 5:56 ` [PATCH RFC 5/5] vdpasim: support batch updating Jason Wang
4 siblings, 0 replies; 10+ messages in thread
From: Jason Wang @ 2020-06-18 5:56 UTC (permalink / raw)
To: mst, jasowang, virtualization, linux-kernel
Cc: rob.miller, lingshan.zhu, eperezma, lulu, shahafs, hanand,
mhabets, gdawar, saugatm, vmireyno, zhangweining, eli
This patch allows userspace to get and set backend features of
vhost-vdpa.
Signed-off-by: Cindy Lu <lulu@redhat.com>
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/vhost/vdpa.c | 18 ++++++++++++++++++
1 file changed, 18 insertions(+)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index db4c9cb44c61..453057421f80 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -55,6 +55,10 @@ enum {
(1ULL << VIRTIO_NET_F_SPEED_DUPLEX),
};
+enum {
+ VHOST_VDPA_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
+};
+
/* Currently, only network backend w/o multiqueue is supported. */
#define VHOST_VDPA_VQ_MAX 2
@@ -340,6 +344,8 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
struct vdpa_callback cb;
struct vhost_virtqueue *vq;
struct vhost_vring_state s;
+ u64 __user *featurep = argp;
+ u64 features;
u32 idx;
long r;
@@ -362,6 +368,18 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd,
case VHOST_GET_VRING_BASE:
vq->last_avail_idx = ops->get_vq_state(v->vdpa, idx);
break;
+ case VHOST_GET_BACKEND_FEATURES:
+ features = VHOST_VDPA_BACKEND_FEATURES;
+ if (copy_to_user(featurep, &features, sizeof(features)))
+ return -EFAULT;
+ return 0;
+ case VHOST_SET_BACKEND_FEATURES:
+ if (copy_from_user(&features, featurep, sizeof(features)))
+ return -EFAULT;
+ if (features & ~VHOST_VDPA_BACKEND_FEATURES)
+ return -EOPNOTSUPP;
+ vhost_set_backend_features(&v->vdev, features);
+ return 0;
}
r = vhost_vring_ioctl(&v->vdev, cmd, argp);
--
2.20.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* [PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints
2020-06-18 5:56 [PATCH RFC 0/5] support batched IOTLB updating in vhost-vdpa Jason Wang
` (2 preceding siblings ...)
2020-06-18 5:56 ` [PATCH RFC 3/5] vhost-vdpa: support get/set backend features Jason Wang
@ 2020-06-18 5:56 ` Jason Wang
2020-06-28 9:58 ` Michael S. Tsirkin
2020-06-18 5:56 ` [PATCH RFC 5/5] vdpasim: support batch updating Jason Wang
4 siblings, 1 reply; 10+ messages in thread
From: Jason Wang @ 2020-06-18 5:56 UTC (permalink / raw)
To: mst, jasowang, virtualization, linux-kernel
Cc: rob.miller, lingshan.zhu, eperezma, lulu, shahafs, hanand,
mhabets, gdawar, saugatm, vmireyno, zhangweining, eli
This patch extends the vhost IOTLB API to accept batch updating hints
from userspace. When userspace wants to update the device IOTLB in a
batch, it may do:
1) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_BEGIN flag
2) Perform a batch of IOTLB updating via VHOST_IOTLB_UPDATE/INVALIDATE
3) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_END flag
Vhost-vdpa may decide to batch the IOMMU/IOTLB updating in step 3 when
the vDPA device supports the set_map() ops. This is useful for a vDPA
device that wants to know all the mappings to tweak its own DMA
translation logic.
For vDPA device that doesn't require set_map(), no behavior changes.
This capability is advertised via VHOST_BACKEND_F_IOTLB_BATCH capability.
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/vhost/vdpa.c | 30 +++++++++++++++++++++++-------
include/uapi/linux/vhost.h | 2 ++
include/uapi/linux/vhost_types.h | 7 +++++++
3 files changed, 32 insertions(+), 7 deletions(-)
diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
index 453057421f80..8f624bbafee7 100644
--- a/drivers/vhost/vdpa.c
+++ b/drivers/vhost/vdpa.c
@@ -56,7 +56,9 @@ enum {
};
enum {
- VHOST_VDPA_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
+ VHOST_VDPA_BACKEND_FEATURES =
+ (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
+ (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
};
/* Currently, only network backend w/o multiqueue is supported. */
@@ -77,6 +79,7 @@ struct vhost_vdpa {
int virtio_id;
int minor;
struct eventfd_ctx *config_ctx;
+ int in_batch;
};
static DEFINE_IDA(vhost_vdpa_ida);
@@ -125,6 +128,7 @@ static void vhost_vdpa_reset(struct vhost_vdpa *v)
const struct vdpa_config_ops *ops = vdpa->config;
ops->set_status(vdpa, 0);
+ v->in_batch = 0;
}
static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
@@ -540,9 +544,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
if (ops->dma_map)
r = ops->dma_map(vdpa, iova, size, pa, perm);
- else if (ops->set_map)
- r = ops->set_map(vdpa, dev->iotlb);
- else
+ else if (ops->set_map) {
+ if (!v->in_batch)
+ r = ops->set_map(vdpa, dev->iotlb);
+ } else
r = iommu_map(v->domain, iova, pa, size,
perm_to_iommu_flags(perm));
@@ -559,9 +564,10 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
if (ops->dma_map)
ops->dma_unmap(vdpa, iova, size);
- else if (ops->set_map)
- ops->set_map(vdpa, dev->iotlb);
- else
+ else if (ops->set_map) {
+ if (!v->in_batch)
+ ops->set_map(vdpa, dev->iotlb);
+ } else
iommu_unmap(v->domain, iova, size);
}
@@ -655,6 +661,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
struct vhost_iotlb_msg *msg)
{
struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
+ struct vdpa_device *vdpa = v->vdpa;
+ const struct vdpa_config_ops *ops = vdpa->config;
int r = 0;
r = vhost_dev_check_owner(dev);
@@ -668,6 +676,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
case VHOST_IOTLB_INVALIDATE:
vhost_vdpa_unmap(v, msg->iova, msg->size);
break;
+ case VHOST_IOTLB_BATCH_BEGIN:
+ v->in_batch = true;
+ break;
+ case VHOST_IOTLB_BATCH_END:
+ if (v->in_batch && ops->set_map)
+ ops->set_map(vdpa, dev->iotlb);
+ v->in_batch = false;
+ break;
default:
r = -EINVAL;
break;
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index 0c2349612e77..565da96f55d5 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -91,6 +91,8 @@
/* Use message type V2 */
#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+/* IOTLB can accpet batching hints */
+#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
#define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
index 669457ce5c48..5c12faffdde9 100644
--- a/include/uapi/linux/vhost_types.h
+++ b/include/uapi/linux/vhost_types.h
@@ -60,6 +60,13 @@ struct vhost_iotlb_msg {
#define VHOST_IOTLB_UPDATE 2
#define VHOST_IOTLB_INVALIDATE 3
#define VHOST_IOTLB_ACCESS_FAIL 4
+/* VHOST_IOTLB_BATCH_BEGIN is a hint that userspace will update
+ * several mappings afterwards. VHOST_IOTLB_BATCH_END is a hint that
+ * userspace had finished the mapping updating. When those two flags
+ * were set, kernel will ignore the rest fileds of the IOTLB message.
+ */
+#define VHOST_IOTLB_BATCH_BEGIN 5
+#define VHOST_IOTLB_BATCH_END 6
__u8 type;
};
--
2.20.1
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints
2020-06-18 5:56 ` [PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints Jason Wang
@ 2020-06-28 9:58 ` Michael S. Tsirkin
2020-06-29 9:26 ` Jason Wang
0 siblings, 1 reply; 10+ messages in thread
From: Michael S. Tsirkin @ 2020-06-28 9:58 UTC (permalink / raw)
To: Jason Wang
Cc: virtualization, linux-kernel, rob.miller, lingshan.zhu, eperezma,
lulu, shahafs, hanand, mhabets, gdawar, saugatm, vmireyno,
zhangweining, eli
On Thu, Jun 18, 2020 at 01:56:25PM +0800, Jason Wang wrote:
> This patches extend the vhost IOTLB API to accept batch updating hints
> form userspace. When userspace wants update the device IOTLB in a
> batch, it may do:
>
> 1) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_BEGIN flag
> 2) Perform a batch of IOTLB updating via VHOST_IOTLB_UPDATE/INVALIDATE
> 3) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_END flag
As long as we are extending the interface,
is there some way we could cut down the number of system calls needed
here?
>
> Vhost-vdpa may decide to batch the IOMMU/IOTLB updating in step 3 when
> vDPA device support set_map() ops. This is useful for the vDPA device
> that want to know all the mappings to tweak their own DMA translation
> logic.
>
> For vDPA device that doesn't require set_map(), no behavior changes.
>
> This capability is advertised via VHOST_BACKEND_F_IOTLB_BATCH capability.
>
> Signed-off-by: Jason Wang <jasowang@redhat.com>
> ---
> drivers/vhost/vdpa.c | 30 +++++++++++++++++++++++-------
> include/uapi/linux/vhost.h | 2 ++
> include/uapi/linux/vhost_types.h | 7 +++++++
> 3 files changed, 32 insertions(+), 7 deletions(-)
>
> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> index 453057421f80..8f624bbafee7 100644
> --- a/drivers/vhost/vdpa.c
> +++ b/drivers/vhost/vdpa.c
> @@ -56,7 +56,9 @@ enum {
> };
>
> enum {
> - VHOST_VDPA_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
> + VHOST_VDPA_BACKEND_FEATURES =
> + (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
> + (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
> };
>
> /* Currently, only network backend w/o multiqueue is supported. */
> @@ -77,6 +79,7 @@ struct vhost_vdpa {
> int virtio_id;
> int minor;
> struct eventfd_ctx *config_ctx;
> + int in_batch;
> };
>
> static DEFINE_IDA(vhost_vdpa_ida);
> @@ -125,6 +128,7 @@ static void vhost_vdpa_reset(struct vhost_vdpa *v)
> const struct vdpa_config_ops *ops = vdpa->config;
>
> ops->set_status(vdpa, 0);
> + v->in_batch = 0;
> }
>
> static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
> @@ -540,9 +544,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>
> if (ops->dma_map)
> r = ops->dma_map(vdpa, iova, size, pa, perm);
> - else if (ops->set_map)
> - r = ops->set_map(vdpa, dev->iotlb);
> - else
> + else if (ops->set_map) {
> + if (!v->in_batch)
> + r = ops->set_map(vdpa, dev->iotlb);
> + } else
> r = iommu_map(v->domain, iova, pa, size,
> perm_to_iommu_flags(perm));
>
> @@ -559,9 +564,10 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
>
> if (ops->dma_map)
> ops->dma_unmap(vdpa, iova, size);
> - else if (ops->set_map)
> - ops->set_map(vdpa, dev->iotlb);
> - else
> + else if (ops->set_map) {
> + if (!v->in_batch)
> + ops->set_map(vdpa, dev->iotlb);
> + } else
> iommu_unmap(v->domain, iova, size);
> }
>
> @@ -655,6 +661,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
> struct vhost_iotlb_msg *msg)
> {
> struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
> + struct vdpa_device *vdpa = v->vdpa;
> + const struct vdpa_config_ops *ops = vdpa->config;
> int r = 0;
>
> r = vhost_dev_check_owner(dev);
> @@ -668,6 +676,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
> case VHOST_IOTLB_INVALIDATE:
> vhost_vdpa_unmap(v, msg->iova, msg->size);
> break;
> + case VHOST_IOTLB_BATCH_BEGIN:
> + v->in_batch = true;
> + break;
> + case VHOST_IOTLB_BATCH_END:
> + if (v->in_batch && ops->set_map)
> + ops->set_map(vdpa, dev->iotlb);
> + v->in_batch = false;
> + break;
> default:
> r = -EINVAL;
> break;
> diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
> index 0c2349612e77..565da96f55d5 100644
> --- a/include/uapi/linux/vhost.h
> +++ b/include/uapi/linux/vhost.h
> @@ -91,6 +91,8 @@
>
> /* Use message type V2 */
> #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
> +/* IOTLB can accpet batching hints */
typo
> +#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
>
> #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
> #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
> diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
> index 669457ce5c48..5c12faffdde9 100644
> --- a/include/uapi/linux/vhost_types.h
> +++ b/include/uapi/linux/vhost_types.h
> @@ -60,6 +60,13 @@ struct vhost_iotlb_msg {
> #define VHOST_IOTLB_UPDATE 2
> #define VHOST_IOTLB_INVALIDATE 3
> #define VHOST_IOTLB_ACCESS_FAIL 4
> +/* VHOST_IOTLB_BATCH_BEGIN is a hint that userspace will update
> + * several mappings afterwards. VHOST_IOTLB_BATCH_END is a hint that
> + * userspace had finished the mapping updating.
Well not just hints - in fact updates do not take place
until _END.
How about:
/* VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying
* multiple mappings in one go: beginning with
* VHOST_IOTLB_BATCH_BEGIN, followed by any number of
VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END.
*/
> When those two flags
> + * were set, kernel will ignore the rest fileds of the IOTLB message.
how about:
when one of these two values is used as the message type, the
rest of the fields in the message are ignored.
> + */
> +#define VHOST_IOTLB_BATCH_BEGIN 5
> +#define VHOST_IOTLB_BATCH_END 6
> __u8 type;
> };
>
> --
> 2.20.1
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints
2020-06-28 9:58 ` Michael S. Tsirkin
@ 2020-06-29 9:26 ` Jason Wang
2020-06-29 15:49 ` Michael S. Tsirkin
0 siblings, 1 reply; 10+ messages in thread
From: Jason Wang @ 2020-06-29 9:26 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: virtualization, linux-kernel, rob.miller, lingshan.zhu, eperezma,
lulu, shahafs, hanand, mhabets, gdawar, saugatm, vmireyno,
zhangweining, eli
On 2020/6/28 下午5:58, Michael S. Tsirkin wrote:
> On Thu, Jun 18, 2020 at 01:56:25PM +0800, Jason Wang wrote:
>> This patches extend the vhost IOTLB API to accept batch updating hints
>> form userspace. When userspace wants update the device IOTLB in a
>> batch, it may do:
>>
>> 1) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_BEGIN flag
>> 2) Perform a batch of IOTLB updating via VHOST_IOTLB_UPDATE/INVALIDATE
>> 3) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_END flag
> As long as we are extending the interface,
> is there some way we could cut down the number of system calls needed
> here?
I'm not sure it's worth to do that since usually we only have less than
10 regions.
A possible method is to carry multiple vhost_iotlb_message in one system
call.
>
>
>> Vhost-vdpa may decide to batch the IOMMU/IOTLB updating in step 3 when
>> vDPA device support set_map() ops. This is useful for the vDPA device
>> that want to know all the mappings to tweak their own DMA translation
>> logic.
>>
>> For vDPA device that doesn't require set_map(), no behavior changes.
>>
>> This capability is advertised via VHOST_BACKEND_F_IOTLB_BATCH capability.
>>
>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>> ---
>> drivers/vhost/vdpa.c | 30 +++++++++++++++++++++++-------
>> include/uapi/linux/vhost.h | 2 ++
>> include/uapi/linux/vhost_types.h | 7 +++++++
>> 3 files changed, 32 insertions(+), 7 deletions(-)
>>
>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>> index 453057421f80..8f624bbafee7 100644
>> --- a/drivers/vhost/vdpa.c
>> +++ b/drivers/vhost/vdpa.c
>> @@ -56,7 +56,9 @@ enum {
>> };
>>
>> enum {
>> - VHOST_VDPA_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
>> + VHOST_VDPA_BACKEND_FEATURES =
>> + (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
>> + (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
>> };
>>
>> /* Currently, only network backend w/o multiqueue is supported. */
>> @@ -77,6 +79,7 @@ struct vhost_vdpa {
>> int virtio_id;
>> int minor;
>> struct eventfd_ctx *config_ctx;
>> + int in_batch;
>> };
>>
>> static DEFINE_IDA(vhost_vdpa_ida);
>> @@ -125,6 +128,7 @@ static void vhost_vdpa_reset(struct vhost_vdpa *v)
>> const struct vdpa_config_ops *ops = vdpa->config;
>>
>> ops->set_status(vdpa, 0);
>> + v->in_batch = 0;
>> }
>>
>> static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
>> @@ -540,9 +544,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>
>> if (ops->dma_map)
>> r = ops->dma_map(vdpa, iova, size, pa, perm);
>> - else if (ops->set_map)
>> - r = ops->set_map(vdpa, dev->iotlb);
>> - else
>> + else if (ops->set_map) {
>> + if (!v->in_batch)
>> + r = ops->set_map(vdpa, dev->iotlb);
>> + } else
>> r = iommu_map(v->domain, iova, pa, size,
>> perm_to_iommu_flags(perm));
>>
>> @@ -559,9 +564,10 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
>>
>> if (ops->dma_map)
>> ops->dma_unmap(vdpa, iova, size);
>> - else if (ops->set_map)
>> - ops->set_map(vdpa, dev->iotlb);
>> - else
>> + else if (ops->set_map) {
>> + if (!v->in_batch)
>> + ops->set_map(vdpa, dev->iotlb);
>> + } else
>> iommu_unmap(v->domain, iova, size);
>> }
>>
>> @@ -655,6 +661,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
>> struct vhost_iotlb_msg *msg)
>> {
>> struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
>> + struct vdpa_device *vdpa = v->vdpa;
>> + const struct vdpa_config_ops *ops = vdpa->config;
>> int r = 0;
>>
>> r = vhost_dev_check_owner(dev);
>> @@ -668,6 +676,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
>> case VHOST_IOTLB_INVALIDATE:
>> vhost_vdpa_unmap(v, msg->iova, msg->size);
>> break;
>> + case VHOST_IOTLB_BATCH_BEGIN:
>> + v->in_batch = true;
>> + break;
>> + case VHOST_IOTLB_BATCH_END:
>> + if (v->in_batch && ops->set_map)
>> + ops->set_map(vdpa, dev->iotlb);
>> + v->in_batch = false;
>> + break;
>> default:
>> r = -EINVAL;
>> break;
>> diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
>> index 0c2349612e77..565da96f55d5 100644
>> --- a/include/uapi/linux/vhost.h
>> +++ b/include/uapi/linux/vhost.h
>> @@ -91,6 +91,8 @@
>>
>> /* Use message type V2 */
>> #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
>> +/* IOTLB can accpet batching hints */
> typo
Will fix.
>
>> +#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
>>
>> #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
>> #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
>> diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
>> index 669457ce5c48..5c12faffdde9 100644
>> --- a/include/uapi/linux/vhost_types.h
>> +++ b/include/uapi/linux/vhost_types.h
>> @@ -60,6 +60,13 @@ struct vhost_iotlb_msg {
>> #define VHOST_IOTLB_UPDATE 2
>> #define VHOST_IOTLB_INVALIDATE 3
>> #define VHOST_IOTLB_ACCESS_FAIL 4
>> +/* VHOST_IOTLB_BATCH_BEGIN is a hint that userspace will update
>> + * several mappings afterwards. VHOST_IOTLB_BATCH_END is a hint that
>> + * userspace had finished the mapping updating.
>
> Well not just hints - in fact updates do not take place
> until _END.
>
> How about:
>
> /* VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying
> * multiple mappings in one go: beginning with
> * VHOST_IOTLB_BATCH_BEGIN, followed by any number of
> VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END.
> */
That's better.
>
>
>> When those two flags
>> + * were set, kernel will ignore the rest fileds of the IOTLB message.
> how about:
>
> when one of these two values is used as the message type, the
> rest of the fields in the message are ignored.
Yes.
Will fix.
Thanks
>
>> + */
>> +#define VHOST_IOTLB_BATCH_BEGIN 5
>> +#define VHOST_IOTLB_BATCH_END 6
>> __u8 type;
>> };
>>
>> --
>> 2.20.1
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints
2020-06-29 9:26 ` Jason Wang
@ 2020-06-29 15:49 ` Michael S. Tsirkin
2020-06-30 1:55 ` Jason Wang
0 siblings, 1 reply; 10+ messages in thread
From: Michael S. Tsirkin @ 2020-06-29 15:49 UTC (permalink / raw)
To: Jason Wang
Cc: virtualization, linux-kernel, rob.miller, lingshan.zhu, eperezma,
lulu, shahafs, hanand, mhabets, gdawar, saugatm, vmireyno,
zhangweining, eli
On Mon, Jun 29, 2020 at 05:26:03PM +0800, Jason Wang wrote:
>
> On 2020/6/28 下午5:58, Michael S. Tsirkin wrote:
> > On Thu, Jun 18, 2020 at 01:56:25PM +0800, Jason Wang wrote:
> > > This patches extend the vhost IOTLB API to accept batch updating hints
> > > form userspace. When userspace wants update the device IOTLB in a
> > > batch, it may do:
> > >
> > > 1) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_BEGIN flag
> > > 2) Perform a batch of IOTLB updating via VHOST_IOTLB_UPDATE/INVALIDATE
> > > 3) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_END flag
> > As long as we are extending the interface,
> > is there some way we could cut down the number of system calls needed
> > here?
>
>
> I'm not sure it's worth to do that since usually we only have less than 10
> regions.
Well with a guest iommu I'm guessing it can go up significantly, right?
> A possible method is to carry multiple vhost_iotlb_message in one system
> call.
That's an option.
Also, can kernel handle a batch that is too large by applying
parts of it iteratively?
Or must all changes take place atomically after BATCH_END?
If atomically, we might need to limit batch size to make
sure kernel can keep a copy in memory.
>
> >
> >
> > > Vhost-vdpa may decide to batch the IOMMU/IOTLB updating in step 3 when
> > > vDPA device support set_map() ops. This is useful for the vDPA device
> > > that want to know all the mappings to tweak their own DMA translation
> > > logic.
> > >
> > > For vDPA device that doesn't require set_map(), no behavior changes.
> > >
> > > This capability is advertised via VHOST_BACKEND_F_IOTLB_BATCH capability.
> > >
> > > Signed-off-by: Jason Wang <jasowang@redhat.com>
> > > ---
> > > drivers/vhost/vdpa.c | 30 +++++++++++++++++++++++-------
> > > include/uapi/linux/vhost.h | 2 ++
> > > include/uapi/linux/vhost_types.h | 7 +++++++
> > > 3 files changed, 32 insertions(+), 7 deletions(-)
> > >
> > > diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
> > > index 453057421f80..8f624bbafee7 100644
> > > --- a/drivers/vhost/vdpa.c
> > > +++ b/drivers/vhost/vdpa.c
> > > @@ -56,7 +56,9 @@ enum {
> > > };
> > > enum {
> > > - VHOST_VDPA_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
> > > + VHOST_VDPA_BACKEND_FEATURES =
> > > + (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
> > > + (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
> > > };
> > > /* Currently, only network backend w/o multiqueue is supported. */
> > > @@ -77,6 +79,7 @@ struct vhost_vdpa {
> > > int virtio_id;
> > > int minor;
> > > struct eventfd_ctx *config_ctx;
> > > + int in_batch;
> > > };
> > > static DEFINE_IDA(vhost_vdpa_ida);
> > > @@ -125,6 +128,7 @@ static void vhost_vdpa_reset(struct vhost_vdpa *v)
> > > const struct vdpa_config_ops *ops = vdpa->config;
> > > ops->set_status(vdpa, 0);
> > > + v->in_batch = 0;
> > > }
> > > static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
> > > @@ -540,9 +544,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
> > > if (ops->dma_map)
> > > r = ops->dma_map(vdpa, iova, size, pa, perm);
> > > - else if (ops->set_map)
> > > - r = ops->set_map(vdpa, dev->iotlb);
> > > - else
> > > + else if (ops->set_map) {
> > > + if (!v->in_batch)
> > > + r = ops->set_map(vdpa, dev->iotlb);
> > > + } else
> > > r = iommu_map(v->domain, iova, pa, size,
> > > perm_to_iommu_flags(perm));
> > > @@ -559,9 +564,10 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
> > > if (ops->dma_map)
> > > ops->dma_unmap(vdpa, iova, size);
> > > - else if (ops->set_map)
> > > - ops->set_map(vdpa, dev->iotlb);
> > > - else
> > > + else if (ops->set_map) {
> > > + if (!v->in_batch)
> > > + ops->set_map(vdpa, dev->iotlb);
> > > + } else
> > > iommu_unmap(v->domain, iova, size);
> > > }
> > > @@ -655,6 +661,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
> > > struct vhost_iotlb_msg *msg)
> > > {
> > > struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
> > > + struct vdpa_device *vdpa = v->vdpa;
> > > + const struct vdpa_config_ops *ops = vdpa->config;
> > > int r = 0;
> > > r = vhost_dev_check_owner(dev);
> > > @@ -668,6 +676,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
> > > case VHOST_IOTLB_INVALIDATE:
> > > vhost_vdpa_unmap(v, msg->iova, msg->size);
> > > break;
> > > + case VHOST_IOTLB_BATCH_BEGIN:
> > > + v->in_batch = true;
> > > + break;
> > > + case VHOST_IOTLB_BATCH_END:
> > > + if (v->in_batch && ops->set_map)
> > > + ops->set_map(vdpa, dev->iotlb);
> > > + v->in_batch = false;
> > > + break;
> > > default:
> > > r = -EINVAL;
> > > break;
> > > diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
> > > index 0c2349612e77..565da96f55d5 100644
> > > --- a/include/uapi/linux/vhost.h
> > > +++ b/include/uapi/linux/vhost.h
> > > @@ -91,6 +91,8 @@
> > > /* Use message type V2 */
> > > #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
> > > +/* IOTLB can accpet batching hints */
> > typo
>
>
> Will fix.
>
>
> >
> > > +#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
> > > #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
> > > #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
> > > diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
> > > index 669457ce5c48..5c12faffdde9 100644
> > > --- a/include/uapi/linux/vhost_types.h
> > > +++ b/include/uapi/linux/vhost_types.h
> > > @@ -60,6 +60,13 @@ struct vhost_iotlb_msg {
> > > #define VHOST_IOTLB_UPDATE 2
> > > #define VHOST_IOTLB_INVALIDATE 3
> > > #define VHOST_IOTLB_ACCESS_FAIL 4
> > > +/* VHOST_IOTLB_BATCH_BEGIN is a hint that userspace will update
> > > + * several mappings afterwards. VHOST_IOTLB_BATCH_END is a hint that
> > > + * userspace had finished the mapping updating.
> >
> > Well not just hints - in fact updates do not take place
> > until _END.
> >
> > How about:
> >
> > /* VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying
> > * multiple mappings in one go: beginning with
> > * VHOST_IOTLB_BATCH_BEGIN, followed by any number of
> > VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END.
> > */
>
>
> That's better.
Is there a guarantee that these changes take place atomically?
Let's document that.
>
> >
> >
> > > When those two flags
> > > + * were set, kernel will ignore the rest fileds of the IOTLB message.
> > how about:
> >
> > when one of these two values is used as the message type, the
> > rest of the fields in the message are ignored.
>
>
> Yes.
>
> Will fix.
>
> Thanks
>
>
> >
> > > + */
> > > +#define VHOST_IOTLB_BATCH_BEGIN 5
> > > +#define VHOST_IOTLB_BATCH_END 6
> > > __u8 type;
> > > };
> > > --
> > > 2.20.1
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints
2020-06-29 15:49 ` Michael S. Tsirkin
@ 2020-06-30 1:55 ` Jason Wang
0 siblings, 0 replies; 10+ messages in thread
From: Jason Wang @ 2020-06-30 1:55 UTC (permalink / raw)
To: Michael S. Tsirkin
Cc: virtualization, linux-kernel, rob.miller, lingshan.zhu, eperezma,
lulu, shahafs, hanand, mhabets, gdawar, saugatm, vmireyno,
zhangweining, eli
On 2020/6/29 下午11:49, Michael S. Tsirkin wrote:
> On Mon, Jun 29, 2020 at 05:26:03PM +0800, Jason Wang wrote:
>> On 2020/6/28 下午5:58, Michael S. Tsirkin wrote:
>>> On Thu, Jun 18, 2020 at 01:56:25PM +0800, Jason Wang wrote:
>>>> This patch extends the vhost IOTLB API to accept batch updating hints
>>>> from userspace. When userspace wants to update the device IOTLB in a
>>>> batch, it may do:
>>>>
>>>> 1) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_BEGIN flag
>>>> 2) Perform a batch of IOTLB updating via VHOST_IOTLB_UPDATE/INVALIDATE
>>>> 3) Write vhost_iotlb_msg with VHOST_IOTLB_BATCH_END flag
>>> As long as we are extending the interface,
>>> is there some way we could cut down the number of system calls needed
>>> here?
>>
>> I'm not sure it's worth to do that since usually we only have less than 10
>> regions.
>
> Well with a guest iommu I'm guessing it can go up significantly, right?
The batching flag is not mandatory, so userspace can still choose to
update a single IOTLB mapping through a single system call without the
batch flag.
>
>> A possible method is to carry multiple vhost_iotlb_message in one system
>> call.
> That's an option.
> Also, can kernel handle a batch that is too large by applying
> parts of it iteratively?
I think so, we can iterate over the vhost iotlb messages one by one
through an iov iterator.
> Or must all changes take place atomically after BATCH_END?
For changes:
- if you mean the changes in the vhost IOTLB, they do not take place
atomically
- if you mean the mapping seen by the vDPA device driver, it could be
atomic for a device driver that implements the set_map() method, since
we pass an interval tree to the device.
> If atomically, we might need to limit batch size to make
> sure kernel can keep a copy in memory.
It's the responsibility of the guest driver to make sure there won't be
a race condition between DMA and map updating. So it looks to me we
don't need to care about that.
>
>
>>>
>>>> Vhost-vdpa may decide to batch the IOMMU/IOTLB updating in step 3 when
>>>> the vDPA device supports the set_map() ops. This is useful for vDPA
>>>> devices that want to know all the mappings to tweak their own DMA
>>>> translation logic.
>>>>
>>>> For vDPA device that doesn't require set_map(), no behavior changes.
>>>>
>>>> This capability is advertised via VHOST_BACKEND_F_IOTLB_BATCH capability.
>>>>
>>>> Signed-off-by: Jason Wang <jasowang@redhat.com>
>>>> ---
>>>> drivers/vhost/vdpa.c | 30 +++++++++++++++++++++++-------
>>>> include/uapi/linux/vhost.h | 2 ++
>>>> include/uapi/linux/vhost_types.h | 7 +++++++
>>>> 3 files changed, 32 insertions(+), 7 deletions(-)
>>>>
>>>> diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c
>>>> index 453057421f80..8f624bbafee7 100644
>>>> --- a/drivers/vhost/vdpa.c
>>>> +++ b/drivers/vhost/vdpa.c
>>>> @@ -56,7 +56,9 @@ enum {
>>>> };
>>>> enum {
>>>> - VHOST_VDPA_BACKEND_FEATURES = (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2)
>>>> + VHOST_VDPA_BACKEND_FEATURES =
>>>> + (1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2) |
>>>> + (1ULL << VHOST_BACKEND_F_IOTLB_BATCH),
>>>> };
>>>> /* Currently, only network backend w/o multiqueue is supported. */
>>>> @@ -77,6 +79,7 @@ struct vhost_vdpa {
>>>> int virtio_id;
>>>> int minor;
>>>> struct eventfd_ctx *config_ctx;
>>>> + int in_batch;
>>>> };
>>>> static DEFINE_IDA(vhost_vdpa_ida);
>>>> @@ -125,6 +128,7 @@ static void vhost_vdpa_reset(struct vhost_vdpa *v)
>>>> const struct vdpa_config_ops *ops = vdpa->config;
>>>> ops->set_status(vdpa, 0);
>>>> + v->in_batch = 0;
>>>> }
>>>> static long vhost_vdpa_get_device_id(struct vhost_vdpa *v, u8 __user *argp)
>>>> @@ -540,9 +544,10 @@ static int vhost_vdpa_map(struct vhost_vdpa *v,
>>>> if (ops->dma_map)
>>>> r = ops->dma_map(vdpa, iova, size, pa, perm);
>>>> - else if (ops->set_map)
>>>> - r = ops->set_map(vdpa, dev->iotlb);
>>>> - else
>>>> + else if (ops->set_map) {
>>>> + if (!v->in_batch)
>>>> + r = ops->set_map(vdpa, dev->iotlb);
>>>> + } else
>>>> r = iommu_map(v->domain, iova, pa, size,
>>>> perm_to_iommu_flags(perm));
>>>> @@ -559,9 +564,10 @@ static void vhost_vdpa_unmap(struct vhost_vdpa *v, u64 iova, u64 size)
>>>> if (ops->dma_map)
>>>> ops->dma_unmap(vdpa, iova, size);
>>>> - else if (ops->set_map)
>>>> - ops->set_map(vdpa, dev->iotlb);
>>>> - else
>>>> + else if (ops->set_map) {
>>>> + if (!v->in_batch)
>>>> + ops->set_map(vdpa, dev->iotlb);
>>>> + } else
>>>> iommu_unmap(v->domain, iova, size);
>>>> }
>>>> @@ -655,6 +661,8 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
>>>> struct vhost_iotlb_msg *msg)
>>>> {
>>>> struct vhost_vdpa *v = container_of(dev, struct vhost_vdpa, vdev);
>>>> + struct vdpa_device *vdpa = v->vdpa;
>>>> + const struct vdpa_config_ops *ops = vdpa->config;
>>>> int r = 0;
>>>> r = vhost_dev_check_owner(dev);
>>>> @@ -668,6 +676,14 @@ static int vhost_vdpa_process_iotlb_msg(struct vhost_dev *dev,
>>>> case VHOST_IOTLB_INVALIDATE:
>>>> vhost_vdpa_unmap(v, msg->iova, msg->size);
>>>> break;
>>>> + case VHOST_IOTLB_BATCH_BEGIN:
>>>> + v->in_batch = true;
>>>> + break;
>>>> + case VHOST_IOTLB_BATCH_END:
>>>> + if (v->in_batch && ops->set_map)
>>>> + ops->set_map(vdpa, dev->iotlb);
>>>> + v->in_batch = false;
>>>> + break;
>>>> default:
>>>> r = -EINVAL;
>>>> break;
>>>> diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
>>>> index 0c2349612e77..565da96f55d5 100644
>>>> --- a/include/uapi/linux/vhost.h
>>>> +++ b/include/uapi/linux/vhost.h
>>>> @@ -91,6 +91,8 @@
>>>> /* Use message type V2 */
>>>> #define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
>>>> +/* IOTLB can accpet batching hints */
>>> typo
>>
>> Will fix.
>>
>>
>>>> +#define VHOST_BACKEND_F_IOTLB_BATCH 0x2
>>>> #define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
>>>> #define VHOST_GET_BACKEND_FEATURES _IOR(VHOST_VIRTIO, 0x26, __u64)
>>>> diff --git a/include/uapi/linux/vhost_types.h b/include/uapi/linux/vhost_types.h
>>>> index 669457ce5c48..5c12faffdde9 100644
>>>> --- a/include/uapi/linux/vhost_types.h
>>>> +++ b/include/uapi/linux/vhost_types.h
>>>> @@ -60,6 +60,13 @@ struct vhost_iotlb_msg {
>>>> #define VHOST_IOTLB_UPDATE 2
>>>> #define VHOST_IOTLB_INVALIDATE 3
>>>> #define VHOST_IOTLB_ACCESS_FAIL 4
>>>> +/* VHOST_IOTLB_BATCH_BEGIN is a hint that userspace will update
>>>> + * several mappings afterwards. VHOST_IOTLB_BATCH_END is a hint that
>>>> + * userspace had finished the mapping updating.
>>> Well not just hints - in fact updates do not take place
>>> until _END.
>>>
>>> How about:
>>>
>>> /* VHOST_IOTLB_BATCH_BEGIN and VHOST_IOTLB_BATCH_END allow modifying
>>> * multiple mappings in one go: beginning with
>>> * VHOST_IOTLB_BATCH_BEGIN, followed by any number of
>>> VHOST_IOTLB_UPDATE messages, and ending with VHOST_IOTLB_BATCH_END.
>>> */
>>
>> That's better.
>
> Is there a guarantee that these changes take place atomically?
> Let's document that.
There's no guarantee. Will document this.
Thanks
>
>>>
>>>> When those two flags
>>>> + * were set, kernel will ignore the rest fileds of the IOTLB message.
>>> how about:
>>>
>>> when one of these two values is used as the message type, the
>>> rest of the fields in the message are ignored.
>>
>> Yes.
>>
>> Will fix.
>>
>> Thanks
>>
>>
>>>> + */
>>>> +#define VHOST_IOTLB_BATCH_BEGIN 5
>>>> +#define VHOST_IOTLB_BATCH_END 6
>>>> __u8 type;
>>>> };
>>>> --
>>>> 2.20.1
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH RFC 5/5] vdpasim: support batch updating
2020-06-18 5:56 [PATCH RFC 0/5] support batched IOTLB updating in vhost-vdpa Jason Wang
` (3 preceding siblings ...)
2020-06-18 5:56 ` [PATCH RFC 4/5] vhost-vdpa: support IOTLB batching hints Jason Wang
@ 2020-06-18 5:56 ` Jason Wang
4 siblings, 0 replies; 10+ messages in thread
From: Jason Wang @ 2020-06-18 5:56 UTC (permalink / raw)
To: mst, jasowang, virtualization, linux-kernel
Cc: rob.miller, lingshan.zhu, eperezma, lulu, shahafs, hanand,
mhabets, gdawar, saugatm, vmireyno, zhangweining, eli
The vDPA simulator supports both set_map() and dma_map()/dma_unmap()
operations. But vhost-vdpa can only use one of them. So this patch
introduces a module parameter (batch_mapping) that lets vdpa_sim
support only one of those dma operations. The batched mapping via
set_map() is enabled by default.
Signed-off-by: Jason Wang <jasowang@redhat.com>
---
drivers/vdpa/vdpa_sim/vdpa_sim.c | 40 +++++++++++++++++++++++++++++---
1 file changed, 37 insertions(+), 3 deletions(-)
diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c
index c7334cc65bb2..a7a0962ed8a8 100644
--- a/drivers/vdpa/vdpa_sim/vdpa_sim.c
+++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c
@@ -33,6 +33,10 @@
#define DRV_DESC "vDPA Device Simulator"
#define DRV_LICENSE "GPL v2"
+static int batch_mapping = 1;
+module_param(batch_mapping, int, 0444);
+MODULE_PARM_DESC(batch_mapping, "Batched mapping 1 -Enable; 0 - Disable");
+
struct vdpasim_virtqueue {
struct vringh vring;
struct vringh_kiov iov;
@@ -303,16 +307,22 @@ static const struct dma_map_ops vdpasim_dma_ops = {
};
static const struct vdpa_config_ops vdpasim_net_config_ops;
+static const struct vdpa_config_ops vdpasim_net_batch_config_ops;
static struct vdpasim *vdpasim_create(void)
{
+ const struct vdpa_config_ops *ops;
struct virtio_net_config *config;
struct vdpasim *vdpasim;
struct device *dev;
int ret = -ENOMEM;
- vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL,
- &vdpasim_net_config_ops);
+ if (batch_mapping)
+ ops = &vdpasim_net_batch_config_ops;
+ else
+ ops = &vdpasim_net_config_ops;
+
+ vdpasim = vdpa_alloc_device(struct vdpasim, vdpa, NULL, ops);
if (!vdpasim)
goto err_alloc;
@@ -597,12 +607,36 @@ static const struct vdpa_config_ops vdpasim_net_config_ops = {
.get_config = vdpasim_get_config,
.set_config = vdpasim_set_config,
.get_generation = vdpasim_get_generation,
- .set_map = vdpasim_set_map,
.dma_map = vdpasim_dma_map,
.dma_unmap = vdpasim_dma_unmap,
.free = vdpasim_free,
};
+static const struct vdpa_config_ops vdpasim_net_batch_config_ops = {
+ .set_vq_address = vdpasim_set_vq_address,
+ .set_vq_num = vdpasim_set_vq_num,
+ .kick_vq = vdpasim_kick_vq,
+ .set_vq_cb = vdpasim_set_vq_cb,
+ .set_vq_ready = vdpasim_set_vq_ready,
+ .get_vq_ready = vdpasim_get_vq_ready,
+ .set_vq_state = vdpasim_set_vq_state,
+ .get_vq_state = vdpasim_get_vq_state,
+ .get_vq_align = vdpasim_get_vq_align,
+ .get_features = vdpasim_get_features,
+ .set_features = vdpasim_set_features,
+ .set_config_cb = vdpasim_set_config_cb,
+ .get_vq_num_max = vdpasim_get_vq_num_max,
+ .get_device_id = vdpasim_get_device_id,
+ .get_vendor_id = vdpasim_get_vendor_id,
+ .get_status = vdpasim_get_status,
+ .set_status = vdpasim_set_status,
+ .get_config = vdpasim_get_config,
+ .set_config = vdpasim_set_config,
+ .get_generation = vdpasim_get_generation,
+ .set_map = vdpasim_set_map,
+ .free = vdpasim_free,
+};
+
static int __init vdpasim_dev_init(void)
{
vdpasim_dev = vdpasim_create();
--
2.20.1
^ permalink raw reply related [flat|nested] 10+ messages in thread