* [PATCH 0/2] vdpa: map shadow vrings with MAP_SHARED
@ 2023-06-02 8:53 Eugenio Pérez
2023-06-02 8:53 ` [PATCH 1/2] vdpa: reorder vhost_vdpa_net_cvq_cmd_page_len function Eugenio Pérez
2023-06-02 8:53 ` [PATCH 2/2] vdpa: map shadow vrings with MAP_SHARED Eugenio Pérez
0 siblings, 2 replies; 4+ messages in thread
From: Eugenio Pérez @ 2023-06-02 8:53 UTC (permalink / raw)
To: qemu-devel; +Cc: Lei Yang, Jason Wang, Michael S. Tsirkin, Eugenio Pérez
The vdpa devices that use va addresses need these maps to be shared. Otherwise,
the vhost_vdpa checks will refuse to accept the maps.
This issue was discovered while testing SVQ with the vdpa simulator, which now
defaults to use_va=on.
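As a quick illustration (a simplified sketch, not the exact patched code; buf and
len are placeholder names), the series replaces the private, malloc-style
allocations with anonymous shared mappings:

    /* Before: page-aligned private allocation */
    buf = qemu_memalign(qemu_real_host_page_size(), len);
    memset(buf, 0, len);
    /* ... use buffer ... */
    qemu_vfree(buf);

    /* After: anonymous MAP_SHARED mapping, page-aligned by construction */
    buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    /* ... use buffer ... */
    munmap(buf, len);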
Eugenio Pérez (2):
vdpa: reorder vhost_vdpa_net_cvq_cmd_page_len function
vdpa: map shadow vrings with MAP_SHARED
hw/virtio/vhost-shadow-virtqueue.c | 16 +++++-----
net/vhost-vdpa.c | 47 +++++++++++++++---------------
2 files changed, 30 insertions(+), 33 deletions(-)
--
2.31.1
* [PATCH 1/2] vdpa: reorder vhost_vdpa_net_cvq_cmd_page_len function
2023-06-02 8:53 [PATCH 0/2] vdpa: map shadow vrings with MAP_SHARED Eugenio Pérez
@ 2023-06-02 8:53 ` Eugenio Pérez
2023-06-02 9:25 ` Philippe Mathieu-Daudé
2023-06-02 8:53 ` [PATCH 2/2] vdpa: map shadow vrings with MAP_SHARED Eugenio Pérez
1 sibling, 1 reply; 4+ messages in thread
From: Eugenio Pérez @ 2023-06-02 8:53 UTC (permalink / raw)
To: qemu-devel; +Cc: Lei Yang, Jason Wang, Michael S. Tsirkin, Eugenio Pérez
We need to call vhost_vdpa_net_cvq_cmd_page_len() from the resource cleanup
context, as munmap needs the size of the mappings.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
net/vhost-vdpa.c | 32 ++++++++++++++++----------------
1 file changed, 16 insertions(+), 16 deletions(-)
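For context, a minimal sketch of the caller that the next patch adds (not part of
this patch itself): unlike qemu_vfree(), munmap() takes the mapping length, which
is why the helper has to be visible from the cleanup path.

    static void vhost_vdpa_cleanup(NetClientState *nc)
    {
        VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

        /* munmap() needs the same length that was passed to mmap() */
        munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
        munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    }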
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 37cdc84562..5fcdc05042 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -116,6 +116,22 @@ VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
return s->vhost_net;
}
+static size_t vhost_vdpa_net_cvq_cmd_len(void)
+{
+ /*
+ * MAC_TABLE_SET is the ctrl command that produces the longer out buffer.
+ * In buffer is always 1 byte, so it should fit here
+ */
+ return sizeof(struct virtio_net_ctrl_hdr) +
+ 2 * sizeof(struct virtio_net_ctrl_mac) +
+ MAC_TABLE_ENTRIES * ETH_ALEN;
+}
+
+static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
+{
+ return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
+}
+
static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
uint64_t invalid_dev_features =
@@ -422,22 +438,6 @@ static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
vhost_iova_tree_remove(tree, *map);
}
-static size_t vhost_vdpa_net_cvq_cmd_len(void)
-{
- /*
- * MAC_TABLE_SET is the ctrl command that produces the longer out buffer.
- * In buffer is always 1 byte, so it should fit here
- */
- return sizeof(struct virtio_net_ctrl_hdr) +
- 2 * sizeof(struct virtio_net_ctrl_mac) +
- MAC_TABLE_ENTRIES * ETH_ALEN;
-}
-
-static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
-{
- return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
-}
-
/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
bool write)
--
2.31.1
* [PATCH 2/2] vdpa: map shadow vrings with MAP_SHARED
2023-06-02 8:53 [PATCH 0/2] vdpa: map shadow vrings with MAP_SHARED Eugenio Pérez
2023-06-02 8:53 ` [PATCH 1/2] vdpa: reorder vhost_vdpa_net_cvq_cmd_page_len function Eugenio Pérez
@ 2023-06-02 8:53 ` Eugenio Pérez
1 sibling, 0 replies; 4+ messages in thread
From: Eugenio Pérez @ 2023-06-02 8:53 UTC (permalink / raw)
To: qemu-devel; +Cc: Lei Yang, Jason Wang, Michael S. Tsirkin, Eugenio Pérez
The vdpa devices that use va addresses need these maps to be shared.
Otherwise, the vhost_vdpa checks will refuse to accept the maps.
The mmap call always returns a page-aligned address, so the qemu_memalign call
is removed. The ROUND_UP of the size is kept, as we still need to DMA-map the
buffers in full.
No Fixes tag is applied, as this never worked with va devices.
Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
---
hw/virtio/vhost-shadow-virtqueue.c | 16 +++++++---------
net/vhost-vdpa.c | 15 +++++++--------
2 files changed, 14 insertions(+), 17 deletions(-)
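To illustrate why the ROUND_UP is kept (an illustrative sketch assuming the usual
4 KiB host pages; the MAP_FAILED check is an addition of this sketch, not of the
patch):

    size_t cmd_len = vhost_vdpa_net_cvq_cmd_len();   /* well under one page */
    size_t map_len = ROUND_UP(cmd_len, qemu_real_host_page_size());

    void *buf = mmap(NULL, map_len, PROT_READ | PROT_WRITE,
                     MAP_SHARED | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        /* handle the allocation failure */
    }
    /* the whole map_len, not just cmd_len, is later DMA-mapped and munmap()ed */
    munmap(buf, map_len);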
diff --git a/hw/virtio/vhost-shadow-virtqueue.c b/hw/virtio/vhost-shadow-virtqueue.c
index bd7c12b6d3..1892e2cef1 100644
--- a/hw/virtio/vhost-shadow-virtqueue.c
+++ b/hw/virtio/vhost-shadow-virtqueue.c
@@ -649,7 +649,7 @@ void vhost_svq_set_svq_kick_fd(VhostShadowVirtqueue *svq, int svq_kick_fd)
void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
VirtQueue *vq, VhostIOVATree *iova_tree)
{
- size_t desc_size, driver_size, device_size;
+ size_t desc_size;
event_notifier_set_handler(&svq->hdev_call, vhost_svq_handle_call);
svq->next_guest_avail_elem = NULL;
@@ -662,14 +662,12 @@ void vhost_svq_start(VhostShadowVirtqueue *svq, VirtIODevice *vdev,
svq->vring.num = virtio_queue_get_num(vdev, virtio_get_queue_index(vq));
svq->num_free = svq->vring.num;
- driver_size = vhost_svq_driver_area_size(svq);
- device_size = vhost_svq_device_area_size(svq);
- svq->vring.desc = qemu_memalign(qemu_real_host_page_size(), driver_size);
+ svq->vring.desc = mmap(NULL, vhost_svq_driver_area_size(svq),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
desc_size = sizeof(vring_desc_t) * svq->vring.num;
svq->vring.avail = (void *)((char *)svq->vring.desc + desc_size);
- memset(svq->vring.desc, 0, driver_size);
- svq->vring.used = qemu_memalign(qemu_real_host_page_size(), device_size);
- memset(svq->vring.used, 0, device_size);
+ svq->vring.used = mmap(NULL, vhost_svq_device_area_size(svq),
+ PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
svq->desc_state = g_new0(SVQDescState, svq->vring.num);
svq->desc_next = g_new0(uint16_t, svq->vring.num);
for (unsigned i = 0; i < svq->vring.num - 1; i++) {
@@ -712,8 +710,8 @@ void vhost_svq_stop(VhostShadowVirtqueue *svq)
svq->vq = NULL;
g_free(svq->desc_next);
g_free(svq->desc_state);
- qemu_vfree(svq->vring.desc);
- qemu_vfree(svq->vring.used);
+ munmap(svq->vring.desc, vhost_svq_driver_area_size(svq));
+ munmap(svq->vring.used, vhost_svq_device_area_size(svq));
event_notifier_set_handler(&svq->hdev_call, NULL);
}
diff --git a/net/vhost-vdpa.c b/net/vhost-vdpa.c
index 5fcdc05042..329b481351 100644
--- a/net/vhost-vdpa.c
+++ b/net/vhost-vdpa.c
@@ -201,8 +201,8 @@ static void vhost_vdpa_cleanup(NetClientState *nc)
{
VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
- qemu_vfree(s->cvq_cmd_out_buffer);
- qemu_vfree(s->status);
+ munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
+ munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
if (s->vhost_net) {
vhost_net_cleanup(s->vhost_net);
g_free(s->vhost_net);
@@ -826,12 +826,11 @@ static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
vhost_vdpa_net_valid_svq_features(features,
&s->vhost_vdpa.migration_blocker);
} else if (!is_datapath) {
- s->cvq_cmd_out_buffer = qemu_memalign(qemu_real_host_page_size(),
- vhost_vdpa_net_cvq_cmd_page_len());
- memset(s->cvq_cmd_out_buffer, 0, vhost_vdpa_net_cvq_cmd_page_len());
- s->status = qemu_memalign(qemu_real_host_page_size(),
- vhost_vdpa_net_cvq_cmd_page_len());
- memset(s->status, 0, vhost_vdpa_net_cvq_cmd_page_len());
+ s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
+ PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
s->vhost_vdpa.shadow_vq_ops_opaque = s;
--
2.31.1
* Re: [PATCH 1/2] vdpa: reorder vhost_vdpa_net_cvq_cmd_page_len function
2023-06-02 8:53 ` [PATCH 1/2] vdpa: reorder vhost_vdpa_net_cvq_cmd_page_len function Eugenio Pérez
@ 2023-06-02 9:25 ` Philippe Mathieu-Daudé
0 siblings, 0 replies; 4+ messages in thread
From: Philippe Mathieu-Daudé @ 2023-06-02 9:25 UTC (permalink / raw)
To: Eugenio Pérez, qemu-devel; +Cc: Lei Yang, Jason Wang, Michael S. Tsirkin
On 2/6/23 10:53, Eugenio Pérez wrote:
> We need to call vhost_vdpa_net_cvq_cmd_page_len() from the resource cleanup
> context, as munmap needs the size of the mappings.
>
> Signed-off-by: Eugenio Pérez <eperezma@redhat.com>
> ---
> net/vhost-vdpa.c | 32 ++++++++++++++++----------------
> 1 file changed, 16 insertions(+), 16 deletions(-)
Reviewed-by: Philippe Mathieu-Daudé <philmd@linaro.org>