From: Jiachen Zhang <zhangjiachen.jaycee@bytedance.com>
To: "Dr . David Alan Gilbert" <dgilbert@redhat.com>,
"Michael S . Tsirkin" <mst@redhat.com>,
Stefan Hajnoczi <stefanha@redhat.com>,
Xie Yongji <xieyongji@bytedance.com>
Cc: virtio-fs@redhat.com,
Jiachen Zhang <zhangjiachen.jaycee@bytedance.com>,
qemu-devel@nongnu.org
Subject: [RFC PATCH 4/9] libvhost-user: Add vhost-user message types for sending shared memory and file fds
Date: Wed, 16 Dec 2020 00:21:14 +0800
Message-ID: <20201215162119.27360-5-zhangjiachen.jaycee@bytedance.com>
In-Reply-To: <20201215162119.27360-1-zhangjiachen.jaycee@bytedance.com>

Add libvhost-user support for the 4 new vhost-user message types:

  VHOST_USER_SET_SHM
  VHOST_USER_SET_FD
  VHOST_USER_SLAVE_SHM
  VHOST_USER_SLAVE_FD

VHOST_USER_SET_SHM and VHOST_USER_SET_FD arrive on the master channel
and are dispatched to the vu_set_shm_cb and vu_set_fd_cb callbacks, so
a backend can restore its shared memory regions and opened fds from
QEMU after a crash. VHOST_USER_SLAVE_SHM and VHOST_USER_SLAVE_FD are
sent by the backend over the slave channel, through the new
vu_slave_send_shm(), vu_slave_send_fd_add() and vu_slave_send_fd_del()
helpers, to keep QEMU's copies of that state in sync.
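
For context, a backend might wire these up roughly as follows. This is
an illustrative sketch only, not part of the patch: the backend_* names
are hypothetical and the handler bodies are stubs.

#include "libvhost-user.h"

static bool backend_restore_shm(VuDev *dev, VhostUserMsg *vmsg)
{
    /*
     * Reconnection path: QEMU pushes back the memfd it kept alive,
     * delivered in vmsg->fds[0] and described by vmsg->payload.shm.
     * A real backend would mmap it and rebuild its in-memory state.
     */
    (void)dev;
    (void)vmsg;
    return false;               /* no reply payload needed */
}

static bool backend_restore_fd(VuDev *dev, VhostUserMsg *vmsg)
{
    /* QEMU pushes back a preserved fd, identified by its saved key. */
    (void)dev;
    (void)vmsg;
    return false;
}

static void backend_setup(VuDev *dev, int memfd, uint64_t memfd_size,
                          int map_type, int fd, int fd_key)
{
    /* Handlers for incoming VHOST_USER_SET_SHM / VHOST_USER_SET_FD. */
    vu_set_shm_cb = backend_restore_shm;
    vu_set_fd_cb = backend_restore_fd;

    /* During normal operation, mirror state changes to QEMU. */
    vu_slave_send_shm(dev, memfd, memfd_size, map_type);
    vu_slave_send_fd_add(dev, fd, fd_key);
    vu_slave_send_fd_del(dev, fd_key);
}
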
Signed-off-by: Jiachen Zhang <zhangjiachen.jaycee@bytedance.com>
Signed-off-by: Xie Yongji <xieyongji@bytedance.com>
---
contrib/libvhost-user/libvhost-user.c | 88 +++++++++++++++++++++++++++
contrib/libvhost-user/libvhost-user.h | 70 +++++++++++++++++++++
2 files changed, 158 insertions(+)
diff --git a/contrib/libvhost-user/libvhost-user.c b/contrib/libvhost-user/libvhost-user.c
index bfec8a881a..8c97013e59 100644
--- a/contrib/libvhost-user/libvhost-user.c
+++ b/contrib/libvhost-user/libvhost-user.c
@@ -140,6 +140,8 @@ vu_request_to_string(unsigned int req)
         REQ(VHOST_USER_ADD_MEM_REG),
         REQ(VHOST_USER_REM_MEM_REG),
+        REQ(VHOST_USER_SET_SHM),
+        REQ(VHOST_USER_SET_FD),
         REQ(VHOST_USER_MAX),
     };
 #undef REQ
@@ -1718,6 +1720,77 @@ vu_set_inflight_fd(VuDev *dev, VhostUserMsg *vmsg)
     return false;
 }
 
+bool vu_slave_send_shm(VuDev *dev, int memfd, uint64_t size, int map_type)
+{
+    VhostUserMsg vmsg = {
+        .request = VHOST_USER_SLAVE_SHM,
+        .flags = VHOST_USER_VERSION,
+        .size = sizeof(VhostUserShm),
+        .payload.shm = {
+            .id = map_type,
+            .size = size,
+            .offset = 0,
+        },
+    };
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
+        return false;
+    }
+
+    /* The memfd travels with the message as ancillary data. */
+    vmsg.fd_num = 1;
+    vmsg.fds[0] = memfd;
+
+    pthread_mutex_lock(&dev->slave_mutex);
+    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
+        pthread_mutex_unlock(&dev->slave_mutex);
+        return false;
+    }
+
+    /* Also unlocks the slave_mutex */
+    return vu_process_message_reply(dev, &vmsg);
+}
+
+static bool vu_slave_send_fd(VuDev *dev, int fd, int fd_key, int flag)
+{
+    VhostUserMsg vmsg = {
+        .request = VHOST_USER_SLAVE_FD,
+        .flags = VHOST_USER_VERSION,
+        .size = sizeof(vmsg.payload.fdinfo),
+    };
+
+    if (!vu_has_protocol_feature(dev, VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD)) {
+        return false;
+    }
+
+    vmsg.payload.fdinfo.key = fd_key;
+    vmsg.payload.fdinfo.flag = flag;
+    /* Only an ADD carries an fd; a DEL names the fd by its key alone. */
+    if (flag == VU_FD_FLAG_ADD) {
+        vmsg.fds[0] = fd;
+        vmsg.fd_num = 1;
+    }
+
+    pthread_mutex_lock(&dev->slave_mutex);
+    if (!vu_message_write(dev, dev->slave_fd, &vmsg)) {
+        pthread_mutex_unlock(&dev->slave_mutex);
+        return false;
+    }
+
+    /* Also unlocks the slave_mutex */
+    return vu_process_message_reply(dev, &vmsg);
+}
+
+bool vu_slave_send_fd_add(VuDev *dev, int fd, int fd_key)
+{
+    return vu_slave_send_fd(dev, fd, fd_key, VU_FD_FLAG_ADD);
+}
+
+bool vu_slave_send_fd_del(VuDev *dev, int fd_key)
+{
+    return vu_slave_send_fd(dev, -1, fd_key, VU_FD_FLAG_DEL);
+}
+
 static bool
 vu_handle_vring_kick(VuDev *dev, VhostUserMsg *vmsg)
 {
@@ -1762,6 +1835,9 @@ static bool vu_handle_get_max_memslots(VuDev *dev, VhostUserMsg *vmsg)
     return false;
 }
 
+bool (*vu_set_shm_cb)(VuDev *dev, VhostUserMsg *vmsg);
+bool (*vu_set_fd_cb)(VuDev *dev, VhostUserMsg *vmsg);
+
 static bool
 vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
 {
@@ -1852,6 +1928,18 @@ vu_process_message(VuDev *dev, VhostUserMsg *vmsg)
         return vu_add_mem_reg(dev, vmsg);
     case VHOST_USER_REM_MEM_REG:
         return vu_rem_mem_reg(dev, vmsg);
+    case VHOST_USER_SET_SHM:
+        if (vu_set_shm_cb) {
+            return vu_set_shm_cb(dev, vmsg);
+        } else {
+            return false;
+        }
+    case VHOST_USER_SET_FD:
+        if (vu_set_fd_cb) {
+            return vu_set_fd_cb(dev, vmsg);
+        } else {
+            return false;
+        }
     default:
         vmsg_close_fds(vmsg);
         vu_panic(dev, "Unhandled request: %d", vmsg->request);
diff --git a/contrib/libvhost-user/libvhost-user.h b/contrib/libvhost-user/libvhost-user.h
index a1539dbb69..5448dc5818 100644
--- a/contrib/libvhost-user/libvhost-user.h
+++ b/contrib/libvhost-user/libvhost-user.h
@@ -64,6 +64,7 @@ enum VhostUserProtocolFeature {
     VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
     VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
     VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
+    VHOST_USER_PROTOCOL_F_MAP_SHMFD = 17,
     VHOST_USER_PROTOCOL_F_MAX
 };
@@ -109,6 +110,8 @@ typedef enum VhostUserRequest {
     VHOST_USER_GET_MAX_MEM_SLOTS = 36,
     VHOST_USER_ADD_MEM_REG = 37,
     VHOST_USER_REM_MEM_REG = 38,
+    VHOST_USER_SET_SHM = 41,
+    VHOST_USER_SET_FD = 42,
     VHOST_USER_MAX
 } VhostUserRequest;
@@ -119,6 +122,8 @@ typedef enum VhostUserSlaveRequest {
     VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
     VHOST_USER_SLAVE_VRING_CALL = 4,
     VHOST_USER_SLAVE_VRING_ERR = 5,
+    VHOST_USER_SLAVE_SHM = 6,
+    VHOST_USER_SLAVE_FD = 7,
     VHOST_USER_SLAVE_MAX
 } VhostUserSlaveRequest;
@@ -170,6 +175,29 @@ typedef struct VhostUserInflight {
     uint16_t queue_size;
 } VhostUserInflight;
 
+#ifndef VU_PERSIST_STRUCTS
+#define VU_PERSIST_STRUCTS
+
+typedef struct VhostUserShm {
+    int id;
+    uint64_t size;
+    uint64_t offset;
+} VhostUserShm;
+
+typedef enum VhostUserFdFlag {
+    VU_FD_FLAG_ADD = 0,
+    VU_FD_FLAG_DEL = 1,
+    VU_FD_FLAG_RESTORE = 2,
+    VU_FD_FLAG_MAX
+} VhostUserFdFlag;
+
+typedef struct VhostUserFd {
+    int key;
+    VhostUserFdFlag flag;
+} VhostUserFd;
+#endif
+
+
 #if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
 # define VU_PACKED __attribute__((gcc_struct, packed))
 #else
@@ -197,6 +225,8 @@ typedef struct VhostUserMsg {
         VhostUserConfig config;
         VhostUserVringArea area;
         VhostUserInflight inflight;
+        VhostUserShm shm;
+        VhostUserFd fdinfo;
     } payload;
 
     int fds[VHOST_MEMORY_BASELINE_NREGIONS];
@@ -687,4 +717,44 @@ void vu_queue_get_avail_bytes(VuDev *vdev, VuVirtq *vq, unsigned int *in_bytes,
 bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                           unsigned int out_bytes);
 
+/**
+ * vu_slave_send_shm:
+ * @dev: a VuDev context
+ * @memfd: the shared memory fd to sync with QEMU
+ * @size: shared memory length
+ * @map_type: the lo_map type number
+ *
+ * Sync the map_type region that is shared with QEMU whenever the memfd
+ * or its size changes.
+ *
+ * Returns: true on success.
+ */
+bool vu_slave_send_shm(VuDev *dev, int memfd, uint64_t size, int map_type);
+
+/**
+ * vu_slave_send_fd_add:
+ * @dev: a VuDev context
+ * @fd: the fd to send to QEMU
+ * @fd_key: the fingerprint of the fd
+ *
+ * Send an opened file fd to QEMU.
+ *
+ * Returns: true on success.
+ */
+bool vu_slave_send_fd_add(VuDev *dev, int fd, int fd_key);
+
+/**
+ * vu_slave_send_fd_del:
+ * @dev: a VuDev context
+ * @fd_key: the fingerprint of the fd
+ *
+ * Ask QEMU to drop a previously sent file fd, identified by its key.
+ *
+ * Returns: true on success.
+ */
+bool vu_slave_send_fd_del(VuDev *dev, int fd_key);
+
+extern bool (*vu_set_shm_cb)(VuDev *dev, VhostUserMsg *vmsg);
+extern bool (*vu_set_fd_cb)(VuDev *dev, VhostUserMsg *vmsg);
+
 
 #endif /* LIBVHOST_USER_H */
--
2.20.1