From: Stefano Stabellini <sstabellini@kernel.org>
To: xen-devel@lists.xen.org
Cc: linux-kernel@vger.kernel.org, sstabellini@kernel.org, jgross@suse.com,
	boris.ostrovsky@oracle.com, Stefano Stabellini <stefano@aporeto.com>
Subject: [PATCH v8 05/13] xen/pvcalls: implement connect command
Date: Mon, 30 Oct 2017 15:40:55 -0700
Message-ID: <1509403263-15414-5-git-send-email-sstabellini@kernel.org>
In-Reply-To: <1509403263-15414-1-git-send-email-sstabellini@kernel.org>

Send PVCALLS_CONNECT to the backend. Allocate a new ring and evtchn for
the active socket.

Introduce fields in struct sock_mapping to keep track of active sockets.
Introduce a waitqueue to allow the frontend to wait on data coming from
the backend on the active socket (recvmsg command).

Two mutexes (one for reads and one for writes) will be used to protect
the active socket in and out rings from concurrent accesses.

Signed-off-by: Stefano Stabellini <stefano@aporeto.com>
Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
CC: boris.ostrovsky@oracle.com
CC: jgross@suse.com
---
 drivers/xen/pvcalls-front.c | 158 ++++++++++++++++++++++++++++++++++++++++++++
 drivers/xen/pvcalls-front.h |   2 +
 2 files changed, 160 insertions(+)

diff --git a/drivers/xen/pvcalls-front.c b/drivers/xen/pvcalls-front.c
index 326395d..8d4a43e 100644
--- a/drivers/xen/pvcalls-front.c
+++ b/drivers/xen/pvcalls-front.c
@@ -59,6 +59,18 @@ struct sock_mapping {
 	bool active_socket;
 	struct list_head list;
 	struct socket *sock;
+	union {
+		struct {
+			int irq;
+			grant_ref_t ref;
+			struct pvcalls_data_intf *ring;
+			struct pvcalls_data data;
+			struct mutex in_mutex;
+			struct mutex out_mutex;
+
+			wait_queue_head_t inflight_conn_req;
+		} active;
+	};
 };
 
 static inline int get_request(struct pvcalls_bedata *bedata, int *req_id)
@@ -121,6 +133,18 @@ static void pvcalls_front_free_map(struct pvcalls_bedata *bedata,
 {
 }
 
+static irqreturn_t pvcalls_front_conn_handler(int irq, void *sock_map)
+{
+	struct sock_mapping *map = sock_map;
+
+	if (map == NULL)
+		return IRQ_HANDLED;
+
+	wake_up_interruptible(&map->active.inflight_conn_req);
+
+	return IRQ_HANDLED;
+}
+
 int pvcalls_front_socket(struct socket *sock)
 {
 	struct pvcalls_bedata *bedata;
@@ -196,6 +220,132 @@ int pvcalls_front_socket(struct socket *sock)
 	return ret;
 }
 
+static int create_active(struct sock_mapping *map, int *evtchn)
+{
+	void *bytes;
+	int ret = -ENOMEM, irq = -1, i;
+
+	*evtchn = -1;
+	init_waitqueue_head(&map->active.inflight_conn_req);
+
+	map->active.ring = (struct pvcalls_data_intf *)
+		__get_free_page(GFP_KERNEL | __GFP_ZERO);
+	if (map->active.ring == NULL)
+		goto out_error;
+	map->active.ring->ring_order = PVCALLS_RING_ORDER;
+	bytes = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
+					 PVCALLS_RING_ORDER);
+	if (bytes == NULL)
+		goto out_error;
+	for (i = 0; i < (1 << PVCALLS_RING_ORDER); i++)
+		map->active.ring->ref[i] = gnttab_grant_foreign_access(
+			pvcalls_front_dev->otherend_id,
+			pfn_to_gfn(virt_to_pfn(bytes) + i), 0);
+
+	map->active.ref = gnttab_grant_foreign_access(
+		pvcalls_front_dev->otherend_id,
+		pfn_to_gfn(virt_to_pfn((void *)map->active.ring)), 0);
+
+	map->active.data.in = bytes;
+	map->active.data.out = bytes +
+		XEN_FLEX_RING_SIZE(PVCALLS_RING_ORDER);
+
+	ret = xenbus_alloc_evtchn(pvcalls_front_dev, evtchn);
+	if (ret)
+		goto out_error;
+	irq = bind_evtchn_to_irqhandler(*evtchn, pvcalls_front_conn_handler,
+					0, "pvcalls-frontend", map);
+	if (irq < 0) {
+		ret = irq;
+		goto out_error;
+	}
+
+	map->active.irq = irq;
+	map->active_socket = true;
+	mutex_init(&map->active.in_mutex);
+	mutex_init(&map->active.out_mutex);
+
+	return 0;
+
+out_error:
+	if (irq >= 0)
+		unbind_from_irqhandler(irq, map);
+	else if (*evtchn >= 0)
+		xenbus_free_evtchn(pvcalls_front_dev, *evtchn);
+	kfree(map->active.data.in);
+	kfree(map->active.ring);
+	return ret;
+}
+
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+			  int addr_len, int flags)
+{
+	struct pvcalls_bedata *bedata;
+	struct sock_mapping *map = NULL;
+	struct xen_pvcalls_request *req;
+	int notify, req_id, ret, evtchn;
+
+	if (addr->sa_family != AF_INET || sock->type != SOCK_STREAM)
+		return -EOPNOTSUPP;
+
+	pvcalls_enter();
+	if (!pvcalls_front_dev) {
+		pvcalls_exit();
+		return -ENOTCONN;
+	}
+
+	bedata = dev_get_drvdata(&pvcalls_front_dev->dev);
+
+	map = (struct sock_mapping *)sock->sk->sk_send_head;
+	if (!map) {
+		pvcalls_exit();
+		return -ENOTSOCK;
+	}
+
+	spin_lock(&bedata->socket_lock);
+	ret = get_request(bedata, &req_id);
+	if (ret < 0) {
+		spin_unlock(&bedata->socket_lock);
+		pvcalls_exit();
+		return ret;
+	}
+	ret = create_active(map, &evtchn);
+	if (ret < 0) {
+		spin_unlock(&bedata->socket_lock);
+		pvcalls_exit();
+		return ret;
+	}
+
+	req = RING_GET_REQUEST(&bedata->ring, req_id);
+	req->req_id = req_id;
+	req->cmd = PVCALLS_CONNECT;
+	req->u.connect.id = (uintptr_t)map;
+	req->u.connect.len = addr_len;
+	req->u.connect.flags = flags;
+	req->u.connect.ref = map->active.ref;
+	req->u.connect.evtchn = evtchn;
+	memcpy(req->u.connect.addr, addr, sizeof(*addr));
+
+	map->sock = sock;
+
+	bedata->ring.req_prod_pvt++;
+	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&bedata->ring, notify);
+	spin_unlock(&bedata->socket_lock);
+
+	if (notify)
+		notify_remote_via_irq(bedata->irq);
+
+	wait_event(bedata->inflight_req,
+		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
+
+	/* read req_id, then the content */
+	smp_rmb();
+	ret = bedata->rsp[req_id].ret;
+	bedata->rsp[req_id].req_id = PVCALLS_INVALID_ID;
+	pvcalls_exit();
+	return ret;
+}
+
 static const struct xenbus_device_id pvcalls_front_ids[] = {
 	{ "pvcalls" },
 	{ "" }
@@ -212,6 +362,14 @@ static int pvcalls_front_remove(struct xenbus_device *dev)
 	if (bedata->irq >= 0)
 		unbind_from_irqhandler(bedata->irq, dev);
 
+	list_for_each_entry_safe(map, n, &bedata->socket_mappings, list) {
+		map->sock->sk->sk_send_head = NULL;
+		if (map->active_socket) {
+			map->active.ring->in_error = -EBADF;
+			wake_up_interruptible(&map->active.inflight_conn_req);
+		}
+	}
+
 	smp_mb();
 	while (atomic_read(&pvcalls_refcount) > 0)
 		cpu_relax();
diff --git a/drivers/xen/pvcalls-front.h b/drivers/xen/pvcalls-front.h
index b7dabed..63b0417 100644
--- a/drivers/xen/pvcalls-front.h
+++ b/drivers/xen/pvcalls-front.h
@@ -4,5 +4,7 @@
 #include <linux/net.h>
 
 int pvcalls_front_socket(struct socket *sock);
+int pvcalls_front_connect(struct socket *sock, struct sockaddr *addr,
+			  int addr_len, int flags);
 
 #endif
-- 
1.9.1
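One detail of the wait path above is worth spelling out for readers of the
patch. pvcalls_front_connect() sleeps until the response slot for req_id
carries its own id back, and only then reads the payload; the smp_rmb()
orders those two reads. It pairs with the producer side, which fills in the
payload first and publishes the id last behind a write barrier. A condensed
sketch of the two halves of that contract follows; the producer half is
paraphrased for illustration (in the actual series it lives in the event
handler introduced by the previous patch) and is not quoted from this patch:

	/* Producer: make the payload visible before the req_id that
	 * waiters poll on. Paraphrased, not part of this patch. */
	bedata->rsp[req_id].ret = ret;
	smp_wmb();		/* ret before req_id */
	bedata->rsp[req_id].req_id = req_id;

	/* Consumer, as in pvcalls_front_connect() above: observe the
	 * req_id, then read the payload behind the paired barrier. */
	wait_event(bedata->inflight_req,
		   READ_ONCE(bedata->rsp[req_id].req_id) == req_id);
	smp_rmb();		/* req_id before ret */
	ret = bedata->rsp[req_id].ret;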