From: Stefano Stabellini <sstabellini@kernel.org> To: xen-devel@lists.xen.org Cc: linux-kernel@vger.kernel.org, sstabellini@kernel.org, jgross@suse.com, boris.ostrovsky@oracle.com, Stefano Stabellini <stefano@aporeto.com> Subject: [PATCH v2 11/18] xen/pvcalls: implement accept command Date: Fri, 19 May 2017 16:22:52 -0700 [thread overview] Message-ID: <1495236179-27776-11-git-send-email-sstabellini@kernel.org> (raw) In-Reply-To: <1495236179-27776-1-git-send-email-sstabellini@kernel.org> Implement the accept command by calling inet_accept. To avoid blocking in the kernel, call inet_accept(O_NONBLOCK) from a workqueue, which gets scheduled on sk_data_ready (for a passive socket, it means that there are connections to accept). Use the reqcopy field to store the request. Accept the new socket from the delayed work function, create a new sock_mapping for it, map the indexes page and data ring, and reply to the other end. Allocate an ioworker for the socket. Only support one outstanding blocking accept request for every socket at any time. Add a field to sock_mapping to remember the passive socket from which an active socket was created. 
Signed-off-by: Stefano Stabellini <stefano@aporeto.com> CC: boris.ostrovsky@oracle.com CC: jgross@suse.com --- drivers/xen/pvcalls-back.c | 161 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 160 insertions(+), 1 deletion(-) diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index de82bf5..bc641a8 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -66,6 +66,7 @@ struct pvcalls_ioworker { struct sock_mapping { struct list_head list; struct pvcalls_back_priv *priv; + struct sockpass_mapping *sockpass; struct socket *sock; uint64_t id; grant_ref_t ref; @@ -267,10 +268,131 @@ static int pvcalls_back_release(struct xenbus_device *dev, static void __pvcalls_back_accept(struct work_struct *work) { + struct sockpass_mapping *mappass = container_of( + work, struct sockpass_mapping, register_work); + struct sock_mapping *map; + struct pvcalls_ioworker *iow; + struct pvcalls_back_priv *priv; + struct xen_pvcalls_response *rsp; + struct xen_pvcalls_request *req; + void *page = NULL; + int notify; + int ret = -EINVAL; + unsigned long flags; + + priv = mappass->priv; + /* We only need to check the value of "cmd" atomically on read. 
*/ + spin_lock_irqsave(&mappass->copy_lock, flags); + req = &mappass->reqcopy; + if (req->cmd != PVCALLS_ACCEPT) { + spin_unlock_irqrestore(&mappass->copy_lock, flags); + return; + } + spin_unlock_irqrestore(&mappass->copy_lock, flags); + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (map == NULL) { + ret = -ENOMEM; + goto out_error; + } + + map->sock = sock_alloc(); + if (!map->sock) + goto out_error; + + map->ref = req->u.accept.ref; + + map->priv = priv; + map->sockpass = mappass; + map->sock->type = mappass->sock->type; + map->sock->ops = mappass->sock->ops; + map->id = req->u.accept.id_new; + + ret = xenbus_map_ring_valloc(priv->dev, &req->u.accept.ref, 1, &page); + if (ret < 0) + goto out_error; + map->ring = page; + map->ring_order = map->ring->ring_order; + /* first read the order, then map the data ring */ + virt_rmb(); + if (map->ring_order > MAX_RING_ORDER) { + ret = -EFAULT; + goto out_error; + } + ret = xenbus_map_ring_valloc(priv->dev, map->ring->ref, + (1 << map->ring_order), &page); + if (ret < 0) + goto out_error; + map->bytes = page; + + ret = bind_interdomain_evtchn_to_irqhandler(priv->dev->otherend_id, + req->u.accept.evtchn, + pvcalls_back_conn_event, + 0, + "pvcalls-backend", + map); + if (ret < 0) + goto out_error; + map->irq = ret; + + map->data.in = map->bytes; + map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order); + + map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1); + if (!map->ioworker.wq) { + ret = -ENOMEM; + goto out_error; + } + map->ioworker.cpu = get_random_int() % num_online_cpus(); + atomic_set(&map->io, 1); + INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker); + + down(&priv->socket_lock); + list_add_tail(&map->list, &priv->socket_mappings); + up(&priv->socket_lock); + + ret = inet_accept(mappass->sock, map->sock, O_NONBLOCK, true); + if (ret == -EAGAIN) + goto out_error; + + write_lock_bh(&map->sock->sk->sk_callback_lock); + map->saved_data_ready = map->sock->sk->sk_data_ready; + 
map->sock->sk->sk_user_data = map; + map->sock->sk->sk_data_ready = pvcalls_sk_data_ready; + map->sock->sk->sk_state_change = pvcalls_sk_state_change; + write_unlock_bh(&map->sock->sk->sk_callback_lock); + + iow = &map->ioworker; + atomic_inc(&map->read); + atomic_inc(&map->io); + queue_work_on(iow->cpu, iow->wq, &iow->register_work); + +out_error: + if (ret < 0) + pvcalls_back_release_active(priv->dev, priv, map); + + rsp = RING_GET_RESPONSE(&priv->ring, priv->ring.rsp_prod_pvt++); + rsp->req_id = req->req_id; + rsp->cmd = req->cmd; + rsp->u.accept.id = req->u.accept.id; + rsp->ret = ret; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&priv->ring, notify); + if (notify) + notify_remote_via_irq(priv->irq); + + spin_lock_irqsave(&mappass->copy_lock, flags); + mappass->reqcopy.cmd = 0; + spin_unlock_irqrestore(&mappass->copy_lock, flags); } static void pvcalls_pass_sk_data_ready(struct sock *sock) { + struct sockpass_mapping *mappass = sock->sk_user_data; + + if (mappass == NULL) + return; + + queue_work(mappass->wq, &mappass->register_work); } static int pvcalls_back_bind(struct xenbus_device *dev, @@ -372,7 +494,44 @@ static int pvcalls_back_listen(struct xenbus_device *dev, static int pvcalls_back_accept(struct xenbus_device *dev, struct xen_pvcalls_request *req) { - return 0; + struct pvcalls_back_priv *priv; + struct sockpass_mapping *mappass; + int ret = -EINVAL; + struct xen_pvcalls_response *rsp; + unsigned long flags; + + priv = dev_get_drvdata(&dev->dev); + + mappass = radix_tree_lookup(&priv->socketpass_mappings, + req->u.accept.id); + if (mappass == NULL) + goto out_error; + + /* + * Limitation of the current implementation: only support one + * concurrent accept or poll call on one socket. 
+ */ + spin_lock_irqsave(&mappass->copy_lock, flags); + if (mappass->reqcopy.cmd != 0) { + spin_unlock_irqrestore(&mappass->copy_lock, flags); + ret = -EINTR; + goto out_error; + } + + mappass->reqcopy = *req; + spin_unlock_irqrestore(&mappass->copy_lock, flags); + queue_work(mappass->wq, &mappass->register_work); + + /* Tell the caller we don't need to send back a notification yet */ + return -1; + +out_error: + rsp = RING_GET_RESPONSE(&priv->ring, priv->ring.rsp_prod_pvt++); + rsp->req_id = req->req_id; + rsp->cmd = req->cmd; + rsp->u.accept.id = req->u.accept.id; + rsp->ret = ret; + return ret; } static int pvcalls_back_poll(struct xenbus_device *dev, -- 1.9.1
WARNING: multiple messages have this Message-ID (diff)
From: Stefano Stabellini <sstabellini@kernel.org> To: xen-devel@lists.xen.org Cc: jgross@suse.com, Stefano Stabellini <stefano@aporeto.com>, boris.ostrovsky@oracle.com, sstabellini@kernel.org, linux-kernel@vger.kernel.org Subject: [PATCH v2 11/18] xen/pvcalls: implement accept command Date: Fri, 19 May 2017 16:22:52 -0700 [thread overview] Message-ID: <1495236179-27776-11-git-send-email-sstabellini@kernel.org> (raw) In-Reply-To: <1495236179-27776-1-git-send-email-sstabellini@kernel.org> Implement the accept command by calling inet_accept. To avoid blocking in the kernel, call inet_accept(O_NONBLOCK) from a workqueue, which gets scheduled on sk_data_ready (for a passive socket, it means that there are connections to accept). Use the reqcopy field to store the request. Accept the new socket from the delayed work function, create a new sock_mapping for it, map the indexes page and data ring, and reply to the other end. Allocate an ioworker for the socket. Only support one outstanding blocking accept request for every socket at any time. Add a field to sock_mapping to remember the passive socket from which an active socket was created. 
Signed-off-by: Stefano Stabellini <stefano@aporeto.com> CC: boris.ostrovsky@oracle.com CC: jgross@suse.com --- drivers/xen/pvcalls-back.c | 161 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 160 insertions(+), 1 deletion(-) diff --git a/drivers/xen/pvcalls-back.c b/drivers/xen/pvcalls-back.c index de82bf5..bc641a8 100644 --- a/drivers/xen/pvcalls-back.c +++ b/drivers/xen/pvcalls-back.c @@ -66,6 +66,7 @@ struct pvcalls_ioworker { struct sock_mapping { struct list_head list; struct pvcalls_back_priv *priv; + struct sockpass_mapping *sockpass; struct socket *sock; uint64_t id; grant_ref_t ref; @@ -267,10 +268,131 @@ static int pvcalls_back_release(struct xenbus_device *dev, static void __pvcalls_back_accept(struct work_struct *work) { + struct sockpass_mapping *mappass = container_of( + work, struct sockpass_mapping, register_work); + struct sock_mapping *map; + struct pvcalls_ioworker *iow; + struct pvcalls_back_priv *priv; + struct xen_pvcalls_response *rsp; + struct xen_pvcalls_request *req; + void *page = NULL; + int notify; + int ret = -EINVAL; + unsigned long flags; + + priv = mappass->priv; + /* We only need to check the value of "cmd" atomically on read. 
*/ + spin_lock_irqsave(&mappass->copy_lock, flags); + req = &mappass->reqcopy; + if (req->cmd != PVCALLS_ACCEPT) { + spin_unlock_irqrestore(&mappass->copy_lock, flags); + return; + } + spin_unlock_irqrestore(&mappass->copy_lock, flags); + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (map == NULL) { + ret = -ENOMEM; + goto out_error; + } + + map->sock = sock_alloc(); + if (!map->sock) + goto out_error; + + map->ref = req->u.accept.ref; + + map->priv = priv; + map->sockpass = mappass; + map->sock->type = mappass->sock->type; + map->sock->ops = mappass->sock->ops; + map->id = req->u.accept.id_new; + + ret = xenbus_map_ring_valloc(priv->dev, &req->u.accept.ref, 1, &page); + if (ret < 0) + goto out_error; + map->ring = page; + map->ring_order = map->ring->ring_order; + /* first read the order, then map the data ring */ + virt_rmb(); + if (map->ring_order > MAX_RING_ORDER) { + ret = -EFAULT; + goto out_error; + } + ret = xenbus_map_ring_valloc(priv->dev, map->ring->ref, + (1 << map->ring_order), &page); + if (ret < 0) + goto out_error; + map->bytes = page; + + ret = bind_interdomain_evtchn_to_irqhandler(priv->dev->otherend_id, + req->u.accept.evtchn, + pvcalls_back_conn_event, + 0, + "pvcalls-backend", + map); + if (ret < 0) + goto out_error; + map->irq = ret; + + map->data.in = map->bytes; + map->data.out = map->bytes + XEN_FLEX_RING_SIZE(map->ring_order); + + map->ioworker.wq = alloc_workqueue("pvcalls_io", WQ_UNBOUND, 1); + if (!map->ioworker.wq) { + ret = -ENOMEM; + goto out_error; + } + map->ioworker.cpu = get_random_int() % num_online_cpus(); + atomic_set(&map->io, 1); + INIT_WORK(&map->ioworker.register_work, pvcalls_back_ioworker); + + down(&priv->socket_lock); + list_add_tail(&map->list, &priv->socket_mappings); + up(&priv->socket_lock); + + ret = inet_accept(mappass->sock, map->sock, O_NONBLOCK, true); + if (ret == -EAGAIN) + goto out_error; + + write_lock_bh(&map->sock->sk->sk_callback_lock); + map->saved_data_ready = map->sock->sk->sk_data_ready; + 
map->sock->sk->sk_user_data = map; + map->sock->sk->sk_data_ready = pvcalls_sk_data_ready; + map->sock->sk->sk_state_change = pvcalls_sk_state_change; + write_unlock_bh(&map->sock->sk->sk_callback_lock); + + iow = &map->ioworker; + atomic_inc(&map->read); + atomic_inc(&map->io); + queue_work_on(iow->cpu, iow->wq, &iow->register_work); + +out_error: + if (ret < 0) + pvcalls_back_release_active(priv->dev, priv, map); + + rsp = RING_GET_RESPONSE(&priv->ring, priv->ring.rsp_prod_pvt++); + rsp->req_id = req->req_id; + rsp->cmd = req->cmd; + rsp->u.accept.id = req->u.accept.id; + rsp->ret = ret; + RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&priv->ring, notify); + if (notify) + notify_remote_via_irq(priv->irq); + + spin_lock_irqsave(&mappass->copy_lock, flags); + mappass->reqcopy.cmd = 0; + spin_unlock_irqrestore(&mappass->copy_lock, flags); } static void pvcalls_pass_sk_data_ready(struct sock *sock) { + struct sockpass_mapping *mappass = sock->sk_user_data; + + if (mappass == NULL) + return; + + queue_work(mappass->wq, &mappass->register_work); } static int pvcalls_back_bind(struct xenbus_device *dev, @@ -372,7 +494,44 @@ static int pvcalls_back_listen(struct xenbus_device *dev, static int pvcalls_back_accept(struct xenbus_device *dev, struct xen_pvcalls_request *req) { - return 0; + struct pvcalls_back_priv *priv; + struct sockpass_mapping *mappass; + int ret = -EINVAL; + struct xen_pvcalls_response *rsp; + unsigned long flags; + + priv = dev_get_drvdata(&dev->dev); + + mappass = radix_tree_lookup(&priv->socketpass_mappings, + req->u.accept.id); + if (mappass == NULL) + goto out_error; + + /* + * Limitation of the current implementation: only support one + * concurrent accept or poll call on one socket. 
+ */ + spin_lock_irqsave(&mappass->copy_lock, flags); + if (mappass->reqcopy.cmd != 0) { + spin_unlock_irqrestore(&mappass->copy_lock, flags); + ret = -EINTR; + goto out_error; + } + + mappass->reqcopy = *req; + spin_unlock_irqrestore(&mappass->copy_lock, flags); + queue_work(mappass->wq, &mappass->register_work); + + /* Tell the caller we don't need to send back a notification yet */ + return -1; + +out_error: + rsp = RING_GET_RESPONSE(&priv->ring, priv->ring.rsp_prod_pvt++); + rsp->req_id = req->req_id; + rsp->cmd = req->cmd; + rsp->u.accept.id = req->u.accept.id; + rsp->ret = ret; + return ret; } static int pvcalls_back_poll(struct xenbus_device *dev, -- 1.9.1 _______________________________________________ Xen-devel mailing list Xen-devel@lists.xen.org https://lists.xen.org/xen-devel
next prev parent reply other threads:[~2017-05-19 23:25 UTC|newest] Thread overview: 69+ messages / expand[flat|nested] mbox.gz Atom feed top 2017-05-19 23:17 [PATCH v2 00/18] introduce the Xen PV Calls backend Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 01/18] xen: introduce the pvcalls interface header Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 02/18] xen/pvcalls: introduce the pvcalls xenbus backend Stefano Stabellini 2017-05-25 22:04 ` Boris Ostrovsky 2017-05-25 22:04 ` Boris Ostrovsky 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 03/18] xen/pvcalls: initialize the module and register the " Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-25 22:12 ` Boris Ostrovsky 2017-05-25 22:12 ` Boris Ostrovsky 2017-05-19 23:22 ` [PATCH v2 04/18] xen/pvcalls: xenbus state handling Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-26 0:12 ` Boris Ostrovsky 2017-05-26 0:12 ` Boris Ostrovsky 2017-06-01 20:54 ` Stefano Stabellini 2017-06-01 20:54 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 05/18] xen/pvcalls: connect to a frontend Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-26 15:23 ` Boris Ostrovsky 2017-05-26 15:23 ` Boris Ostrovsky 2017-06-01 21:06 ` Stefano Stabellini 2017-06-01 21:06 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 06/18] xen/pvcalls: handle commands from the frontend Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-26 15:32 ` Boris Ostrovsky 2017-05-26 15:32 ` Boris Ostrovsky 2017-06-02 18:21 ` Stefano Stabellini 2017-06-02 18:21 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 07/18] xen/pvcalls: implement socket command Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-26 15:53 ` Boris Ostrovsky 2017-05-26 15:53 ` Boris Ostrovsky 2017-06-02 18:35 ` Stefano Stabellini 2017-06-02 18:35 ` Stefano Stabellini 2017-05-26 18:58 ` Boris Ostrovsky 2017-05-26 18:58 ` Boris Ostrovsky 2017-06-02 18:41 ` 
Stefano Stabellini 2017-06-02 18:41 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 08/18] xen/pvcalls: implement connect command Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 09/18] xen/pvcalls: implement bind command Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 10/18] xen/pvcalls: implement listen command Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini [this message] 2017-05-19 23:22 ` [PATCH v2 11/18] xen/pvcalls: implement accept command Stefano Stabellini 2017-05-26 18:18 ` Boris Ostrovsky 2017-05-26 18:18 ` Boris Ostrovsky 2017-06-02 19:19 ` Stefano Stabellini 2017-06-02 19:19 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 12/18] xen/pvcalls: implement poll command Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 13/18] xen/pvcalls: implement release command Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 14/18] xen/pvcalls: disconnect and module_exit Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 15/18] xen/pvcalls: implement the ioworker functions Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 16/18] xen/pvcalls: implement read Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 17/18] xen/pvcalls: implement write Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-19 23:22 ` [PATCH v2 18/18] xen: introduce a Kconfig option to enable the pvcalls backend Stefano Stabellini 2017-05-19 23:22 ` Stefano Stabellini 2017-05-25 22:02 ` [PATCH v2 01/18] xen: introduce the pvcalls interface header Boris Ostrovsky 2017-06-01 20:54 ` Stefano Stabellini 2017-06-01 20:54 ` Stefano Stabellini 2017-05-25 22:02 ` Boris Ostrovsky
Reply instructions: You may reply publicly to this message via plain-text email using any one of the following methods: * Save the following mbox file, import it into your mail client, and reply-to-all from there: mbox Avoid top-posting and favor interleaved quoting: https://en.wikipedia.org/wiki/Posting_style#Interleaved_style * Reply using the --to, --cc, and --in-reply-to switches of git-send-email(1): git send-email \ --in-reply-to=1495236179-27776-11-git-send-email-sstabellini@kernel.org \ --to=sstabellini@kernel.org \ --cc=boris.ostrovsky@oracle.com \ --cc=jgross@suse.com \ --cc=linux-kernel@vger.kernel.org \ --cc=stefano@aporeto.com \ --cc=xen-devel@lists.xen.org \ /path/to/YOUR_REPLY https://kernel.org/pub/software/scm/git/docs/git-send-email.html * If your mail client supports setting the In-Reply-To header via mailto: links, try the mailto: link. Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes, see mirroring instructions on how to clone and mirror all data and code used by this external index.