From: JinYu
To: dev@dpdk.org
Cc: changpeng.liu@intel.com, maxime.coquelin@redhat.com, tiwei.bie@intel.com,
 zhihong.wang@intel.com, JinYu, Lin Li, Xun Ni, Yu Zhang
Date: Thu, 29 Aug 2019 22:12:19 +0800
Message-Id: <20190829141224.49700-6-jin.yu@intel.com>
X-Mailer: git-send-email 2.17.2
In-Reply-To: <20190829141224.49700-1-jin.yu@intel.com>
References: <20190806182500.22320> <20190829141224.49700-1-jin.yu@intel.com>
Subject: [dpdk-dev] [PATCH v6 05/10] vhost: checkout and cleanup the resubmit inflight information

This patch shows how to check out the inflight ring and construct the
resubmit information; it also covers destroying the resubmit info.
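As a rough, illustrative sketch only (not part of the diff below, and assuming
the rte_vhost.h additions from this series are available): once a backend has a
struct rte_vhost_ring_inflight in hand, it can replay the requests that were
still in flight at the last crash. The resubmit_one() callback is hypothetical;
the list built at kick time is sorted so that the entry with the largest
counter comes first.

    /* Hypothetical helper: replay recorded inflight descriptors. */
    static void
    replay_inflight(struct rte_vhost_ring_inflight *ring,
    		void (*resubmit_one)(uint16_t desc_idx, uint64_t counter))
    {
    	struct rte_vhost_resubmit_info *info = ring->resubmit_inflight;
    	int i;

    	if (info == NULL)
    		return;

    	/* The sorted list is newest first, so walk it from the tail. */
    	for (i = info->resubmit_num - 1; i >= 0; i--)
    		resubmit_one(info->resubmit_list[i].index,
    			     info->resubmit_list[i].counter);
    }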
Signed-off-by: Lin Li
Signed-off-by: Xun Ni
Signed-off-by: Yu Zhang
Signed-off-by: Jin Yu
---
 lib/librte_vhost/rte_vhost.h  |  19 ++++
 lib/librte_vhost/vhost.c      |  29 ++++-
 lib/librte_vhost/vhost.h      |   9 ++
 lib/librte_vhost/vhost_user.c | 197 ++++++++++++++++++++++++++++++++++
 4 files changed, 253 insertions(+), 1 deletion(-)

diff --git a/lib/librte_vhost/rte_vhost.h b/lib/librte_vhost/rte_vhost.h
index 1d6b7a1cf..e090cdfee 100644
--- a/lib/librte_vhost/rte_vhost.h
+++ b/lib/librte_vhost/rte_vhost.h
@@ -145,6 +145,25 @@ struct inflight_info_packed {
 	struct inflight_desc_packed desc[0];
 };
 
+struct rte_vhost_resubmit_desc {
+	uint16_t index;
+	uint64_t counter;
+};
+
+struct rte_vhost_resubmit_info {
+	struct rte_vhost_resubmit_desc *resubmit_list;
+	uint16_t resubmit_num;
+};
+
+struct rte_vhost_ring_inflight {
+	union {
+		struct inflight_info_split *inflight_split;
+		struct inflight_info_packed *inflight_packed;
+	};
+
+	struct rte_vhost_resubmit_info *resubmit_inflight;
+};
+
 struct rte_vhost_vring {
 	union {
 		struct vring_desc *desc;
diff --git a/lib/librte_vhost/vhost.c b/lib/librte_vhost/vhost.c
index 981837b5d..660ac2a07 100644
--- a/lib/librte_vhost/vhost.c
+++ b/lib/librte_vhost/vhost.c
@@ -242,6 +242,31 @@ cleanup_vq(struct vhost_virtqueue *vq, int destroy)
 		close(vq->kickfd);
 }
 
+void
+cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq)
+{
+	if (!(dev->protocol_features &
+	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+		return;
+
+	if (vq_is_packed(dev)) {
+		if (vq->inflight_packed)
+			vq->inflight_packed = NULL;
+	} else {
+		if (vq->inflight_split)
+			vq->inflight_split = NULL;
+	}
+
+	if (vq->resubmit_inflight) {
+		if (vq->resubmit_inflight->resubmit_list) {
+			free(vq->resubmit_inflight->resubmit_list);
+			vq->resubmit_inflight->resubmit_list = NULL;
+		}
+		free(vq->resubmit_inflight);
+		vq->resubmit_inflight = NULL;
+	}
+}
+
 /*
  * Unmap any memory, close any file descriptors and
  * free any memory owned by a device.
@@ -253,8 +278,10 @@ cleanup_device(struct virtio_net *dev, int destroy)
 
 	vhost_backend_cleanup(dev);
 
-	for (i = 0; i < dev->nr_vring; i++)
+	for (i = 0; i < dev->nr_vring; i++) {
 		cleanup_vq(dev->virtqueue[i], destroy);
+		cleanup_vq_inflight(dev, dev->virtqueue[i]);
+	}
 }
 
 void
diff --git a/lib/librte_vhost/vhost.h b/lib/librte_vhost/vhost.h
index 39f645b97..0aa8f82cd 100644
--- a/lib/librte_vhost/vhost.h
+++ b/lib/librte_vhost/vhost.h
@@ -128,6 +128,14 @@ struct vhost_virtqueue {
 	/* Physical address of used ring, for logging */
 	uint64_t		log_guest_addr;
 
+	/* inflight share memory info */
+	union {
+		struct inflight_info_split *inflight_split;
+		struct inflight_info_packed *inflight_packed;
+	};
+	struct rte_vhost_resubmit_info *resubmit_inflight;
+	uint64_t		global_counter;
+
 	uint16_t		nr_zmbuf;
 	uint16_t		zmbuf_size;
 	uint16_t		last_zmbuf_idx;
@@ -474,6 +482,7 @@ void vhost_destroy_device(int);
 void vhost_destroy_device_notify(struct virtio_net *dev);
 
 void cleanup_vq(struct vhost_virtqueue *vq, int destroy);
+void cleanup_vq_inflight(struct virtio_net *dev, struct vhost_virtqueue *vq);
 void free_vq(struct virtio_net *dev, struct vhost_virtqueue *vq);
 
 int alloc_vring_queue(struct virtio_net *dev, uint32_t vring_idx);
diff --git a/lib/librte_vhost/vhost_user.c b/lib/librte_vhost/vhost_user.c
index 647929234..72be891b0 100644
--- a/lib/librte_vhost/vhost_user.c
+++ b/lib/librte_vhost/vhost_user.c
@@ -327,6 +327,7 @@ vhost_user_set_features(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			dev->virtqueue[dev->nr_vring] = NULL;
 			cleanup_vq(vq, 1);
+			cleanup_vq_inflight(dev, vq);
 			free_vq(dev, vq);
 		}
 	}
@@ -1441,6 +1442,188 @@ static int vhost_user_set_vring_err(struct virtio_net **pdev __rte_unused,
 	return RTE_VHOST_MSG_RESULT_OK;
 }
 
+static int
+resubmit_desc_compare(const void *a, const void *b)
+{
+	const struct rte_vhost_resubmit_desc *desc0 =
+		(const struct rte_vhost_resubmit_desc *)a;
+	const struct rte_vhost_resubmit_desc *desc1 =
+		(const struct rte_vhost_resubmit_desc *)b;
+
+	if (desc1->counter > desc0->counter &&
+	    (desc1->counter - desc0->counter) < VIRTQUEUE_MAX_SIZE * 2)
+		return 1;
+
+	return -1;
+}
+
+static int
+vhost_check_queue_inflights_split(struct virtio_net *dev,
+		struct vhost_virtqueue *vq)
+{
+	uint16_t i = 0;
+	uint16_t resubmit_num = 0, last_io, num;
+	struct vring_used *used = vq->used;
+	struct rte_vhost_resubmit_info *resubmit = NULL;
+	struct inflight_info_split *inflight_split;
+
+	if (!(dev->protocol_features &
+	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+		return RTE_VHOST_MSG_RESULT_OK;
+
+	if (!vq->inflight_split)
+		return RTE_VHOST_MSG_RESULT_ERR;
+
+	if (!vq->inflight_split->version) {
+		vq->inflight_split->version = INFLIGHT_VERSION;
+		return RTE_VHOST_MSG_RESULT_OK;
+	}
+
+	inflight_split = vq->inflight_split;
+	vq->resubmit_inflight = NULL;
+	vq->global_counter = 0;
+	last_io = inflight_split->last_inflight_io;
+
+	if (inflight_split->used_idx != used->idx) {
+		inflight_split->desc[last_io].inflight = 0;
+		rte_compiler_barrier();
+		inflight_split->used_idx = used->idx;
+	}
+
+	for (i = 0; i < inflight_split->desc_num; i++) {
+		if (inflight_split->desc[i].inflight == 1)
+			resubmit_num++;
+	}
+
+	vq->last_avail_idx += resubmit_num;
+
+	if (resubmit_num) {
+		resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
+		if (!resubmit) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Failed to allocate memory for resubmit info.\n");
+			return RTE_VHOST_MSG_RESULT_ERR;
+		}
+
+		resubmit->resubmit_list = calloc(resubmit_num,
+			sizeof(struct rte_vhost_resubmit_desc));
+		if (!resubmit->resubmit_list) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Failed to allocate memory for inflight desc.\n");
+			return RTE_VHOST_MSG_RESULT_ERR;
+		}
+
+		num = 0;
+		for (i = 0; i < vq->inflight_split->desc_num; i++) {
+			if (vq->inflight_split->desc[i].inflight == 1) {
+				resubmit->resubmit_list[num].index = i;
+				resubmit->resubmit_list[num].counter =
+					inflight_split->desc[i].counter;
+				num++;
+			}
+		}
+		resubmit->resubmit_num = num;
+
+		if (resubmit->resubmit_num > 1)
+			qsort(resubmit->resubmit_list, resubmit->resubmit_num,
+				sizeof(struct rte_vhost_resubmit_desc),
+				resubmit_desc_compare);
+
+		vq->global_counter = resubmit->resubmit_list[0].counter + 1;
+		vq->resubmit_inflight = resubmit;
+	}
+
+	return RTE_VHOST_MSG_RESULT_OK;
+}
+
+static int
+vhost_check_queue_inflights_packed(struct virtio_net *dev,
+		struct vhost_virtqueue *vq)
+{
+	uint16_t i = 0;
+	uint16_t resubmit_num = 0, old_used_idx, num;
+	struct rte_vhost_resubmit_info *resubmit = NULL;
+	struct inflight_info_packed *inflight_packed;
+
+	if (!(dev->protocol_features &
+	    (1ULL << VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD)))
+		return RTE_VHOST_MSG_RESULT_OK;
+
+	if (!vq->inflight_packed)
+		return RTE_VHOST_MSG_RESULT_ERR;
+
+	if (!vq->inflight_packed->version) {
+		vq->inflight_packed->version = INFLIGHT_VERSION;
+		return RTE_VHOST_MSG_RESULT_OK;
+	}
+
+	inflight_packed = vq->inflight_packed;
+	vq->resubmit_inflight = NULL;
+	vq->global_counter = 0;
+	old_used_idx = inflight_packed->old_used_idx;
+
+	if (inflight_packed->used_idx != old_used_idx) {
+		if (inflight_packed->desc[old_used_idx].inflight == 0) {
+			inflight_packed->old_used_idx =
+				inflight_packed->used_idx;
+			inflight_packed->old_used_wrap_counter =
+				inflight_packed->used_wrap_counter;
+			inflight_packed->old_free_head =
+				inflight_packed->free_head;
+		} else {
+			inflight_packed->used_idx =
+				inflight_packed->old_used_idx;
+			inflight_packed->used_wrap_counter =
+				inflight_packed->old_used_wrap_counter;
+			inflight_packed->free_head =
+				inflight_packed->old_free_head;
+		}
+	}
+
+	for (i = 0; i < inflight_packed->desc_num; i++) {
+		if (inflight_packed->desc[i].inflight == 1)
+			resubmit_num++;
+	}
+
+	if (resubmit_num) {
+		resubmit = calloc(1, sizeof(struct rte_vhost_resubmit_info));
+		if (resubmit == NULL) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Failed to allocate memory for resubmit info.\n");
+			return RTE_VHOST_MSG_RESULT_ERR;
+		}
+
+		resubmit->resubmit_list = calloc(resubmit_num,
+			sizeof(struct rte_vhost_resubmit_desc));
+		if (resubmit->resubmit_list == NULL) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Failed to allocate memory for resubmit desc.\n");
+			return RTE_VHOST_MSG_RESULT_ERR;
+		}
+
+		num = 0;
+		for (i = 0; i < inflight_packed->desc_num; i++) {
+			if (vq->inflight_packed->desc[i].inflight == 1) {
+				resubmit->resubmit_list[num].index = i;
+				resubmit->resubmit_list[num].counter =
+					inflight_packed->desc[i].counter;
+				num++;
+			}
+		}
+		resubmit->resubmit_num = num;
+
+		if (resubmit->resubmit_num > 1)
+			qsort(resubmit->resubmit_list, resubmit->resubmit_num,
+				sizeof(struct rte_vhost_resubmit_desc),
+				resubmit_desc_compare);
+
+		vq->global_counter = resubmit->resubmit_list[0].counter + 1;
+		vq->resubmit_inflight = resubmit;
+	}
+
+	return RTE_VHOST_MSG_RESULT_OK;
+}
+
 static int
 vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
 			int main_fd __rte_unused)
@@ -1482,6 +1665,20 @@ vhost_user_set_vring_kick(struct virtio_net **pdev, struct VhostUserMsg *msg,
 		close(vq->kickfd);
 	vq->kickfd = file.fd;
 
+	if (vq_is_packed(dev)) {
+		if (vhost_check_queue_inflights_packed(dev, vq)) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Failed to check inflights for vq: %d\n", file.index);
+			return RTE_VHOST_MSG_RESULT_ERR;
+		}
+	} else {
+		if (vhost_check_queue_inflights_split(dev, vq)) {
+			RTE_LOG(ERR, VHOST_CONFIG,
+				"Failed to check inflights for vq: %d\n", file.index);
+			return RTE_VHOST_MSG_RESULT_ERR;
+		}
+	}
+
 	return RTE_VHOST_MSG_RESULT_OK;
 }
 
-- 
2.17.2