From: xuan.ding@intel.com
To: maxime.coquelin@redhat.com, chenbo.xia@intel.com
Cc: dev@dpdk.org, jiayu.hu@intel.com, xingguang.he@intel.com,
	yvonnex.yang@intel.com, cheng1.jiang@intel.com,
	yuanx.wang@intel.com, wenwux.ma@intel.com,
	Xuan Ding <xuan.ding@intel.com>
Subject: [PATCH v3 1/2] vhost: introduce DMA vchannel unconfiguration
Date: Thu, 29 Sep 2022 01:32:42 +0000
Message-ID: <20220929013243.15889-2-xuan.ding@intel.com>
In-Reply-To: <20220929013243.15889-1-xuan.ding@intel.com>

From: Xuan Ding <xuan.ding@intel.com>

This patch adds a new API, rte_vhost_async_dma_unconfigure(), to unconfigure
DMA vchannels in the vhost async data path.

Lock protection is also added to protect DMA vchannel configuration and
unconfiguration from concurrent calls.
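
The teardown order assumed here is sketched below for illustration only;
vid, queue_id, dma_id and vchan_id are placeholders for values the
application already tracks, and the error handling is just an example:

	/* Deregister the async channel for every queue still using the
	 * DMA vchannel, then release the vchannel's tracking data. */
	if (rte_vhost_async_channel_unregister(vid, queue_id) != 0)
		rte_exit(EXIT_FAILURE, "failed to unregister async channel\n");

	if (rte_vhost_async_dma_unconfigure(dma_id, vchan_id) != 0)
		rte_exit(EXIT_FAILURE, "failed to unconfigure DMA %d vchannel %u\n",
			dma_id, vchan_id);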

Signed-off-by: Xuan Ding <xuan.ding@intel.com>
---
 doc/guides/prog_guide/vhost_lib.rst    |  6 +++
 doc/guides/rel_notes/release_22_11.rst |  3 ++
 lib/vhost/rte_vhost_async.h            | 18 +++++++
 lib/vhost/version.map                  |  3 ++
 lib/vhost/vhost.c                      | 69 ++++++++++++++++++++++++--
 5 files changed, 94 insertions(+), 5 deletions(-)

diff --git a/doc/guides/prog_guide/vhost_lib.rst b/doc/guides/prog_guide/vhost_lib.rst
index bad4d819e1..d3cef978d0 100644
--- a/doc/guides/prog_guide/vhost_lib.rst
+++ b/doc/guides/prog_guide/vhost_lib.rst
@@ -323,6 +323,12 @@ The following is an overview of some key Vhost API functions:
   Get device type of vDPA device, such as VDPA_DEVICE_TYPE_NET,
   VDPA_DEVICE_TYPE_BLK.
 
+* ``rte_vhost_async_dma_unconfigure(dma_id, vchan_id)``
+
+  Clean up DMA vChannels that are no longer in use. This function
+  needs to be called after the deregistration of the async DMA
+  vChannel has completed.
+
 Vhost-user Implementations
 --------------------------
 
diff --git a/doc/guides/rel_notes/release_22_11.rst b/doc/guides/rel_notes/release_22_11.rst
index 684bf74596..a641d8a6b8 100644
--- a/doc/guides/rel_notes/release_22_11.rst
+++ b/doc/guides/rel_notes/release_22_11.rst
@@ -67,6 +67,9 @@ New Features
 
   * Added support to set device link down/up.
 
+* **Added DMA vChannel unconfiguration for async vhost.**
+
+  * Added support to unconfigure DMA vChannels that have been unregistered.
 
 Removed Items
 -------------
diff --git a/lib/vhost/rte_vhost_async.h b/lib/vhost/rte_vhost_async.h
index 1db2a10124..6ee4f7258d 100644
--- a/lib/vhost/rte_vhost_async.h
+++ b/lib/vhost/rte_vhost_async.h
@@ -266,6 +266,24 @@ rte_vhost_async_try_dequeue_burst(int vid, uint16_t queue_id,
 	struct rte_mempool *mbuf_pool, struct rte_mbuf **pkts, uint16_t count,
 	int *nr_inflight, int16_t dma_id, uint16_t vchan_id);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change, or be removed, without prior notice.
+ *
+ * Unconfigure DMA vChannels in the asynchronous data path.
+ * This function should be called after the DMA vChannel has been unregistered.
+ *
+ * @param dma_id
+ *  the identifier of DMA device
+ * @param vchan_id
+ *  the identifier of virtual DMA channel
+ * @return
+ *  0 on success, and -1 on failure
+ */
+__rte_experimental
+int
+rte_vhost_async_dma_unconfigure(int16_t dma_id, uint16_t vchan_id);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/lib/vhost/version.map b/lib/vhost/version.map
index 18574346d5..013a6bcc42 100644
--- a/lib/vhost/version.map
+++ b/lib/vhost/version.map
@@ -96,6 +96,9 @@ EXPERIMENTAL {
 	rte_vhost_async_try_dequeue_burst;
 	rte_vhost_driver_get_vdpa_dev_type;
 	rte_vhost_clear_queue;
+
+	# added in 22.11
+	rte_vhost_async_dma_unconfigure;
 };
 
 INTERNAL {
diff --git a/lib/vhost/vhost.c b/lib/vhost/vhost.c
index aa671f47a3..f0f337bf5b 100644
--- a/lib/vhost/vhost.c
+++ b/lib/vhost/vhost.c
@@ -23,6 +23,7 @@
 
 struct virtio_net *vhost_devices[RTE_MAX_VHOST_DEVICE];
 pthread_mutex_t vhost_dev_lock = PTHREAD_MUTEX_INITIALIZER;
+static rte_spinlock_t vhost_dma_lock = RTE_SPINLOCK_INITIALIZER;
 
 struct vhost_vq_stats_name_off {
 	char name[RTE_VHOST_STATS_NAME_SIZE];
@@ -1850,19 +1851,20 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 	void *pkts_cmpl_flag_addr;
 	uint16_t max_desc;
 
+	rte_spinlock_lock(&vhost_dma_lock);
 	if (!rte_dma_is_valid(dma_id)) {
 		VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
-		return -1;
+		goto error;
 	}
 
 	if (rte_dma_info_get(dma_id, &info) != 0) {
 		VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
-		return -1;
+		goto error;
 	}
 
 	if (vchan_id >= info.max_vchans) {
 		VHOST_LOG_CONFIG("dma", ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
-		return -1;
+		goto error;
 	}
 
 	if (!dma_copy_track[dma_id].vchans) {
@@ -1874,7 +1876,7 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 			VHOST_LOG_CONFIG("dma", ERR,
 				"Failed to allocate vchans for DMA %d vChannel %u.\n",
 				dma_id, vchan_id);
-			return -1;
+			goto error;
 		}
 
 		dma_copy_track[dma_id].vchans = vchans;
@@ -1883,6 +1885,7 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 	if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr) {
 		VHOST_LOG_CONFIG("dma", INFO, "DMA %d vChannel %u already registered.\n",
 			dma_id, vchan_id);
+		rte_spinlock_unlock(&vhost_dma_lock);
 		return 0;
 	}
 
@@ -1900,7 +1903,7 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 			rte_free(dma_copy_track[dma_id].vchans);
 			dma_copy_track[dma_id].vchans = NULL;
 		}
-		return -1;
+		goto error;
 	}
 
 	dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = pkts_cmpl_flag_addr;
@@ -1908,7 +1911,12 @@ rte_vhost_async_dma_configure(int16_t dma_id, uint16_t vchan_id)
 	dma_copy_track[dma_id].vchans[vchan_id].ring_mask = max_desc - 1;
 	dma_copy_track[dma_id].nr_vchans++;
 
+	rte_spinlock_unlock(&vhost_dma_lock);
 	return 0;
+
+error:
+	rte_spinlock_unlock(&vhost_dma_lock);
+	return -1;
 }
 
 int
@@ -2097,5 +2105,56 @@ int rte_vhost_vring_stats_reset(int vid, uint16_t queue_id)
 	return 0;
 }
 
+int
+rte_vhost_async_dma_unconfigure(int16_t dma_id, uint16_t vchan_id)
+{
+	struct rte_dma_info info;
+	uint16_t max_desc;
+	int i;
+
+	rte_spinlock_lock(&vhost_dma_lock);
+	if (!rte_dma_is_valid(dma_id)) {
+		VHOST_LOG_CONFIG("dma", ERR, "DMA %d is not found.\n", dma_id);
+		goto error;
+	}
+
+	if (rte_dma_info_get(dma_id, &info) != 0) {
+		VHOST_LOG_CONFIG("dma", ERR, "Fail to get DMA %d information.\n", dma_id);
+		goto error;
+	}
+
+	if (vchan_id >= info.max_vchans) {
+		VHOST_LOG_CONFIG("dma", ERR, "Invalid DMA %d vChannel %u.\n", dma_id, vchan_id);
+		goto error;
+	}
+
+	max_desc = info.max_desc;
+	for (i = 0; i < max_desc; i++) {
+		if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr[i] != NULL) {
+			rte_free(dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr[i]);
+			dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr[i] = NULL;
+		}
+	}
+
+	if (dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr != NULL) {
+		rte_free(dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr);
+		dma_copy_track[dma_id].vchans[vchan_id].pkts_cmpl_flag_addr = NULL;
+	}
+
+	if (dma_copy_track[dma_id].vchans != NULL) {
+		rte_free(dma_copy_track[dma_id].vchans);
+		dma_copy_track[dma_id].vchans = NULL;
+	}
+
+	dma_copy_track[dma_id].nr_vchans--;
+
+	rte_spinlock_unlock(&vhost_dma_lock);
+	return 0;
+
+error:
+	rte_spinlock_unlock(&vhost_dma_lock);
+	return -1;
+}
+
 RTE_LOG_REGISTER_SUFFIX(vhost_config_log_level, config, INFO);
 RTE_LOG_REGISTER_SUFFIX(vhost_data_log_level, data, WARNING);
-- 
2.17.1


Thread overview: 43+ messages
2022-08-14 14:04 [PATCH v1 0/2] vhost: introduce DMA vchannel unconfiguration xuan.ding
2022-08-14 14:04 ` [PATCH v1 1/2] " xuan.ding
2022-08-14 14:04 ` [PATCH v1 2/2] example/vhost: unconfigure DMA vchannel xuan.ding
2022-09-06  5:21 ` [PATCH v2 0/2] vhost: introduce DMA vchannel unconfiguration xuan.ding
2022-09-06  5:21   ` [PATCH v2 1/2] " xuan.ding
2022-09-26  6:06     ` Xia, Chenbo
2022-09-26  6:43       ` Ding, Xuan
2022-09-06  5:21   ` [PATCH v2 2/2] examples/vhost: unconfigure DMA vchannel xuan.ding
2022-09-29  1:32 ` [PATCH v3 0/2] vhost: introduce DMA vchannel unconfiguration xuan.ding
2022-09-29  1:32   ` xuan.ding [this message]
2022-09-29  1:32   ` [PATCH v3 2/2] examples/vhost: unconfigure DMA vchannel xuan.ding
2022-09-29  8:27     ` Xia, Chenbo
2022-10-08  0:38       ` Ding, Xuan
2022-10-13  6:40 ` [PATCH v4 0/2] vhost: introduce DMA vchannel unconfiguration xuan.ding
2022-10-13  6:40   ` [PATCH v4 1/2] " xuan.ding
2022-10-13  8:01     ` Maxime Coquelin
2022-10-13  8:45       ` Ding, Xuan
2022-10-13  6:40   ` [PATCH v4 2/2] examples/vhost: unconfigure DMA vchannel xuan.ding
2022-10-13  8:07     ` Maxime Coquelin
2022-10-13  8:49       ` Ding, Xuan
2022-10-13  9:27 ` [PATCH v5 0/2] vhost: introduce DMA vchannel unconfiguration xuan.ding
2022-10-13  9:27   ` [PATCH v5 1/2] " xuan.ding
2022-10-13  9:27   ` [PATCH v5 2/2] examples/vhost: unconfigure DMA vchannel xuan.ding
2022-10-18 15:22 ` [PATCH v6 0/2] vhost: introduce DMA vchannel unconfiguration xuan.ding
2022-10-18 15:22   ` [PATCH v6 1/2] " xuan.ding
2022-10-19  9:28     ` Xia, Chenbo
2022-10-18 15:22   ` [PATCH v6 2/2] examples/vhost: unconfigure DMA vchannel xuan.ding
2022-10-19  2:57     ` Ling, WeiX
2022-10-20  9:11 ` [PATCH v7 0/2] vhost: introduce DMA vchannel unconfiguration xuan.ding
2022-10-20  9:11   ` [PATCH v7 1/2] " xuan.ding
2022-10-21  8:09     ` Maxime Coquelin
2022-10-21  8:22       ` Ding, Xuan
2022-10-20  9:11   ` [PATCH v7 2/2] examples/vhost: unconfigure DMA vchannel xuan.ding
2022-10-21  8:12     ` Maxime Coquelin
2022-10-25  8:25 ` [PATCH v8 0/2] vhost: introduce DMA vchannel unconfiguration xuan.ding
2022-10-25  8:25   ` [PATCH v8 1/2] " xuan.ding
2022-10-26  5:13     ` Maxime Coquelin
2022-10-26  9:02     ` Xia, Chenbo
2022-10-25  8:25   ` [PATCH v8 2/2] examples/vhost: unconfigure DMA vchannel xuan.ding
2022-10-25  9:56     ` Ling, WeiX
2022-10-26  5:14     ` Maxime Coquelin
2022-10-26  9:03     ` Xia, Chenbo
2022-10-26  9:07   ` [PATCH v8 0/2] vhost: introduce DMA vchannel unconfiguration Xia, Chenbo
