* [PATCH] vhost: add support for dynamic vhost PMD creation
From: Ferruh Yigit @ 2016-05-05 18:11 UTC
  To: dev; +Cc: Tetsuya Mukawa, Yuanhan Liu, Ferruh Yigit

Add rte_eth_from_vhost() API to create vhost PMD dynamically from
applications.
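
For example, an application could then create a vhost port at runtime
roughly like this (an illustrative sketch only; the pool parameters, socket
path and names are made up, and error handling is trimmed):

	/* needs rte_ethdev.h, rte_mbuf.h and rte_eth_vhost.h */
	char iface[] = "/tmp/sock0";
	struct rte_mempool *mb_pool;
	int port_id;

	/* mbuf pool the PMD will receive packets into */
	mb_pool = rte_pktmbuf_pool_create("vhost_pool", 8191, 250, 0,
			RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());

	/* create the vhost ethdev backed by the given socket path */
	port_id = rte_eth_from_vhost("vhost0", iface, rte_socket_id(), mb_pool);
	if (port_id < 0)
		rte_exit(EXIT_FAILURE, "cannot create vhost port\n");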

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
---
 drivers/net/vhost/rte_eth_vhost.c           | 117 ++++++++++++++++++++++++++++
 drivers/net/vhost/rte_eth_vhost.h           |  19 +++++
 drivers/net/vhost/rte_pmd_vhost_version.map |   7 ++
 3 files changed, 143 insertions(+)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 310cbef..c860ab8 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -796,6 +796,123 @@ error:
 	return -1;
 }
 
+static int
+rte_eth_from_vhost_create(const char *name, char *iface_name,
+		const unsigned int numa_node, struct rte_mempool *mb_pool)
+{
+	struct rte_eth_dev_data *data = NULL;
+	struct rte_eth_dev *eth_dev = NULL;
+	struct pmd_internal *internal = NULL;
+	struct internal_list *list;
+	int nb_queues = 1;
+	uint16_t nb_rx_queues = nb_queues;
+	uint16_t nb_tx_queues = nb_queues;
+	struct vhost_queue *vq;
+	int i;
+
+	int port_id = eth_dev_vhost_create(name, iface_name, nb_queues,
+			numa_node);
+
+	if (port_id < 0)
+		return -1;
+
+	eth_dev = &rte_eth_devices[port_id];
+	data = eth_dev->data;
+	internal = data->dev_private;
+	list = find_internal_resource(internal->iface_name);
+
+	data->rx_queues = rte_zmalloc_socket(name,
+			sizeof(void *) * nb_rx_queues, 0, numa_node);
+	if (data->rx_queues == NULL)
+		goto error;
+
+	data->tx_queues = rte_zmalloc_socket(name,
+			sizeof(void *) * nb_tx_queues, 0, numa_node);
+	if (data->tx_queues == NULL)
+		goto error;
+
+	for (i = 0; i < nb_rx_queues; i++) {
+		vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
+				RTE_CACHE_LINE_SIZE, numa_node);
+		if (vq == NULL) {
+			RTE_LOG(ERR, PMD,
+				"Failed to allocate memory for rx queue\n");
+			goto error;
+		}
+		vq->mb_pool = mb_pool;
+		vq->virtqueue_id = i * VIRTIO_QNUM + VIRTIO_TXQ;
+		data->rx_queues[i] = vq;
+	}
+
+	for (i = 0; i < nb_tx_queues; i++) {
+		vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
+				RTE_CACHE_LINE_SIZE, numa_node);
+		if (vq == NULL) {
+			RTE_LOG(ERR, PMD,
+				"Failed to allocate memory for tx queue\n");
+			goto error;
+		}
+		vq->mb_pool = mb_pool;
+		vq->virtqueue_id = i * VIRTIO_QNUM + VIRTIO_RXQ;
+		data->tx_queues[i] = vq;
+	}
+
+	return port_id;
+
+error:
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	if (internal)
+		free(internal->dev_name);
+	rte_free(vring_states[port_id]);
+	rte_free(data->mac_addrs);
+	rte_eth_dev_release_port(eth_dev);
+	if (data->rx_queues) {
+		for (i = 0; i < nb_rx_queues; i++) {
+			vq = data->rx_queues[i];
+			rte_free(vq);
+		}
+		rte_free(data->rx_queues);
+	}
+	if (data->tx_queues) {
+		for (i = 0; i < nb_tx_queues; i++) {
+			vq = data->tx_queues[i];
+			rte_free(vq);
+		}
+		rte_free(data->tx_queues);
+	}
+	rte_free(internal);
+	rte_free(list);
+	rte_free(data);
+
+	return -1;
+}
+
+int
+rte_eth_from_vhost(const char *name, char *iface_name,
+		const unsigned int numa_node, struct rte_mempool *mb_pool)
+{
+	int port_id;
+	int ret;
+
+	port_id = rte_eth_from_vhost_create(name, iface_name, numa_node,
+			mb_pool);
+	if (port_id < 0)
+		return port_id;
+
+	ret = rte_vhost_driver_register(iface_name);
+	if (ret < 0)
+		return ret;
+
+	ret = vhost_driver_session_start();
+	if (ret < 0)
+		return ret;
+
+	return port_id;
+}
+
 static inline int
 open_iface(const char *key __rte_unused, const char *value, void *extra_args)
 {
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index ff5d877..624978c 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -102,6 +102,25 @@ struct rte_eth_vhost_queue_event {
 int rte_eth_vhost_get_queue_event(uint8_t port_id,
 		struct rte_eth_vhost_queue_event *event);
 
+/**
+ * API to create vhost PMD
+ *
+ * @param name
+ *  Vhost device name
+ * @param iface_name
+ *  Vhost interface name
+ * @param numa_node
+ *  Socket id
+ * @param mb_pool
+ *  Memory pool
+ *
+ * @return
+ *  - On success, port_id.
+ *  - On failure, a negative value.
+ */
+int rte_eth_from_vhost(const char *name, char *iface_name,
+		const unsigned int numa_node, struct rte_mempool *mb_pool);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/vhost/rte_pmd_vhost_version.map b/drivers/net/vhost/rte_pmd_vhost_version.map
index 65bf3a8..bb2fe29 100644
--- a/drivers/net/vhost/rte_pmd_vhost_version.map
+++ b/drivers/net/vhost/rte_pmd_vhost_version.map
@@ -8,3 +8,10 @@ DPDK_16.04 {
 
 	local: *;
 };
+
+DPDK_16.07 {
+	global:
+
+	rte_eth_from_vhost;
+
+} DPDK_16.04;
-- 
2.5.5


* Re: [PATCH] vhost: add support for dynamic vhost PMD creation
From: Yuanhan Liu @ 2016-05-09 21:31 UTC
  To: Ferruh Yigit; +Cc: dev, Tetsuya Mukawa

On Thu, May 05, 2016 at 07:11:09PM +0100, Ferruh Yigit wrote:
> Add rte_eth_from_vhost() API to create vhost PMD dynamically from
> applications.

This sounds like a good idea to me. It would be better if you named a
concrete use case for it, though.

> 
> Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>
> ---
>  drivers/net/vhost/rte_eth_vhost.c           | 117 ++++++++++++++++++++++++++++
>  drivers/net/vhost/rte_eth_vhost.h           |  19 +++++
>  drivers/net/vhost/rte_pmd_vhost_version.map |   7 ++
>  3 files changed, 143 insertions(+)
> 
> diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
> index 310cbef..c860ab8 100644
> --- a/drivers/net/vhost/rte_eth_vhost.c
> +++ b/drivers/net/vhost/rte_eth_vhost.c
> @@ -796,6 +796,123 @@ error:
>  	return -1;
>  }
>  
> +static int
> +rte_eth_from_vhost_create(const char *name, char *iface_name,

It's not a public function, so don't name it with prefix "rte_".

> +		const unsigned int numa_node, struct rte_mempool *mb_pool)
> +{
> +	struct rte_eth_dev_data *data = NULL;
> +	struct rte_eth_dev *eth_dev = NULL;
> +	struct pmd_internal *internal = NULL;
> +	struct internal_list *list;
> +	int nb_queues = 1;
> +	uint16_t nb_rx_queues = nb_queues;
> +	uint16_t nb_tx_queues = nb_queues;
> +	struct vhost_queue *vq;
> +	int i;
> +
> +	int port_id = eth_dev_vhost_create(name, iface_name, nb_queues,
> +			numa_node);
> +
> +	if (port_id < 0)
> +		return -1;
> +
> +	eth_dev = &rte_eth_devices[port_id];
> +	data = eth_dev->data;
> +	internal = data->dev_private;
> +	list = find_internal_resource(internal->iface_name);
> +
> +	data->rx_queues = rte_zmalloc_socket(name,
> +			sizeof(void *) * nb_rx_queues, 0, numa_node);
> +	if (data->rx_queues == NULL)
> +		goto error;
> +
> +	data->tx_queues = rte_zmalloc_socket(name,
> +			sizeof(void *) * nb_tx_queues, 0, numa_node);
> +	if (data->tx_queues == NULL)
> +		goto error;
> +
> +	for (i = 0; i < nb_rx_queues; i++) {
> +		vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
> +				RTE_CACHE_LINE_SIZE, numa_node);
> +		if (vq == NULL) {
> +			RTE_LOG(ERR, PMD,
> +				"Failed to allocate memory for rx queue\n");
> +			goto error;
> +		}
> +		vq->mb_pool = mb_pool;
> +		vq->virtqueue_id = i * VIRTIO_QNUM + VIRTIO_TXQ;
> +		data->rx_queues[i] = vq;
> +	}

I would invoke eth_rx_queue_setup() here, to remove the duplicated
effort of queue allocation and initialization.

> +
> +	for (i = 0; i < nb_tx_queues; i++) {
> +		vq = rte_zmalloc_socket(NULL, sizeof(struct vhost_queue),
> +				RTE_CACHE_LINE_SIZE, numa_node);
> +		if (vq == NULL) {
> +			RTE_LOG(ERR, PMD,
> +				"Failed to allocate memory for tx queue\n");
> +			goto error;
> +		}
> +		vq->mb_pool = mb_pool;

A Tx queue doesn't need an mbuf pool. And, ditto, call eth_tx_queue_setup()
instead.


> +int
> +rte_eth_from_vhost(const char *name, char *iface_name,
> +		const unsigned int numa_node, struct rte_mempool *mb_pool)

That would make this API very limited. Assume we want to extend the
vhost PMD in the future; we could easily do that by adding a few more
vdev options: you could reference my patch[0], which adds client and
reconnect options. But here you hardcode all the parameters that are
needed so far to create a vhost-pmd eth device; adding something new
would imply an API breakage in the future.

So, how about passing the vdev options as the argument of this API?
That would be friendly to future extension without breaking the API.

[0]: http://dpdk.org/dev/patchwork/patch/12608/
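
For illustration, the call might then take a shape like this (hypothetical;
the option names here are only examples):

	port_id = rte_eth_from_vhost("vhost0", numa_node, mb_pool,
			"iface=/tmp/sock0,queues=2,client=1");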

> +/**
> + * API to create vhost PMD
> + *
> + * @param name
> + *  Vhost device name
> + * @param iface_name
> + *  Vhost interface name
> + * @param numa_node
> + *  Socket id
> + * @param mb_pool
> + *  Memory pool
> + *
> + * @return
> + *  - On success, port_id.
> + *  - On failure, a negative value.
> + */

Hmmm, too simple.

	--yliu


* Re: [PATCH] vhost: add support for dynamic vhost PMD creation
From: Ferruh Yigit @ 2016-05-10 17:11 UTC
  To: Yuanhan Liu; +Cc: dev, Tetsuya Mukawa

On 5/9/2016 10:31 PM, Yuanhan Liu wrote:
> On Thu, May 05, 2016 at 07:11:09PM +0100, Ferruh Yigit wrote:
>> Add rte_eth_from_vhost() API to create vhost PMD dynamically from
>> applications.
> 
> This sounds like a good idea to me. It would be better if you named a
> concrete use case for it, though.
> [...]
> It's not a public function, so don't name it with prefix "rte_".
> [...]
> I would invoke eth_rx_queue_setup() here, to remove the duplicated
> effort of queue allocation and initialization.
> [...]
> A Tx queue doesn't need an mbuf pool. And, ditto, call eth_tx_queue_setup()
> instead.
> [...]
> So, how about passing the vdev options as the argument of this API?
> That would be friendly to future extension without breaking the API.
> [...]
> Hmmm, too simple.
> 
> 	--yliu

Hi Yuanhan,

Thank you for the review; I will send a new version of the patch with
the above issues addressed.

Thanks,
ferruh


* [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Ferruh Yigit @ 2016-05-18 17:10 UTC
  To: dev; +Cc: Tetsuya Mukawa, Yuanhan Liu, Ferruh Yigit

Add rte_eth_from_vhost() API to create vhost PMD dynamically from
applications.
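
For example (an illustrative sketch only; the socket path and names are
made up, and the mbuf pool is assumed to be created elsewhere):

	/* iface and queue count come via the option string, as with --vdev */
	port_id = rte_eth_from_vhost("vhost0", NULL, rte_socket_id(), mb_pool,
			"iface=/tmp/sock0,queues=2");
	if (port_id < 0)
		rte_exit(EXIT_FAILURE, "cannot create vhost port\n");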

Signed-off-by: Ferruh Yigit <ferruh.yigit@intel.com>

---

v2:
* drop rte_ prefix from non-public function
* re-use eth_rx_queue_setup/eth_tx_queue_setup
* pass vdev options as parameter to API
---
 drivers/net/vhost/rte_eth_vhost.c           | 135 ++++++++++++++++++++++++++++
 drivers/net/vhost/rte_eth_vhost.h           |  26 ++++++
 drivers/net/vhost/rte_pmd_vhost_version.map |   7 ++
 3 files changed, 168 insertions(+)

diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c
index 310cbef..8019eb1 100644
--- a/drivers/net/vhost/rte_eth_vhost.c
+++ b/drivers/net/vhost/rte_eth_vhost.c
@@ -796,6 +796,79 @@ error:
 	return -1;
 }
 
+static int
+eth_from_vhost_create(const char *name, char *iface_name, uint16_t nb_queues,
+		const unsigned int numa_node, struct rte_mempool *mb_pool)
+{
+	struct rte_eth_dev_data *data = NULL;
+	struct pmd_internal *internal = NULL;
+	struct rte_eth_dev *dev = NULL;
+	struct internal_list *list;
+	int port_id;
+	int ret;
+	int i;
+
+	port_id = eth_dev_vhost_create(name, iface_name, nb_queues, numa_node);
+	if (port_id < 0)
+		return -1;
+
+	dev = &rte_eth_devices[port_id];
+	data = dev->data;
+
+	data->rx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_queues,
+			0, numa_node);
+	if (data->rx_queues == NULL)
+		goto error;
+
+	data->tx_queues = rte_zmalloc_socket(name, sizeof(void *) * nb_queues,
+			0, numa_node);
+	if (data->tx_queues == NULL)
+		goto error;
+
+	for (i = 0; i < nb_queues; i++) {
+		ret = eth_rx_queue_setup(dev, i, 0, numa_node, NULL, mb_pool);
+		if (ret < 0)
+			goto error;
+	}
+
+	for (i = 0; i < nb_queues; i++) {
+		ret = eth_tx_queue_setup(dev, i, 0, numa_node, NULL);
+		if (ret < 0)
+			goto error;
+	}
+
+	return port_id;
+
+error:
+	internal = data->dev_private;
+	list = find_internal_resource(internal->iface_name);
+
+	pthread_mutex_lock(&internal_list_lock);
+	TAILQ_REMOVE(&internal_list, list, next);
+	pthread_mutex_unlock(&internal_list_lock);
+
+	if (internal)
+		free(internal->dev_name);
+	rte_free(vring_states[port_id]);
+	rte_free(data->mac_addrs);
+	rte_eth_dev_release_port(dev);
+	if (data->rx_queues) {
+		for (i = 0; i < nb_queues; i++)
+			rte_free(data->rx_queues[i]);
+		rte_free(data->rx_queues);
+	}
+	if (data->tx_queues) {
+		for (i = 0; i < nb_queues; i++)
+			rte_free(data->tx_queues[i]);
+		rte_free(data->tx_queues);
+	}
+	rte_free(internal);
+	rte_free(list);
+	rte_free(data);
+
+	return -1;
+}
+
 static inline int
 open_iface(const char *key __rte_unused, const char *value, void *extra_args)
 {
@@ -827,6 +900,68 @@ open_queues(const char *key __rte_unused, const char *value, void *extra_args)
 	return 0;
 }
 
+int
+rte_eth_from_vhost(const char *name, char *iface_name_arg,
+		const unsigned int numa_node, struct rte_mempool *mb_pool,
+		const char *params)
+{
+	char *iface_name = iface_name_arg;
+	struct rte_kvargs *kvlist = NULL;
+	uint16_t queues = 1;
+	int port_id;
+	int ret;
+
+	if (!name || !mb_pool)
+		return -1;
+
+	if (params) {
+		kvlist = rte_kvargs_parse(params, valid_arguments);
+		if (kvlist == NULL)
+			return -1;
+
+		if (rte_kvargs_count(kvlist, ETH_VHOST_IFACE_ARG) == 1) {
+			ret = rte_kvargs_process(kvlist, ETH_VHOST_IFACE_ARG,
+						 &open_iface, &iface_name);
+			if (ret < 0) {
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+		}
+
+		if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) {
+			ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG,
+						 &open_queues, &queues);
+			if (ret < 0) {
+				rte_kvargs_free(kvlist);
+				return ret;
+			}
+		}
+	}
+
+	if (!iface_name || !queues) {
+		rte_kvargs_free(kvlist);
+		return -1;
+	}
+
+	port_id = eth_from_vhost_create(name, iface_name, queues, numa_node,
+			mb_pool);
+	if (port_id < 0) {
+		rte_kvargs_free(kvlist);
+		return port_id;
+	}
+
+	ret = rte_vhost_driver_register(iface_name);
+	rte_kvargs_free(kvlist);
+	if (ret < 0)
+		return ret;
+
+	ret = vhost_driver_session_start();
+	if (ret < 0)
+		return ret;
+
+	return port_id;
+}
+
 static int
 rte_pmd_vhost_devinit(const char *name, const char *params)
 {
diff --git a/drivers/net/vhost/rte_eth_vhost.h b/drivers/net/vhost/rte_eth_vhost.h
index ff5d877..480dac8 100644
--- a/drivers/net/vhost/rte_eth_vhost.h
+++ b/drivers/net/vhost/rte_eth_vhost.h
@@ -102,6 +102,32 @@ struct rte_eth_vhost_queue_event {
 int rte_eth_vhost_get_queue_event(uint8_t port_id,
 		struct rte_eth_vhost_queue_event *event);
 
+/**
+ * Create a new ethdev from vhost device
+ *
+ * @param name
+ *    Name to be given to the new ethdev
+ * @param iface_name
+ *    Specifies a path to connect to a QEMU virtio-net device
+ * @param numa_node
+ *    The numa node on which the memory for this port is to be allocated
+ * @param mb_pool
+ *    Memory pool used by created ethdev
+ * @param params
+ *    Optional argument list, supported arguments:
+ *       iface : iface_name,
+ *       queues: number of rx/tx queues [defaults to 1]
+ *    These values override the direct API parameters on conflict.
+ *    Sample: params="iface=/tmp/sock0,queues=1"
+ *
+ * @return
+ *  - On success, the created ethdev's port_id.
+ *  - On failure, a negative value.
+ */
+int rte_eth_from_vhost(const char *name, char *iface_name_arg,
+		const unsigned int numa_node, struct rte_mempool *mb_pool,
+		const char *params);
+
 #ifdef __cplusplus
 }
 #endif
diff --git a/drivers/net/vhost/rte_pmd_vhost_version.map b/drivers/net/vhost/rte_pmd_vhost_version.map
index 65bf3a8..bb2fe29 100644
--- a/drivers/net/vhost/rte_pmd_vhost_version.map
+++ b/drivers/net/vhost/rte_pmd_vhost_version.map
@@ -8,3 +8,10 @@ DPDK_16.04 {
 
 	local: *;
 };
+
+DPDK_16.07 {
+	global:
+
+	rte_eth_from_vhost;
+
+} DPDK_16.04;
-- 
2.5.5


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Thomas Monjalon @ 2016-05-19  8:33 UTC
  To: Ferruh Yigit; +Cc: dev, Tetsuya Mukawa, Yuanhan Liu

2016-05-18 18:10, Ferruh Yigit:
> Add rte_eth_from_vhost() API to create vhost PMD dynamically from
> applications.

How is it different from rte_eth_dev_attach() calling rte_eal_vdev_init()?


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Ferruh Yigit @ 2016-05-19 16:28 UTC
  To: Thomas Monjalon; +Cc: dev, Tetsuya Mukawa, Yuanhan Liu

On 5/19/2016 9:33 AM, Thomas Monjalon wrote:
> 2016-05-18 18:10, Ferruh Yigit:
>> Add rte_eth_from_vhost() API to create vhost PMD dynamically from
>> applications.
> 
> How is it different from rte_eth_dev_attach() calling rte_eal_vdev_init()?
> 

When using rte_eth_dev_attach(), the application also needs to call:
rte_eth_dev_configure()
rte_eth_rx_queue_setup()
rte_eth_tx_queue_setup()
rte_eth_dev_start()

rte_eth_from_vhost() does these internally, which is easier for applications to use.
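
For reference, the manual sequence looks roughly like this (a sketch only;
the devargs string and descriptor counts are illustrative, and error
handling is omitted):

	uint8_t port_id;
	static const struct rte_eth_conf port_conf; /* zeroed defaults */

	rte_eth_dev_attach("eth_vhost0,iface=/tmp/sock0,queues=1", &port_id);
	rte_eth_dev_configure(port_id, 1, 1, &port_conf);
	rte_eth_rx_queue_setup(port_id, 0, 128, rte_socket_id(), NULL, mb_pool);
	rte_eth_tx_queue_setup(port_id, 0, 512, rte_socket_id(), NULL);
	rte_eth_dev_start(port_id);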


Regards,
ferruh


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Thomas Monjalon @ 2016-05-19 16:44 UTC
  To: Ferruh Yigit; +Cc: dev, Tetsuya Mukawa, Yuanhan Liu

2016-05-19 17:28, Ferruh Yigit:
> On 5/19/2016 9:33 AM, Thomas Monjalon wrote:
> > 2016-05-18 18:10, Ferruh Yigit:
> >> Add rte_eth_from_vhost() API to create vhost PMD dynamically from
> >> applications.
> > 
> > How is it different from rte_eth_dev_attach() calling rte_eal_vdev_init()?
> > 
> 
> When using rte_eth_dev_attach(), the application also needs to call:
> rte_eth_dev_configure()
> rte_eth_rx_queue_setup()
> rte_eth_tx_queue_setup()
> rte_eth_dev_start()
> 
> rte_eth_from_vhost() does these internally, which is easier for applications to use.

This argument is not sufficient.
We are not going to add new APIs just for wrapping others.


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Yuanhan Liu @ 2016-05-20  1:59 UTC
  To: Thomas Monjalon; +Cc: Ferruh Yigit, dev, Tetsuya Mukawa

On Thu, May 19, 2016 at 06:44:44PM +0200, Thomas Monjalon wrote:
> 2016-05-19 17:28, Ferruh Yigit:
> > On 5/19/2016 9:33 AM, Thomas Monjalon wrote:
> > > 2016-05-18 18:10, Ferruh Yigit:
> > >> Add rte_eth_from_vhost() API to create vhost PMD dynamically from
> > >> applications.
> > > 
> > > How is it different from rte_eth_dev_attach() calling rte_eal_vdev_init()?
> > > 
> > 
> > When using rte_eth_dev_attach(), the application also needs to call:

Sigh.. I was not aware that there is such an interface.

> > rte_eth_dev_configure()
> > rte_eth_rx_queue_setup()
> > rte_eth_tx_queue_setup()
> > rte_eth_dev_start()
> > 
> > rte_eth_from_vhost() does these internally, which is easier for applications to use.
> 
> This argument is not sufficient.
> We are not going to add new APIs just for wrapping others.

+1.
	--yliu


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Bruce Richardson @ 2016-05-20 10:37 UTC
  To: Thomas Monjalon; +Cc: Ferruh Yigit, dev, Tetsuya Mukawa, Yuanhan Liu

On Thu, May 19, 2016 at 06:44:44PM +0200, Thomas Monjalon wrote:
> 2016-05-19 17:28, Ferruh Yigit:
> > On 5/19/2016 9:33 AM, Thomas Monjalon wrote:
> > > 2016-05-18 18:10, Ferruh Yigit:
> > >> Add rte_eth_from_vhost() API to create vhost PMD dynamically from
> > >> applications.
> > > 
> > > How is it different from rte_eth_dev_attach() calling rte_eal_vdev_init()?
> > > 
> > 
> > When using rte_eth_dev_attach(), the application also needs to call:
> > rte_eth_dev_configure()
> > rte_eth_rx_queue_setup()
> > rte_eth_tx_queue_setup()
> > rte_eth_dev_start()
> > 
> > rte_eth_from_vhost() does these internally, which is easier for applications to use.
> 
> This argument is not sufficient.
> We are not going to add new APIs just for wrapping others.

Why not - if there is a sufficient increase in developer usability by doing so?
Having one API that saves an app from having to call 5 other APIs looks like
something that should always be given fair consideration. There will obviously
be other factors to take into account too, like numbers and types of parameters
to the replacement call vs the sub-calls, but I don't think a blanket ban is
justified.

Regards,
/Bruce


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Thomas Monjalon @ 2016-05-20 12:03 UTC
  To: Bruce Richardson; +Cc: Ferruh Yigit, dev, Tetsuya Mukawa, Yuanhan Liu

2016-05-20 11:37, Bruce Richardson:
> On Thu, May 19, 2016 at 06:44:44PM +0200, Thomas Monjalon wrote:
> > [...]
> > 
> > This argument is not sufficient.
> > We are not going to add new APIs just for wrapping others.
> 
> Why not - if there is a sufficient increase in developer usability by doing so?
> Having one API that saves an app from having to call 5 other APIs looks like
> something that should always be given fair consideration. There will obviously
> be other factors to take into account too, like numbers and types of parameters
> to the replacement call vs the sub-calls, but I don't think a blanket ban is
> justified.

Yes, everything can be discussed, especially the ethdev API which is far
from being perfect :)


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Yuanhan Liu @ 2016-05-23 13:24 UTC
  To: Bruce Richardson; +Cc: Thomas Monjalon, Ferruh Yigit, dev, Tetsuya Mukawa

On Fri, May 20, 2016 at 11:37:47AM +0100, Bruce Richardson wrote:
> On Thu, May 19, 2016 at 06:44:44PM +0200, Thomas Monjalon wrote:
> > [...]
> > 
> > This argument is not sufficient.
> > We are not going to add new APIs just for wrapping others.
> 
> Why not - if there is a sufficient increase in developer usability by doing so?
> Having one API that saves an app from having to call 5 other APIs looks like
> something that should always be given fair consideration.

Good point. Vhost is not the only virtual device we support, and
it may also look reasonable to add something similar for others in
the future (IIRC, you proposed two more internally that also
introduced similar APIs). So, instead of introducing a new API for
each such vdev, could we introduce a common one? Say, a refined
rte_eth_dev_attach() that includes dev_configure(), queue_setup(),
etc.

Makes sense?

	--yliu


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Ferruh Yigit @ 2016-05-23 17:06 UTC
  To: Yuanhan Liu, Bruce Richardson; +Cc: Thomas Monjalon, dev, Tetsuya Mukawa

On 5/23/2016 2:24 PM, Yuanhan Liu wrote:
> [...]
> 
> Good point. Vhost is not the only virtual device we support, and
> it may also look reasonable to add something similar for others in
> the future (IIRC, you proposed two more internally that also
> introduced similar APIs). So, instead of introducing a new API for
> each such vdev, could we introduce a common one? Say, a refined
> rte_eth_dev_attach() that includes dev_configure(), queue_setup(),
> etc.
> 

This sounds good to me. If there is no objection, I will send a patch
and we can discuss based on that patch.
Something like: rte_eth_dev_attach_and_setup()

Regards,
ferruh


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Yuanhan Liu @ 2016-05-24  5:11 UTC
  To: Ferruh Yigit; +Cc: Bruce Richardson, Thomas Monjalon, dev, Tetsuya Mukawa

On Mon, May 23, 2016 at 06:06:21PM +0100, Ferruh Yigit wrote:
> On 5/23/2016 2:24 PM, Yuanhan Liu wrote:
> > [...]
> 
> This sounds good to me. If there is no objection, I will send a patch
> and we can discuss based on that patch.
> Something like: rte_eth_dev_attach_and_setup()

Let's wait and gather some comments first?

	--yliu


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Bruce Richardson @ 2016-05-24  9:42 UTC
  To: Yuanhan Liu; +Cc: Ferruh Yigit, Thomas Monjalon, dev, Tetsuya Mukawa

On Tue, May 24, 2016 at 01:11:26PM +0800, Yuanhan Liu wrote:
> [...]
> > > Good point. Vhost is not the only virtual device we support, and
> > > it may also look reasonable to add something similar for others in
> > > the future (IIRC, you proposed two more internally that also
> > > introduced similar APIs). So, instead of introducing a new API for
> > > each such vdev, could we introduce a common one? Say, a refined
> > > rte_eth_dev_attach() that includes dev_configure(), queue_setup(),
> > > etc.
> [...]
> 
> Let's wait and gather some comments first?
> 
I'm not sure that such a general approach is likely to work, as the parameters
needed for each individual driver are going to be different. For some devices,
many of the parameters can be implied, while for others they may not be, and
still others need additional setup parameters. For the simplest case, take the
rte_eth_from_ring API, which creates an ethdev backed by a single rte_ring. The
number of rx and tx queues and their sizes are all determined by the actual
underlying ring, as is the numa node and all other parameters. On the other
hand, we have something like the pcap PMD, where again none of the queue sizes
need to be specified, but we do need additional parameters to provide the
underlying pcap file/device to use. Other devices will similarly need different
options, including in some cases queue counts and sizes.
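
For example, the ring case needs nothing beyond the ring itself (a sketch
against the existing rte_eth_ring API; names are illustrative):

	struct rte_ring *r = rte_ring_create("r0", 1024, rte_socket_id(),
			RING_F_SP_ENQ | RING_F_SC_DEQ);
	int port = rte_eth_from_ring(r);	/* everything else is implied */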

Therefore, I think trying to generalise the function is pointless. If you have
to write your code to build up a specific set of parameters to pass to a general
API, then you are no better off than just calling a specific API directly. In
both cases you need different code for each device type.

Regards,
/Bruce


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Yuanhan Liu @ 2016-05-25  4:41 UTC
  To: Bruce Richardson; +Cc: Ferruh Yigit, Thomas Monjalon, dev, Tetsuya Mukawa

On Tue, May 24, 2016 at 10:42:56AM +0100, Bruce Richardson wrote:
> [...]
> I'm not sure that such a general approach is likely to work,

Me, neither. Thus I threw it out for more discussion.

> as the parameters
> needed for each individual driver are going to be different.

Well, if you plan to pass all the necessary information to the driver by
parameters like this v1 does, then yes, that's true and a generic API
is unlikely to work. But what I was thinking is that we feed it
strings, like the arguments for the '--vdev' option. That way, we could
have a unified interface (if that works, which is something I'm not
quite sure about).

OTOH, let's assume there is a switch that supports quite a few such
vdevs, as well as the ability to add a new device dynamically via a
corresponding API. And assume there is just one external interface
in the switch to add a device dynamically (say, "ovs-vsctl add-port");
you then also need to build some code to invoke the right API, as well
as to construct the right parameters, like what you said below.

This reminds me of the vhost dequeue/enqueue API. Basically speaking,
it has the same functionality as rte_eth_rx/tx_burst, just with a
different API name and different parameters. This results in OVS having
to write a different netdev_class, one for NICs and another for vhost-user
(actually, there is yet another one for vhost-cuse).

And now that we have the vhost PMD, we could have just one netdev_class
in OVS, saving their (and other applications') effort to build/maintain
similar code.

Thus, I'm __just wondering__: could we add a generic interface to create
vdevs dynamically for all such vdevs? I was thinking of something like:

	rte_create_vdev(type, char *options);

This would in turn invoke the right function pointer for each "type" to
do the right setup.

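(Purely illustrative; no such function exists today:)

	port_id = rte_create_vdev("eth_vhost", "iface=/tmp/sock0,queues=2");
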
	--yliu



* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Thomas Monjalon @ 2016-05-25 11:54 UTC
  To: Yuanhan Liu
  Cc: Bruce Richardson, Ferruh Yigit, dev, Tetsuya Mukawa, david.marchand

2016-05-25 12:41, Yuanhan Liu:
> [...]
> > as the parameters
> > needed for each individual driver are going to be different.
> 
> Well, if you plan to pass all the necessary information to the driver by
> parameters like this v1 does, then yes, that's true and a generic API
> is unlikely to work. But what I was thinking is that we feed it
> strings, like the arguments for the '--vdev' option. That way, we could
> have a unified interface (if that works, which is something I'm not
> quite sure about).

Yes, that is the plan with the EAL rework in progress.
Hotplugging is being redefined at EAL level and needs a configuration
API with devargs to be complete.

> OTOH, let's assume there is a switch that supports quite a few such
> vdevs, as well as the ability to add a new device dynamically via a
> corresponding API. And assume there is just one external interface
> in the switch to add a device dynamically (say, "ovs-vsctl add-port");
> you then also need to build some code to invoke the right API, as well
> as to construct the right parameters, like what you said below.
> 
> This reminds me of the vhost dequeue/enqueue API. Basically speaking,
> it has the same functionality as rte_eth_rx/tx_burst, just with a
> different API name and different parameters. This results in OVS having
> to write a different netdev_class, one for NICs and another for vhost-user
> (actually, there is yet another one for vhost-cuse).
> 
> And now that we have the vhost PMD, we could have just one netdev_class
> in OVS, saving their (and other applications') effort to build/maintain
> similar code.

Yes, it was a good improvement.

> Thus, I'm __just wondering__: could we add a generic interface to create
> vdevs dynamically for all such vdevs? I was thinking of something like:
> 
> 	rte_create_vdev(type, char *options);

Actually, it makes more sense to first create the device with an attach()
function and then configure it with devargs.
So neither attaching nor configuring is specific to vdevs.
And devargs configuration can happen long after creating the device object.

I suggest rejecting this patch and continuing the EAL rework initiated
by David.


* Re: [PATCH v2] vhost: add support for dynamic vhost PMD creation
From: Yuanhan Liu @ 2016-05-26  7:58 UTC
  To: Thomas Monjalon
  Cc: Bruce Richardson, Ferruh Yigit, dev, Tetsuya Mukawa, david.marchand

On Wed, May 25, 2016 at 01:54:41PM +0200, Thomas Monjalon wrote:
> > [...]
> > 
> > This reminds me of the vhost dequeue/enqueue API. Basically speaking,
> > it has the same functionality as rte_eth_rx/tx_burst, just with a
> > different API name and different parameters. This results in OVS having
> > to write a different netdev_class, one for NICs and another for vhost-user
> > (actually, there is yet another one for vhost-cuse).
> > 
> > And now that we have the vhost PMD, we could have just one netdev_class
> > in OVS, saving their (and other applications') effort to build/maintain
> > similar code.
> Yes, it was a good improvement.

Yeah, kudos to Tetsuya.

> > Thus, I'm __just wondering__: could we add a generic interface to create
> > vdevs dynamically for all such vdevs? I was thinking of something like:
> > 
> > 	rte_create_vdev(type, char *options);
> 
> Actually, it makes more sense to first create the device with an attach()
> function and then configure it with devargs.
> So neither attaching nor configuring is specific to vdevs.
> And devargs configuration can happen long after creating the device object.
> 
> I suggest rejecting this patch and continuing the EAL rework initiated
> by David.

I'm okay with that: I think it's better to use the current interface
instead of adding a new one that will be refactored soon.

	--yliu

