From: "Jayatheerthan, Jay" <jay.jayatheerthan@intel.com>
To: "Naga Harish K, S V" <s.v.naga.harish.k@intel.com>,
	"jerinj@marvell.com" <jerinj@marvell.com>
Cc: "dev@dpdk.org" <dev@dpdk.org>
Subject: Re: [dpdk-dev] [PATCH v5 4/5] eventdev/rx_adapter: implement per queue event buffer
Date: Tue, 5 Oct 2021 07:55:55 +0000	[thread overview]
Message-ID: <BN6SPR00MB239F2E9351B1BA53EAFE189FDAF9@BN6SPR00MB239.namprd11.prod.outlook.com> (raw)
In-Reply-To: <20211004054108.3890018-4-s.v.naga.harish.k@intel.com>

> -----Original Message-----
> From: Naga Harish K, S V <s.v.naga.harish.k@intel.com>
> Sent: Monday, October 4, 2021 11:11 AM
> To: jerinj@marvell.com; Jayatheerthan, Jay <jay.jayatheerthan@intel.com>
> Cc: dev@dpdk.org
> Subject: [PATCH v5 4/5] eventdev/rx_adapter: implement per queue event buffer
> 
> This patch implements the per queue event buffer with
> the required validations.
> 
> Signed-off-by: Naga Harish K S V <s.v.naga.harish.k@intel.com>
> ---
>  lib/eventdev/rte_event_eth_rx_adapter.c | 187 +++++++++++++++++-------
>  1 file changed, 138 insertions(+), 49 deletions(-)
> 
> diff --git a/lib/eventdev/rte_event_eth_rx_adapter.c b/lib/eventdev/rte_event_eth_rx_adapter.c
> index 606db241b8..b61af0e75e 100644
> --- a/lib/eventdev/rte_event_eth_rx_adapter.c
> +++ b/lib/eventdev/rte_event_eth_rx_adapter.c
> @@ -102,10 +102,12 @@ struct rte_event_eth_rx_adapter {
>  	uint8_t rss_key_be[RSS_KEY_SIZE];
>  	/* Event device identifier */
>  	uint8_t eventdev_id;
> -	/* Per ethernet device structure */
> -	struct eth_device_info *eth_devices;
>  	/* Event port identifier */
>  	uint8_t event_port_id;
> +	/* Flag indicating per rxq event buffer */
> +	bool use_queue_event_buf;
> +	/* Per ethernet device structure */
> +	struct eth_device_info *eth_devices;
>  	/* Lock to serialize config updates with service function */
>  	rte_spinlock_t rx_lock;
>  	/* Max mbufs processed in any service function invocation */
> @@ -241,6 +243,7 @@ struct eth_rx_queue_info {
>  	uint32_t flow_id_mask;	/* Set to ~0 if app provides flow id else 0 */
>  	uint64_t event;
>  	struct eth_rx_vector_data vector_data;
> +	struct rte_eth_event_enqueue_buffer *event_buf;
>  };
> 
>  static struct rte_event_eth_rx_adapter **event_eth_rx_adapter;
> @@ -767,10 +770,9 @@ rxa_enq_block_end_ts(struct rte_event_eth_rx_adapter *rx_adapter,
> 
>  /* Enqueue buffered events to event device */
>  static inline uint16_t
> -rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter)
> +rxa_flush_event_buffer(struct rte_event_eth_rx_adapter *rx_adapter,
> +		       struct rte_eth_event_enqueue_buffer *buf)
>  {
> -	struct rte_eth_event_enqueue_buffer *buf =
> -	    &rx_adapter->event_enqueue_buffer;
>  	struct rte_event_eth_rx_adapter_stats *stats = &rx_adapter->stats;
>  	uint16_t count = buf->last ? buf->last - buf->head : buf->count;
> 
> @@ -888,15 +890,14 @@ rxa_buffer_mbufs(struct rte_event_eth_rx_adapter *rx_adapter,
>  		uint16_t eth_dev_id,
>  		uint16_t rx_queue_id,
>  		struct rte_mbuf **mbufs,
> -		uint16_t num)
> +		uint16_t num,
> +		struct rte_eth_event_enqueue_buffer *buf)
>  {
>  	uint32_t i;
>  	struct eth_device_info *dev_info =
>  					&rx_adapter->eth_devices[eth_dev_id];
>  	struct eth_rx_queue_info *eth_rx_queue_info =
>  					&dev_info->rx_queue[rx_queue_id];
> -	struct rte_eth_event_enqueue_buffer *buf =
> -					&rx_adapter->event_enqueue_buffer;
>  	uint16_t new_tail = buf->tail;
>  	uint64_t event = eth_rx_queue_info->event;
>  	uint32_t flow_id_mask = eth_rx_queue_info->flow_id_mask;
> @@ -995,11 +996,10 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
>  	uint16_t queue_id,
>  	uint32_t rx_count,
>  	uint32_t max_rx,
> -	int *rxq_empty)
> +	int *rxq_empty,
> +	struct rte_eth_event_enqueue_buffer *buf)
>  {
>  	struct rte_mbuf *mbufs[BATCH_SIZE];
> -	struct rte_eth_event_enqueue_buffer *buf =
> -					&rx_adapter->event_enqueue_buffer;
>  	struct rte_event_eth_rx_adapter_stats *stats =
>  					&rx_adapter->stats;
>  	uint16_t n;
> @@ -1012,7 +1012,7 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
>  	 */
>  	while (rxa_pkt_buf_available(buf)) {
>  		if (buf->count >= BATCH_SIZE)
> -			rxa_flush_event_buffer(rx_adapter);
> +			rxa_flush_event_buffer(rx_adapter, buf);
> 
>  		stats->rx_poll_count++;
>  		n = rte_eth_rx_burst(port_id, queue_id, mbufs, BATCH_SIZE);
> @@ -1021,14 +1021,14 @@ rxa_eth_rx(struct rte_event_eth_rx_adapter *rx_adapter,
>  				*rxq_empty = 1;
>  			break;
>  		}
> -		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n);
> +		rxa_buffer_mbufs(rx_adapter, port_id, queue_id, mbufs, n, buf);
>  		nb_rx += n;
>  		if (rx_count + nb_rx > max_rx)
>  			break;
>  	}
> 
>  	if (buf->count > 0)
> -		rxa_flush_event_buffer(rx_adapter);
> +		rxa_flush_event_buffer(rx_adapter, buf);
> 
>  	return nb_rx;
>  }
> @@ -1169,7 +1169,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
>  	ring_lock = &rx_adapter->intr_ring_lock;
> 
>  	if (buf->count >= BATCH_SIZE)
> -		rxa_flush_event_buffer(rx_adapter);
> +		rxa_flush_event_buffer(rx_adapter, buf);
> 
>  	while (rxa_pkt_buf_available(buf)) {
>  		struct eth_device_info *dev_info;
> @@ -1221,7 +1221,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
>  					continue;
>  				n = rxa_eth_rx(rx_adapter, port, i, nb_rx,
>  					rx_adapter->max_nb_rx,
> -					&rxq_empty);
> +					&rxq_empty, buf);
>  				nb_rx += n;
> 
>  				enq_buffer_full = !rxq_empty && n == 0;
> @@ -1242,7 +1242,7 @@ rxa_intr_ring_dequeue(struct rte_event_eth_rx_adapter *rx_adapter)
>  		} else {
>  			n = rxa_eth_rx(rx_adapter, port, queue, nb_rx,
>  				rx_adapter->max_nb_rx,
> -				&rxq_empty);
> +				&rxq_empty, buf);
>  			rx_adapter->qd_valid = !rxq_empty;
>  			nb_rx += n;
>  			if (nb_rx > rx_adapter->max_nb_rx)
> @@ -1273,13 +1273,12 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
>  {
>  	uint32_t num_queue;
>  	uint32_t nb_rx = 0;
> -	struct rte_eth_event_enqueue_buffer *buf;
> +	struct rte_eth_event_enqueue_buffer *buf = NULL;
>  	uint32_t wrr_pos;
>  	uint32_t max_nb_rx;
> 
>  	wrr_pos = rx_adapter->wrr_pos;
>  	max_nb_rx = rx_adapter->max_nb_rx;
> -	buf = &rx_adapter->event_enqueue_buffer;
> 
>  	/* Iterate through a WRR sequence */
>  	for (num_queue = 0; num_queue < rx_adapter->wrr_len; num_queue++) {
> @@ -1287,24 +1286,36 @@ rxa_poll(struct rte_event_eth_rx_adapter *rx_adapter)
>  		uint16_t qid = rx_adapter->eth_rx_poll[poll_idx].eth_rx_qid;
>  		uint16_t d = rx_adapter->eth_rx_poll[poll_idx].eth_dev_id;
> 
> +		if (rx_adapter->use_queue_event_buf) {
> +			struct eth_device_info *dev_info =
> +				&rx_adapter->eth_devices[d];
> +			buf = dev_info->rx_queue[qid].event_buf;
> +		} else
> +			buf = &rx_adapter->event_enqueue_buffer;
> +
>  		/* Don't do a batch dequeue from the rx queue if there isn't
>  		 * enough space in the enqueue buffer.
>  		 */
>  		if (buf->count >= BATCH_SIZE)
> -			rxa_flush_event_buffer(rx_adapter);
> +			rxa_flush_event_buffer(rx_adapter, buf);
>  		if (!rxa_pkt_buf_available(buf)) {
> -			rx_adapter->wrr_pos = wrr_pos;
> -			return nb_rx;
> +			if (rx_adapter->use_queue_event_buf)
> +				goto poll_next_entry;
> +			else {
> +				rx_adapter->wrr_pos = wrr_pos;
> +				return nb_rx;
> +			}
>  		}
> 
>  		nb_rx += rxa_eth_rx(rx_adapter, d, qid, nb_rx, max_nb_rx,
> -				NULL);
> +				NULL, buf);
>  		if (nb_rx > max_nb_rx) {
>  			rx_adapter->wrr_pos =
>  				    (wrr_pos + 1) % rx_adapter->wrr_len;
>  			break;
>  		}
> 
> +poll_next_entry:
>  		if (++wrr_pos == rx_adapter->wrr_len)
>  			wrr_pos = 0;
>  	}
> @@ -1315,12 +1326,18 @@ static void
>  rxa_vector_expire(struct eth_rx_vector_data *vec, void *arg)
>  {
>  	struct rte_event_eth_rx_adapter *rx_adapter = arg;
> -	struct rte_eth_event_enqueue_buffer *buf =
> -		&rx_adapter->event_enqueue_buffer;
> +	struct rte_eth_event_enqueue_buffer *buf = NULL;
>  	struct rte_event *ev;
> 
> +	if (rx_adapter->use_queue_event_buf) {
> +		struct eth_device_info *dev_info =
> +			&rx_adapter->eth_devices[vec->port];
> +		buf = dev_info->rx_queue[vec->queue].event_buf;
> +	} else
> +		buf = &rx_adapter->event_enqueue_buffer;
> +

The above code to get the buffer can be made an inline function since it is needed in more than one place.

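For example, a small helper along these lines could be shared by rxa_poll() and rxa_vector_expire() (untested sketch; the name rxa_event_buf_get is only a suggestion):

static inline struct rte_eth_event_enqueue_buffer *
rxa_event_buf_get(struct rte_event_eth_rx_adapter *rx_adapter,
		  uint16_t eth_dev_id, uint16_t rx_queue_id)
{
	if (rx_adapter->use_queue_event_buf) {
		/* per Rx queue event buffer */
		struct eth_device_info *dev_info =
			&rx_adapter->eth_devices[eth_dev_id];
		return dev_info->rx_queue[rx_queue_id].event_buf;
	}
	/* shared adapter level event buffer */
	return &rx_adapter->event_enqueue_buffer;
}
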
>  	if (buf->count)
> -		rxa_flush_event_buffer(rx_adapter);
> +		rxa_flush_event_buffer(rx_adapter, buf);
> 
>  	if (vec->vector_ev->nb_elem == 0)
>  		return;
> @@ -1947,9 +1964,16 @@ rxa_sw_del(struct rte_event_eth_rx_adapter *rx_adapter,
>  	rx_adapter->num_rx_intr -= intrq;
>  	dev_info->nb_rx_intr -= intrq;
>  	dev_info->nb_shared_intr -= intrq && sintrq;
> +	if (rx_adapter->use_queue_event_buf) {
> +		struct rte_eth_event_enqueue_buffer *event_buf =
> +			dev_info->rx_queue[rx_queue_id].event_buf;
> +		rte_free(event_buf->events);
> +		rte_free(event_buf);
> +		dev_info->rx_queue[rx_queue_id].event_buf = NULL;
> +	}
>  }
> 
> -static void
> +static int
>  rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	struct eth_device_info *dev_info,
>  	int32_t rx_queue_id,
> @@ -1961,15 +1985,21 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  	int intrq;
>  	int sintrq;
>  	struct rte_event *qi_ev;
> +	struct rte_eth_event_enqueue_buffer *new_rx_buf = NULL;
> +	uint16_t eth_dev_id = dev_info->dev->data->port_id;
> +	int ret;
> 
>  	if (rx_queue_id == -1) {
>  		uint16_t nb_rx_queues;
>  		uint16_t i;
> 
>  		nb_rx_queues = dev_info->dev->data->nb_rx_queues;
> -		for (i = 0; i <	nb_rx_queues; i++)
> -			rxa_add_queue(rx_adapter, dev_info, i, conf);
> -		return;
> +		for (i = 0; i <	nb_rx_queues; i++) {
> +			ret = rxa_add_queue(rx_adapter, dev_info, i, conf);
> +			if (ret)
> +				return ret;
> +		}
> +		return 0;
>  	}
> 
>  	pollq = rxa_polled_queue(dev_info, rx_queue_id);
> @@ -2032,6 +2062,37 @@ rxa_add_queue(struct rte_event_eth_rx_adapter *rx_adapter,
>  				dev_info->next_q_idx = 0;
>  		}
>  	}
> +
> +	if (!rx_adapter->use_queue_event_buf)
> +		return 0;
> +
> +	new_rx_buf = rte_zmalloc_socket("rx_buffer_meta",
> +				sizeof(*new_rx_buf), 0,
> +				rte_eth_dev_socket_id(eth_dev_id));
> +	if (new_rx_buf == NULL) {
> +		RTE_EDEV_LOG_ERR("Failed to allocate event buffer meta for "
> +				 "dev_id: %d queue_id: %d",
> +				 eth_dev_id, rx_queue_id);
> +		return -ENOMEM;
> +	}
> +
> +	new_rx_buf->events_size = RTE_ALIGN(conf->event_buf_size, BATCH_SIZE);
> +	new_rx_buf->events_size += (2 * BATCH_SIZE);
> +	new_rx_buf->events = rte_zmalloc_socket("rx_buffer",
> +				sizeof(struct rte_event) *
> +				new_rx_buf->events_size, 0,
> +				rte_eth_dev_socket_id(eth_dev_id));
> +	if (new_rx_buf->events == NULL) {
> +		rte_free(new_rx_buf);
> +		RTE_EDEV_LOG_ERR("Failed to allocate event buffer for "
> +				 "dev_id: %d queue_id: %d",
> +				 eth_dev_id, rx_queue_id);
> +		return -ENOMEM;
> +	}
> +
> +	queue_info->event_buf = new_rx_buf;
> +
> +	return 0;
>  }
> 
>  static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
> @@ -2060,6 +2121,16 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
>  			temp_conf.servicing_weight = 1;
>  		}
>  		queue_conf = &temp_conf;
> +
> +		if (queue_conf->servicing_weight == 0 &&
> +		    rx_adapter->use_queue_event_buf) {
> +
> +			RTE_EDEV_LOG_ERR("Use of queue level event buffer "
> +					 "not supported for interrupt queues "
> +					 "dev_id: %d queue_id: %d",
> +					 eth_dev_id, rx_queue_id);
> +			return -EINVAL;
> +		}
>  	}
> 
>  	nb_rx_queues = dev_info->dev->data->nb_rx_queues;
> @@ -2139,7 +2210,9 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
> 
> 
> 
> -	rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
> +	ret = rxa_add_queue(rx_adapter, dev_info, rx_queue_id, queue_conf);
> +	if (ret)
> +		goto err_free_rxqueue;
>  	rxa_calc_wrr_sequence(rx_adapter, rx_poll, rx_wrr);
> 
>  	rte_free(rx_adapter->eth_rx_poll);
> @@ -2160,7 +2233,7 @@ static int rxa_sw_add(struct rte_event_eth_rx_adapter *rx_adapter,
>  	rte_free(rx_poll);
>  	rte_free(rx_wrr);
> 
> -	return 0;
> +	return ret;
>  }
> 
>  static int
> @@ -2286,20 +2359,26 @@ rxa_create(uint8_t id, uint8_t dev_id,
>  		rx_adapter->eth_devices[i].dev = &rte_eth_devices[i];
> 
>  	/* Rx adapter event buffer allocation */
> -	buf = &rx_adapter->event_enqueue_buffer;
> -	buf->events_size = RTE_ALIGN(rxa_params->event_buf_size, BATCH_SIZE);
> -
> -	events = rte_zmalloc_socket(rx_adapter->mem_name,
> -				    buf->events_size * sizeof(*events),
> -				    0, socket_id);
> -	if (events == NULL) {
> -		RTE_EDEV_LOG_ERR("Failed to allocate mem for event buffer\n");
> -		rte_free(rx_adapter->eth_devices);
> -		rte_free(rx_adapter);
> -		return -ENOMEM;
> -	}
> +	rx_adapter->use_queue_event_buf = rxa_params->use_queue_event_buf;
> +
> +	if (!rx_adapter->use_queue_event_buf) {
> +		buf = &rx_adapter->event_enqueue_buffer;
> +		buf->events_size = RTE_ALIGN(rxa_params->event_buf_size,
> +					     BATCH_SIZE);
> +
> +		events = rte_zmalloc_socket(rx_adapter->mem_name,
> +					    buf->events_size * sizeof(*events),
> +					    0, socket_id);
> +		if (events == NULL) {
> +			RTE_EDEV_LOG_ERR("Failed to allocate memory "
> +					 "for adapter event buffer");
> +			rte_free(rx_adapter->eth_devices);
> +			rte_free(rx_adapter);
> +			return -ENOMEM;
> +		}
> 
> -	rx_adapter->event_enqueue_buffer.events = events;
> +		rx_adapter->event_enqueue_buffer.events = events;
> +	}
> 
>  	event_eth_rx_adapter[id] = rx_adapter;
> 
> @@ -2327,6 +2406,7 @@ rte_event_eth_rx_adapter_create_ext(uint8_t id, uint8_t dev_id,
> 
>  	/* use default values for adapter params */
>  	rxa_params.event_buf_size = ETH_EVENT_BUFFER_SIZE;
> +	rxa_params.use_queue_event_buf = false;
> 
>  	return rxa_create(id, dev_id, &rxa_params, conf_cb, conf_arg);
>  }
> @@ -2347,9 +2427,9 @@ rte_event_eth_rx_adapter_create_with_params(uint8_t id, uint8_t dev_id,
>  	if (rxa_params == NULL) {
>  		rxa_params = &temp_params;
>  		rxa_params->event_buf_size = ETH_EVENT_BUFFER_SIZE;
> -	}
> -
> -	if (rxa_params->event_buf_size == 0)
> +		rxa_params->use_queue_event_buf = false;
> +	} else if ((!rxa_params->use_queue_event_buf &&
> +		    rxa_params->event_buf_size == 0))
>  		return -EINVAL;
> 
>  	pc = rte_malloc(NULL, sizeof(*pc), 0);
> @@ -2418,7 +2498,8 @@ rte_event_eth_rx_adapter_free(uint8_t id)
>  	if (rx_adapter->default_cb_arg)
>  		rte_free(rx_adapter->conf_arg);
>  	rte_free(rx_adapter->eth_devices);
> -	rte_free(rx_adapter->event_enqueue_buffer.events);
> +	if (!rx_adapter->use_queue_event_buf)
> +		rte_free(rx_adapter->event_enqueue_buffer.events);
>  	rte_free(rx_adapter);
>  	event_eth_rx_adapter[id] = NULL;
> 
> @@ -2522,6 +2603,14 @@ rte_event_eth_rx_adapter_queue_add(uint8_t id,
>  		return -EINVAL;
>  	}
> 
> +	if ((rx_adapter->use_queue_event_buf &&
> +	     queue_conf->event_buf_size == 0) ||
> +	    (!rx_adapter->use_queue_event_buf &&
> +	     queue_conf->event_buf_size != 0)) {
> +		RTE_EDEV_LOG_ERR("Invalid Event buffer size for the queue");
> +		return -EINVAL;
> +	}
> +

Another error case is configuring both: rx_adapter->use_queue_event_buf = true and queue_conf->event_buf_size != 0.

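As a readability note, the two mismatch checks above could also be collapsed into a single condition; a minimal sketch with the same behavior as the check in the patch (it does not add the extra case mentioned here):

	/* a non-zero queue event_buf_size must be given exactly when
	 * per-queue event buffers are enabled
	 */
	if (rx_adapter->use_queue_event_buf !=
	    (queue_conf->event_buf_size != 0)) {
		RTE_EDEV_LOG_ERR("Invalid event buffer size for the queue");
		return -EINVAL;
	}
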
>  	dev_info = &rx_adapter->eth_devices[eth_dev_id];
> 
>  	if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) {
> --
> 2.25.1

