All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func
@ 2018-01-15 11:38 Hemant Agrawal
  2018-01-15 11:38 ` [PATCH 2/5] net/dpaa2: optimize Rx/Tx path Hemant Agrawal
                   ` (4 more replies)
  0 siblings, 5 replies; 6+ messages in thread
From: Hemant Agrawal @ 2018-01-15 11:38 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit

This patch enhances the Rx function to support burst sizes
larger than 16.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_pvt.h |   5 +-
 drivers/net/dpaa2/dpaa2_rxtx.c          | 140 ++++++++++++++++++++++++--------
 2 files changed, 108 insertions(+), 37 deletions(-)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
index 2e79399..97e61bb 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_pvt.h
@@ -95,8 +95,9 @@ struct dpaa2_dpbp_dev {
 struct queue_storage_info_t {
 	struct qbman_result *dq_storage[NUM_DQS_PER_QUEUE];
 	struct qbman_result *active_dqs;
-	int active_dpio_id;
-	int toggle;
+	uint8_t active_dpio_id;
+	uint8_t toggle;
+	uint8_t last_num_pkts;
 };
 
 struct dpaa2_queue;
diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index 53466c3..efad728 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -495,12 +495,12 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 {
 	/* Function receive frames for a given device and VQ*/
 	struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
-	struct qbman_result *dq_storage;
+	struct qbman_result *dq_storage, *dq_storage1 = 0;
 	uint32_t fqid = dpaa2_q->fqid;
-	int ret, num_rx = 0;
-	uint8_t is_last = 0, status;
+	int ret, num_rx = 0, next_pull = 0, num_pulled, num_to_pull;
+	uint8_t pending, is_repeat, status;
 	struct qbman_swp *swp;
-	const struct qbman_fd *fd[DPAA2_DQRR_RING_SIZE], *next_fd;
+	const struct qbman_fd *fd, *next_fd;
 	struct qbman_pull_desc pulldesc;
 	struct queue_storage_info_t *q_storage = dpaa2_q->q_storage;
 	struct rte_eth_dev *dev = dpaa2_q->dev;
@@ -513,37 +513,51 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		}
 	}
 	swp = DPAA2_PER_LCORE_PORTAL;
-	if (!q_storage->active_dqs) {
+
+	/* if the original request for this q was from another portal */
+	if (unlikely(DPAA2_PER_LCORE_DPIO->index !=
+		q_storage->active_dpio_id)) {
+		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
+			while (!qbman_check_command_complete(get_swp_active_dqs
+				(DPAA2_PER_LCORE_DPIO->index)))
+				;
+			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+		}
+		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
+	}
+
+	if (unlikely(!q_storage->active_dqs)) {
 		q_storage->toggle = 0;
 		dq_storage = q_storage->dq_storage[q_storage->toggle];
+		q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+					       DPAA2_DQRR_RING_SIZE : nb_pkts;
 		qbman_pull_desc_clear(&pulldesc);
 		qbman_pull_desc_set_numframes(&pulldesc,
-					      (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
-					       DPAA2_DQRR_RING_SIZE : nb_pkts);
+					      q_storage->last_num_pkts);
 		qbman_pull_desc_set_fq(&pulldesc, fqid);
 		qbman_pull_desc_set_storage(&pulldesc, dq_storage,
 			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-		if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-			while (!qbman_check_command_complete(
-			       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
-				;
-			clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
-		}
 		while (1) {
 			if (qbman_swp_pull(swp, &pulldesc)) {
-				PMD_RX_LOG(WARNING, "VDQ command is not issued."
-					   "QBMAN is busy\n");
+				PMD_RX_LOG(WARNING,
+					"VDQ command not issued.QBMAN busy\n");
 				/* Portal was busy, try again */
 				continue;
 			}
 			break;
 		}
 		q_storage->active_dqs = dq_storage;
-		q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
 	}
+
+	/* pkt to pull in current pull request */
+	num_to_pull = q_storage->last_num_pkts;
+
+	/* Number of packet requested is more than current pull request */
+	if (nb_pkts > num_to_pull)
+		next_pull = nb_pkts - num_to_pull;
+
 	dq_storage = q_storage->active_dqs;
-	rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
 	/* Check if the previous issued command is completed.
 	 * Also seems like the SWP is shared between the Ethernet Driver
 	 * and the SEC driver.
@@ -552,7 +566,49 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		;
 	if (dq_storage == get_swp_active_dqs(q_storage->active_dpio_id))
 		clear_swp_active_dqs(q_storage->active_dpio_id);
-	while (!is_last) {
+
+repeat:
+	is_repeat = 0;
+
+	/* issue the deq command one more time to get another set of packets */
+	if (next_pull) {
+		q_storage->toggle ^= 1;
+		dq_storage1 = q_storage->dq_storage[q_storage->toggle];
+		qbman_pull_desc_clear(&pulldesc);
+
+		if (next_pull > DPAA2_DQRR_RING_SIZE) {
+			qbman_pull_desc_set_numframes(&pulldesc,
+					DPAA2_DQRR_RING_SIZE);
+			next_pull = next_pull - DPAA2_DQRR_RING_SIZE;
+			q_storage->last_num_pkts = DPAA2_DQRR_RING_SIZE;
+		} else {
+			qbman_pull_desc_set_numframes(&pulldesc, next_pull);
+			q_storage->last_num_pkts = next_pull;
+			next_pull = 0;
+		}
+		qbman_pull_desc_set_fq(&pulldesc, fqid);
+		qbman_pull_desc_set_storage(&pulldesc, dq_storage1,
+			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage1)), 1);
+		while (1) {
+			if (qbman_swp_pull(swp, &pulldesc)) {
+				PMD_RX_LOG(WARNING,
+					"VDQ command not issued.QBMAN busy\n");
+				/* Portal was busy, try again */
+				continue;
+			}
+			break;
+		}
+		is_repeat = 1;
+		q_storage->active_dqs = dq_storage1;
+		set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage1);
+	}
+
+	rte_prefetch0((void *)((uint64_t)(dq_storage + 1)));
+
+	num_pulled = 0;
+	pending = 1;
+
+	do {
 		/* Loop until the dq_storage is updated with
 		 * new token by QBMAN
 		 */
@@ -563,23 +619,23 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		 * setting Condition for Loop termination
 		 */
 		if (qbman_result_DQ_is_pull_complete(dq_storage)) {
-			is_last = 1;
+			pending = 0;
 			/* Check for valid frame. */
-			status = (uint8_t)qbman_result_DQ_flags(dq_storage);
+			status = qbman_result_DQ_flags(dq_storage);
 			if (unlikely((status & QBMAN_DQ_STAT_VALIDFRAME) == 0))
 				continue;
 		}
-		fd[num_rx] = qbman_result_DQ_fd(dq_storage);
+		fd = qbman_result_DQ_fd(dq_storage);
 
 		next_fd = qbman_result_DQ_fd(dq_storage + 1);
 		/* Prefetch Annotation address for the parse results */
-		rte_prefetch0((void *)((uint64_t)DPAA2_GET_FD_ADDR(next_fd)
+		rte_prefetch0((void *)(DPAA2_GET_FD_ADDR(next_fd)
 				+ DPAA2_FD_PTA_SIZE + 16));
 
-		if (unlikely(DPAA2_FD_GET_FORMAT(fd[num_rx]) == qbman_fd_sg))
-			bufs[num_rx] = eth_sg_fd_to_mbuf(fd[num_rx]);
+		if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
+			bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
 		else
-			bufs[num_rx] = eth_fd_to_mbuf(fd[num_rx]);
+			bufs[num_rx] = eth_fd_to_mbuf(fd);
 		bufs[num_rx]->port = dev->data->port_id;
 
 		if (dev->data->dev_conf.rxmode.hw_vlan_strip)
@@ -587,22 +643,37 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 
 		dq_storage++;
 		num_rx++;
-	}
+		num_pulled++;
+	} while (pending);
 
-	if (check_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)) {
-		while (!qbman_check_command_complete(
-		       get_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index)))
-			;
-		clear_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index);
+	/* Another VDQ request pending and this request returned full */
+	if (is_repeat) {
+		/* all packets pulled from this pull request */
+		if (num_pulled == num_to_pull)  {
+			/* pkt to pull in current pull request */
+			num_to_pull = q_storage->last_num_pkts;
+
+			dq_storage = dq_storage1;
+
+			while (!qbman_check_command_complete(dq_storage))
+				;
+			goto repeat;
+		} else {
+			/* if this request did not returned all pkts */
+			goto next_time;
+		}
 	}
+
 	q_storage->toggle ^= 1;
 	dq_storage = q_storage->dq_storage[q_storage->toggle];
+	q_storage->last_num_pkts = (nb_pkts > DPAA2_DQRR_RING_SIZE) ?
+				       DPAA2_DQRR_RING_SIZE : nb_pkts;
 	qbman_pull_desc_clear(&pulldesc);
-	qbman_pull_desc_set_numframes(&pulldesc, DPAA2_DQRR_RING_SIZE);
+	qbman_pull_desc_set_numframes(&pulldesc, q_storage->last_num_pkts);
 	qbman_pull_desc_set_fq(&pulldesc, fqid);
 	qbman_pull_desc_set_storage(&pulldesc, dq_storage,
 			(dma_addr_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
-	/* Issue a volatile dequeue command. */
+	/* issue a volatile dequeue command for next pull */
 	while (1) {
 		if (qbman_swp_pull(swp, &pulldesc)) {
 			PMD_RX_LOG(WARNING, "VDQ command is not issued."
@@ -612,12 +683,11 @@ dpaa2_dev_prefetch_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		break;
 	}
 	q_storage->active_dqs = dq_storage;
-	q_storage->active_dpio_id = DPAA2_PER_LCORE_DPIO->index;
 	set_swp_active_dqs(DPAA2_PER_LCORE_DPIO->index, dq_storage);
 
+next_time:
 	dpaa2_q->rx_pkts += num_rx;
 
-	/* Return the total number of packets received to DPAA2 app */
 	return num_rx;
 }
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH 2/5] net/dpaa2: optimize Rx/Tx path
  2018-01-15 11:38 [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func Hemant Agrawal
@ 2018-01-15 11:38 ` Hemant Agrawal
  2018-01-15 11:38 ` [PATCH 3/5] net/dpaa2: change vlan filter rule to be called on config Hemant Agrawal
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Hemant Agrawal @ 2018-01-15 11:38 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit

Merge the offload handling into the parse function to save instructions.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa2/dpaa2_rxtx.c | 57 +++++++++++++-----------------------------
 1 file changed, 17 insertions(+), 40 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_rxtx.c b/drivers/net/dpaa2/dpaa2_rxtx.c
index efad728..89b7c1a 100644
--- a/drivers/net/dpaa2/dpaa2_rxtx.c
+++ b/drivers/net/dpaa2/dpaa2_rxtx.c
@@ -164,15 +164,24 @@ dpaa2_dev_rx_parse_slow(uint64_t hw_annot_addr)
 	return pkt_type;
 }
 
-
 static inline uint32_t __attribute__((hot))
-dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
+dpaa2_dev_rx_parse(struct rte_mbuf *mbuf, uint64_t hw_annot_addr)
 {
 	struct dpaa2_annot_hdr *annotation =
 			(struct dpaa2_annot_hdr *)hw_annot_addr;
 
 	PMD_RX_LOG(DEBUG, "annotation = 0x%lx   ", annotation->word4);
 
+	/* Check offloads first */
+	if (BIT_ISSET_AT_POS(annotation->word3,
+			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
+		mbuf->ol_flags |= PKT_RX_VLAN;
+
+	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
+		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+	else if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
+		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+
 	/* Return some common types from parse processing */
 	switch (annotation->word4) {
 	case DPAA2_L3_IPv4:
@@ -199,23 +208,6 @@ dpaa2_dev_rx_parse(uint64_t hw_annot_addr)
 	return dpaa2_dev_rx_parse_slow(hw_annot_addr);
 }
 
-static inline void __attribute__((hot))
-dpaa2_dev_rx_offload(uint64_t hw_annot_addr, struct rte_mbuf *mbuf)
-{
-	struct dpaa2_annot_hdr *annotation =
-		(struct dpaa2_annot_hdr *)hw_annot_addr;
-
-	if (BIT_ISSET_AT_POS(annotation->word3,
-			     L2_VLAN_1_PRESENT | L2_VLAN_N_PRESENT))
-		mbuf->ol_flags |= PKT_RX_VLAN;
-
-	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L3CE))
-		mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
-
-	if (BIT_ISSET_AT_POS(annotation->word8, DPAA2_ETH_FAS_L4CE))
-		mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
-}
-
 static inline struct rte_mbuf *__attribute__((hot))
 eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
 {
@@ -247,14 +239,11 @@ eth_sg_fd_to_mbuf(const struct qbman_fd *fd)
 	if (dpaa2_svr_family == SVR_LX2160A)
 		dpaa2_dev_rx_parse_frc(first_seg,
 				DPAA2_GET_FD_FRC_PARSE_SUM(fd));
-	else {
-		first_seg->packet_type = dpaa2_dev_rx_parse(
+	else
+		first_seg->packet_type = dpaa2_dev_rx_parse(first_seg,
 			 (uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
 			 + DPAA2_FD_PTA_SIZE);
-		dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
-			DPAA2_GET_FD_ADDR(fd)) +
-			DPAA2_FD_PTA_SIZE, first_seg);
-	}
+
 	rte_mbuf_refcnt_set(first_seg, 1);
 	cur_seg = first_seg;
 	while (!DPAA2_SG_IS_FINAL(sge)) {
@@ -306,14 +295,10 @@ eth_fd_to_mbuf(const struct qbman_fd *fd)
 
 	if (dpaa2_svr_family == SVR_LX2160A)
 		dpaa2_dev_rx_parse_frc(mbuf, DPAA2_GET_FD_FRC_PARSE_SUM(fd));
-	else {
-		mbuf->packet_type = dpaa2_dev_rx_parse(
+	else
+		mbuf->packet_type = dpaa2_dev_rx_parse(mbuf,
 			(uint64_t)DPAA2_IOVA_TO_VADDR(DPAA2_GET_FD_ADDR(fd))
 			 + DPAA2_FD_PTA_SIZE);
-		dpaa2_dev_rx_offload((uint64_t)DPAA2_IOVA_TO_VADDR(
-			     DPAA2_GET_FD_ADDR(fd)) +
-			     DPAA2_FD_PTA_SIZE, mbuf);
-	}
 
 	PMD_RX_LOG(DEBUG, "to mbuf - mbuf =%p, mbuf->buf_addr =%p, off = %d,"
 		"fd_off=%d fd =%lx, meta = %d  bpid =%d, len=%d\n",
@@ -333,10 +318,6 @@ eth_mbuf_to_sg_fd(struct rte_mbuf *mbuf,
 	struct qbman_sge *sgt, *sge = NULL;
 	int i;
 
-	/* First Prepare FD to be transmited*/
-	/* Resetting the buffer pool id and offset field*/
-	fd->simple.bpid_offset = 0;
-
 	if (unlikely(mbuf->ol_flags & PKT_TX_VLAN_PKT)) {
 		int ret = rte_vlan_insert(&mbuf);
 		if (ret)
@@ -415,8 +396,6 @@ eth_mbuf_to_fd(struct rte_mbuf *mbuf,
 			return;
 		}
 	}
-	/*Resetting the buffer pool id and offset field*/
-	fd->simple.bpid_offset = 0;
 
 	DPAA2_MBUF_TO_CONTIG_FD(mbuf, fd, bpid);
 
@@ -472,9 +451,6 @@ eth_copy_mbuf_to_fd(struct rte_mbuf *mbuf,
 	m->packet_type = mbuf->packet_type;
 	m->tx_offload = mbuf->tx_offload;
 
-	/*Resetting the buffer pool id and offset field*/
-	fd->simple.bpid_offset = 0;
-
 	DPAA2_MBUF_TO_CONTIG_FD(m, fd, bpid);
 
 	PMD_TX_LOG(DEBUG, " mbuf %p BMAN buf addr %p",
@@ -764,6 +740,7 @@ dpaa2_dev_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
 		frames_to_send = (nb_pkts >> 3) ? MAX_TX_RING_SLOTS : nb_pkts;
 
 		for (loop = 0; loop < frames_to_send; loop++) {
+			fd_arr[loop].simple.bpid_offset = 0;
 			fd_arr[loop].simple.frc = 0;
 			DPAA2_RESET_FD_CTRL((&fd_arr[loop]));
 			DPAA2_SET_FD_FLC((&fd_arr[loop]), NULL);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH 3/5] net/dpaa2: change vlan filter rule to be called on config
  2018-01-15 11:38 [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func Hemant Agrawal
  2018-01-15 11:38 ` [PATCH 2/5] net/dpaa2: optimize Rx/Tx path Hemant Agrawal
@ 2018-01-15 11:38 ` Hemant Agrawal
  2018-01-15 11:38 ` [PATCH 4/5] net/dpaa2: use HASH FLCTYPE only for LX2 Hemant Agrawal
                   ` (2 subsequent siblings)
  4 siblings, 0 replies; 6+ messages in thread
From: Hemant Agrawal @ 2018-01-15 11:38 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.c | 21 ++++++++++-----------
 1 file changed, 10 insertions(+), 11 deletions(-)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 821c862..0763033 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -147,6 +147,12 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 	PMD_INIT_FUNC_TRACE();
 
 	if (mask & ETH_VLAN_FILTER_MASK) {
+		/* VLAN Filter not avaialble */
+		if (!priv->max_vlan_filters) {
+			RTE_LOG(INFO, PMD, "VLAN filter not available\n");
+			goto next_mask;
+		}
+
 		if (dev->data->dev_conf.rxmode.hw_vlan_filter)
 			ret = dpni_enable_vlan_filter(dpni, CMD_PRI_LOW,
 						      priv->token, true);
@@ -157,7 +163,7 @@ dpaa2_vlan_offload_set(struct rte_eth_dev *dev, int mask)
 			RTE_LOG(ERR, PMD, "Unable to set vlan filter = %d\n",
 				ret);
 	}
-
+next_mask:
 	if (mask & ETH_VLAN_EXTEND_MASK) {
 		if (dev->data->dev_conf.rxmode.hw_vlan_extend)
 			RTE_LOG(INFO, PMD,
@@ -374,6 +380,9 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	if (eth_conf->rxmode.hw_vlan_filter)
+		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
+
 	/* update the current status */
 	dpaa2_dev_link_update(dev, 0);
 
@@ -764,16 +773,6 @@ dpaa2_dev_start(struct rte_eth_dev *dev)
 			     "code = %d\n", ret);
 		return ret;
 	}
-	/* VLAN Offload Settings */
-	if (priv->max_vlan_filters) {
-		ret = dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
-		if (ret) {
-			PMD_INIT_LOG(ERR, "Error to dpaa2_vlan_offload_set:"
-				     "code = %d\n", ret);
-			return ret;
-		}
-	}
-
 
 	/* if the interrupts were configured on this devices*/
 	if (intr_handle && (intr_handle->fd) &&
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH 4/5] net/dpaa2: use HASH FLCTYPE only for LX2
  2018-01-15 11:38 [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func Hemant Agrawal
  2018-01-15 11:38 ` [PATCH 2/5] net/dpaa2: optimize Rx/Tx path Hemant Agrawal
  2018-01-15 11:38 ` [PATCH 3/5] net/dpaa2: change vlan filter rule to be called on config Hemant Agrawal
@ 2018-01-15 11:38 ` Hemant Agrawal
  2018-01-15 11:38 ` [PATCH 5/5] bus/fslmc: disable eventdev config with no dpaa2 eventdev Hemant Agrawal
  2018-01-17 19:11 ` [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func Ferruh Yigit
  4 siblings, 0 replies; 6+ messages in thread
From: Hemant Agrawal @ 2018-01-15 11:38 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit

From: Nipun Gupta <nipun.gupta@nxp.com>

Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
to 0 for LS2 in the hardware thus disabling data/annotation stashing.
For LX2 this is fixed in hardware and thus hash result and parse
results can be received in FD using this option.

Signed-off-by: Nipun Gupta <nipun.gupta@nxp.com>
Acked-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/net/dpaa2/dpaa2_ethdev.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/drivers/net/dpaa2/dpaa2_ethdev.c b/drivers/net/dpaa2/dpaa2_ethdev.c
index 0763033..8b1e4d2 100644
--- a/drivers/net/dpaa2/dpaa2_ethdev.c
+++ b/drivers/net/dpaa2/dpaa2_ethdev.c
@@ -380,6 +380,22 @@ dpaa2_eth_dev_configure(struct rte_eth_dev *dev)
 		return ret;
 	}
 
+	/* Enabling hash results in FD requires setting DPNI_FLCTYPE_HASH in
+	 * dpni_set_offload API. Setting this FLCTYPE for DPNI sets the FD[SC]
+	 * to 0 for LS2 in the hardware thus disabling data/annotation
+	 * stashing. For LX2 this is fixed in hardware and thus hash result and
+	 * parse results can be received in FD using this option.
+	 */
+	if (dpaa2_svr_family == SVR_LX2160A) {
+		ret = dpni_set_offload(dpni, CMD_PRI_LOW, priv->token,
+				       DPNI_FLCTYPE_HASH, true);
+		if (ret) {
+			PMD_INIT_LOG(ERR, "Error setting FLCTYPE: Err = %d\n",
+				     ret);
+			return ret;
+		}
+	}
+
 	if (eth_conf->rxmode.hw_vlan_filter)
 		dpaa2_vlan_offload_set(dev, ETH_VLAN_FILTER_MASK);
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH 5/5] bus/fslmc: disable eventdev config with no dpaa2 eventdev
  2018-01-15 11:38 [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func Hemant Agrawal
                   ` (2 preceding siblings ...)
  2018-01-15 11:38 ` [PATCH 4/5] net/dpaa2: use HASH FLCTYPE only for LX2 Hemant Agrawal
@ 2018-01-15 11:38 ` Hemant Agrawal
  2018-01-17 19:11 ` [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func Ferruh Yigit
  4 siblings, 0 replies; 6+ messages in thread
From: Hemant Agrawal @ 2018-01-15 11:38 UTC (permalink / raw)
  To: dev; +Cc: ferruh.yigit

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/bus/fslmc/portal/dpaa2_hw_dpio.c | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
index 6e349a7..537141d 100644
--- a/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
+++ b/drivers/bus/fslmc/portal/dpaa2_hw_dpio.c
@@ -86,6 +86,7 @@ dpaa2_core_cluster_sdest(int cpu_id)
 	return dpaa2_core_cluster_base + x;
 }
 
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
 static void dpaa2_affine_dpio_intr_to_respective_core(int32_t dpio_id)
 {
 #define STRING_LEN	28
@@ -174,6 +175,7 @@ static int dpaa2_dpio_intr_init(struct dpaa2_dpio_dev *dpio_dev)
 
 	return 0;
 }
+#endif
 
 static int
 configure_dpio_qbman_swp(struct dpaa2_dpio_dev *dpio_dev)
@@ -266,10 +268,12 @@ dpaa2_configure_stashing(struct dpaa2_dpio_dev *dpio_dev, int cpu_id)
 		return -1;
 	}
 
+#ifdef RTE_LIBRTE_PMD_DPAA2_EVENTDEV
 	if (dpaa2_dpio_intr_init(dpio_dev)) {
 		PMD_DRV_LOG(ERR, "Interrupt registration failed for dpio\n");
 		return -1;
 	}
+#endif
 
 	return 0;
 }
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func
  2018-01-15 11:38 [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func Hemant Agrawal
                   ` (3 preceding siblings ...)
  2018-01-15 11:38 ` [PATCH 5/5] bus/fslmc: disable eventdev config with no dpaa2 eventdev Hemant Agrawal
@ 2018-01-17 19:11 ` Ferruh Yigit
  4 siblings, 0 replies; 6+ messages in thread
From: Ferruh Yigit @ 2018-01-17 19:11 UTC (permalink / raw)
  To: Hemant Agrawal, dev

On 1/15/2018 11:38 AM, Hemant Agrawal wrote:
> This patch enhances the Rx function to support more than
> 16 burst size.
> 
> Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>

Series applied to dpdk-next-net/master, thanks.

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2018-01-17 19:11 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-01-15 11:38 [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func Hemant Agrawal
2018-01-15 11:38 ` [PATCH 2/5] net/dpaa2: optimize Rx/Tx path Hemant Agrawal
2018-01-15 11:38 ` [PATCH 3/5] net/dpaa2: change vlan filter rule to be called on config Hemant Agrawal
2018-01-15 11:38 ` [PATCH 4/5] net/dpaa2: use HASH FLCTYPE only for LX2 Hemant Agrawal
2018-01-15 11:38 ` [PATCH 5/5] bus/fslmc: disable eventdev config with no dpaa2 eventdev Hemant Agrawal
2018-01-17 19:11 ` [PATCH 1/5] net/dpaa2: support more than 16 burst size in Rx func Ferruh Yigit

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.