* [PATCH 1/3] net/octeontx: add support for event Rx adapter
@ 2017-10-17  8:53 Pavan Nikhilesh
  2017-10-17  8:53 ` [PATCH 2/3] event/octeontx: " Pavan Nikhilesh
From: Pavan Nikhilesh @ 2017-10-17  8:53 UTC (permalink / raw)
  To: santosh.shukla, jerin.jacob; +Cc: dev, Pavan Nikhilesh

Add functions to modify and delete the QoS entries responsible for mapping
eth queues to event queues; these are used when configuring the event Rx
adapter.
The mbox functions have been moved from octeontx_pkivf.c to
octeontx_pkivf.h so that event_octeontx can access them.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---

This patch set depends on the series http://dpdk.org/dev/patchwork/patch/30430/
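
As a quick reference, a minimal sketch of how the new
octeontx_pki_port_modify_qos() helper is meant to be driven (this mirrors
the call site added in patch 2/3; "port" and "ev_queue" are placeholders):

	pki_mod_qos_t pki_qos;

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));
	/* select which QOS entry fields the mailbox should update */
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	/* steer both ok and bad packets to the chosen event queue */
	pki_qos.tag_type = RTE_SCHED_TYPE_ATOMIC;
	pki_qos.qos_entry.ggrp_ok = ev_queue;
	pki_qos.qos_entry.ggrp_bad = ev_queue;
	if (octeontx_pki_port_modify_qos(port, &pki_qos) < 0)
		return -EACCES; /* mbox send failed */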

 drivers/net/octeontx/base/octeontx_pkivf.c |  65 ---------------
 drivers/net/octeontx/base/octeontx_pkivf.h | 126 ++++++++++++++++++++++++++++-
 drivers/net/octeontx/octeontx_ethdev.c     |   3 +-
 drivers/net/octeontx/octeontx_rxtx.c       | 108 +------------------------
 drivers/net/octeontx/octeontx_rxtx.h       |  89 ++++++++++++++++++++
 5 files changed, 216 insertions(+), 175 deletions(-)

diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index afae6a3..f9e4053 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -111,71 +111,6 @@ octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
 	return res;
 }

-int
-octeontx_pki_port_close(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_CLOSE;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_start(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_START;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_stop(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_STOP;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}

 int
 octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index 7cf8332..004dedc 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -240,10 +240,18 @@ typedef struct mbox_pki_port_modify_qos_entry {
 		uint8_t f_gaura:1;
 		uint8_t f_grptag_ok:1;
 		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct mbox_pki_qos_entry qos_entry;
 } mbox_pki_mod_qos_t;

+/* pki flow/style enable qos */
+typedef struct mbox_pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} mbox_pki_del_qos_t;
+
 /* PKI maximum constants */
 #define PKI_VF_MAX			(1)
 #define PKI_MAX_PKTLEN			(32768)
@@ -407,6 +415,12 @@ typedef struct pki_port_create_qos {
 } pki_qos_cfg_t;

 /* pki flow/style enable qos */
+typedef struct pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} pki_del_qos_t;
+
+/* pki flow/style enable qos */
 typedef struct pki_port_modify_qos_entry {
 	uint8_t port_type;
 	uint16_t index;
@@ -415,17 +429,125 @@ typedef struct pki_port_modify_qos_entry {
 		uint8_t f_grp_ok:1;
 		uint8_t f_grp_bad:1;
 		uint8_t f_gaura:1;
+		uint8_t f_grptag_ok:1;
+		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct pki_qos_entry qos_entry;
 } pki_mod_qos_t;

+static inline int
+octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_mod_qos_t q_cfg = *(mbox_pki_mod_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_mod_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_del_qos_t q_cfg = *(mbox_pki_del_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_del_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_close(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_CLOSE;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_start(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_START;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_stop(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_STOP;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
 int octeontx_pki_port_open(int port);
 int octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg);
 int octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg);
 int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg);
 int octeontx_pki_port_close(int port);
-int octeontx_pki_port_start(int port);
-int octeontx_pki_port_stop(int port);
 int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg);

 #endif /* __OCTEONTX_PKI_H__ */
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 82e38c2..86de5d1 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -930,6 +930,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			pki_hash.tag_slc = 1;
 			pki_hash.tag_dlf = 1;
 			pki_hash.tag_slf = 1;
+			pki_hash.tag_prt = 1;
 			octeontx_pki_port_hash_config(port, &pki_hash);
 		}

@@ -941,7 +942,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
 		pki_qos.num_entry = 1;
 		pki_qos.drop_policy = 0;
-		pki_qos.tag_type = 2L;
+		pki_qos.tag_type = 0L;
 		pki_qos.qos_entry[0].port_add = 0;
 		pki_qos.qos_entry[0].gaura = gaura;
 		pki_qos.qos_entry[0].ggrp_ok = ev_queues;
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 0b15833..99531cd 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -47,94 +47,6 @@
 #include "octeontx_rxtx.h"
 #include "octeontx_logs.h"

-/* Packet type table */
-#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
-
-static const uint32_t __rte_cache_aligned
-ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
-	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
-	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
-	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
-	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
-	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
-	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
-	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
-	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
-	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-};

 static __rte_always_inline uint16_t __hot
 __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
@@ -195,10 +107,8 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 uint16_t __hot
 octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-	struct rte_mbuf *mbuf;
 	struct octeontx_rxq *rxq;
 	struct rte_event ev;
-	octtx_wqe_t *wqe;
 	size_t count;
 	uint16_t valid_event;

@@ -210,23 +120,7 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 							1, 0);
 		if (!valid_event)
 			break;
-
-		wqe = (octtx_wqe_t *)(uintptr_t)ev.u64;
-		rte_prefetch_non_temporal(wqe);
-
-		/* Get mbuf from wqe */
-		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
-						OCTTX_PACKET_WQE_SKIP);
-		mbuf->packet_type =
-		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
-		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
-		mbuf->pkt_len = wqe->s.w1.len;
-		mbuf->data_len = mbuf->pkt_len;
-		mbuf->nb_segs = 1;
-		mbuf->ol_flags = 0;
-		mbuf->port = rxq->port_id;
-		rte_mbuf_refcnt_set(mbuf, 1);
-		rx_pkts[count++] = mbuf;
+		rx_pkts[count++] = (struct rte_mbuf *)ev.u64;
 	}

 	return count; /* return number of pkts received */
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 1f91532..382ff2b 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -39,6 +39,95 @@
 #define __hot	__attribute__((hot))
 #endif

+/* Packet type table */
+#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
+
+static const uint32_t __rte_cache_aligned
+ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
+	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
+	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
+	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
+	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
+	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+};
+
 uint16_t
 octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

--
2.7.4


* [PATCH 2/3] event/octeontx: add support for event Rx adapter
  2017-10-17  8:53 [PATCH 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
@ 2017-10-17  8:53 ` Pavan Nikhilesh
  2017-10-17 11:11   ` Rao, Nikhil
  2017-10-17  8:53 ` [PATCH 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
From: Pavan Nikhilesh @ 2017-10-17  8:53 UTC (permalink / raw)
  To: santosh.shukla, jerin.jacob; +Cc: dev, Pavan Nikhilesh

Add Rx adapter queue add and delete APIs for both generic eth_devices and
the HW-backed eth_octeontx, which supports direct event injection into the
event device.
A HW-injected event needs to be converted into an mbuf. Previously this was
done in eth_octeontx during rx_burst; it is now moved to event_octeontx, as
events from the Rx adapter are dequeued directly from the event device.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
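
For context, the intended application-side flow looks roughly as below
(a sketch only, not part of this patch; adptr_id, dev_id, eth_port_id and
port_conf are assumed to be set up by the application):

	struct rte_event_eth_rx_adapter_queue_conf qconf;
	uint32_t caps;

	/* eth_octeontx reports RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT */
	rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps);

	memset(&qconf, 0, sizeof(qconf));
	qconf.ev.queue_id = 0;
	qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC; /* PARALLEL -> -ENOTSUP */

	rte_event_eth_rx_adapter_create(adptr_id, dev_id, &port_conf);
	/* per Rx queue add is not supported, rx_queue_id must be -1 */
	rte_event_eth_rx_adapter_queue_add(adptr_id, eth_port_id, -1, &qconf);
	rte_event_eth_rx_adapter_start(adptr_id);
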
 drivers/event/octeontx/Makefile       |   1 +
 drivers/event/octeontx/ssovf_evdev.c  | 126 ++++++++++++++++++++++++++++++++++
 drivers/event/octeontx/ssovf_evdev.h  |   1 +
 drivers/event/octeontx/ssovf_worker.h |  31 ++++++++-
 4 files changed, 156 insertions(+), 3 deletions(-)

diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index 08fc167..7f7b9b3 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -39,6 +39,7 @@ LIB = librte_pmd_octeontx_ssovf.a
 
 CFLAGS += $(WERROR_FLAGS)
 CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/
 
 EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map
 
diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index d829b49..7bdc85d 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -36,6 +36,8 @@
 #include <rte_debug.h>
 #include <rte_dev.h>
 #include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
 #include <rte_malloc.h>
@@ -395,6 +397,123 @@ ssows_dump(struct ssows *ws, FILE *f)
 	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
 }
 
+static int
+ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+	int ret;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	else
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
+
+	return 0;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
+		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	int ret = 0;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	pki_mod_qos_t pki_qos;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return -EINVAL;
+
+	if (rx_queue_id >= 0)
+		return -EINVAL;
+
+	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+		return -ENOTSUP;
+
+	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));
+
+	pki_qos.port_type = 0;
+	pki_qos.index = 0;
+	pki_qos.mmask.f_tag_type = 1;
+	pki_qos.mmask.f_port_add = 1;
+	pki_qos.mmask.f_grp_ok = 1;
+	pki_qos.mmask.f_grp_bad = 1;
+	pki_qos.mmask.f_grptag_ok = 1;
+	pki_qos.mmask.f_grptag_bad = 1;
+
+	pki_qos.tag_type = queue_conf->ev.sched_type;
+	pki_qos.qos_entry.port_add = 0;
+	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
+	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
+	pki_qos.qos_entry.grptag_bad = 0;
+	pki_qos.qos_entry.grptag_ok = 0;
+
+	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
+	if (ret < 0)
+		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
+				nic->port_id, queue_conf->ev.queue_id);
+
+	return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
+{
+	int ret = 0;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	pki_del_qos_t pki_qos;
+	RTE_SET_USED(dev);
+	RTE_SET_USED(rx_queue_id);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return -EINVAL;
+
+	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
+	pki_qos.port_type = 0;
+	pki_qos.index = 0;
+	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
+	if (ret < 0)
+		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
+				nic->port_id, rx_queue_id);
+	return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
+					const struct rte_eth_dev *eth_dev)
+{
+	int ret;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return 0;
+	octeontx_pki_port_start(nic->port_id);
+	return 0;
+}
+
+
+static int
+ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev)
+{
+	int ret;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return 0;
+	octeontx_pki_port_stop(nic->port_id);
+	return 0;
+}
+
 static void
 ssovf_dump(struct rte_eventdev *dev, FILE *f)
 {
@@ -488,6 +607,13 @@ static const struct rte_eventdev_ops ssovf_ops = {
 	.port_link        = ssovf_port_link,
 	.port_unlink      = ssovf_port_unlink,
 	.timeout_ticks    = ssovf_timeout_ticks,
+
+	.eth_rx_adapter_caps_get  = ssovf_eth_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
+	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
+	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+
 	.dump             = ssovf_dump,
 	.dev_start        = ssovf_start,
 	.dev_stop         = ssovf_stop,
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 933c5a3..bbce492 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -38,6 +38,7 @@
 #include <rte_io.h>
 
 #include <octeontx_mbox.h>
+#include <octeontx_ethdev.h>
 
 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx
 
diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 8dc1264..bd3d71a 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -34,9 +34,11 @@
 #include <rte_common.h>
 #include <rte_branch_prediction.h>
 
-#include "ssovf_evdev.h"
 #include <octeontx_mbox.h>
 
+#include "ssovf_evdev.h"
+#include "octeontx_rxtx.h"
+
 enum {
 	SSO_SYNC_ORDERED,
 	SSO_SYNC_ATOMIC,
@@ -50,6 +52,28 @@ enum {
 
 /* SSO Operations */
 
+static __rte_always_inline struct rte_mbuf *
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
+{
+	struct rte_mbuf *mbuf;
+	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
+	rte_prefetch_non_temporal(wqe);
+
+	/* Get mbuf from wqe */
+	mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
+			OCTTX_PACKET_WQE_SKIP);
+	mbuf->packet_type =
+		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
+	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
+	mbuf->pkt_len = wqe->s.w1.len;
+	mbuf->data_len = mbuf->pkt_len;
+	mbuf->nb_segs = 1;
+	mbuf->ol_flags = 0;
+	mbuf->port = port_id;
+	rte_mbuf_refcnt_set(mbuf, 1);
+	return mbuf;
+}
+
 static __rte_always_inline uint16_t
 ssows_get_work(struct ssows *ws, struct rte_event *ev)
 {
@@ -62,9 +86,10 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
 	ws->cur_tt = sched_type_queue & 0x3;
 	ws->cur_grp = sched_type_queue >> 2;
 	sched_type_queue = sched_type_queue << 38;
-
 	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
-	ev->u64 = get_work1;
+	ev->u64 = get_work1 && !ev->event_type ?
+		(uint64_t)ssovf_octeontx_wqe_to_pkt(get_work1,
+				(ev->event >> 20) & 0xF) : get_work1;
 	return !!get_work1;
 }
 
-- 
2.7.4


* [PATCH 3/3] doc: add event octeontx Rx adapter limitation
  2017-10-17  8:53 [PATCH 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
  2017-10-17  8:53 ` [PATCH 2/3] event/octeontx: " Pavan Nikhilesh
@ 2017-10-17  8:53 ` Pavan Nikhilesh
  2017-10-18  8:48   ` Mcnamara, John
  2017-10-17  9:29 ` [PATCH 1/3] net/octeontx: add support for event Rx adapter Bhagavatula, Pavan
  2017-10-18  8:45 ` [PATCH v2 " Pavan Nikhilesh
From: Pavan Nikhilesh @ 2017-10-17  8:53 UTC (permalink / raw)
  To: santosh.shukla, jerin.jacob; +Cc: dev, Pavan Nikhilesh

Document the limitation when using eth_octeontx as an Rx adapter with
event_octeontx.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
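
For context (not part of this patch): with patch 2/3 applied, requesting
the unsupported schedule type fails at queue add time, roughly:

	qconf.ev.sched_type = RTE_SCHED_TYPE_PARALLEL;
	ret = rte_event_eth_rx_adapter_queue_add(adptr_id, eth_port_id,
						 -1, &qconf);
	/* ret == -ENOTSUP when event_octeontx drives eth_octeontx */
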
 doc/guides/eventdevs/octeontx.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index b43d515..4412bfa 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -149,3 +149,8 @@ Burst mode support
 Burst mode is not supported. Dequeue and Enqueue functions accepts only single
 event at a time.
 
+Rx adapter support
+~~~~~~~~~~~~~~~~~~
+
+When eth_octeontx is used as an Rx adapter, the event schedule type
+RTE_SCHED_TYPE_PARALLEL is not supported.
-- 
2.7.4


* Re: [PATCH 1/3] net/octeontx: add support for event Rx adapter
  2017-10-17  8:53 [PATCH 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
  2017-10-17  8:53 ` [PATCH 2/3] event/octeontx: " Pavan Nikhilesh
  2017-10-17  8:53 ` [PATCH 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
@ 2017-10-17  9:29 ` Bhagavatula, Pavan
  2017-10-18  8:45 ` [PATCH v2 " Pavan Nikhilesh
From: Bhagavatula, Pavan @ 2017-10-17  9:29 UTC (permalink / raw)
  To: Shukla, Santosh, Jacob,  Jerin, nikhil.rao; +Cc: dev

++ Nikhil Rao


________________________________
From: Bhagavatula, Pavan
Sent: 17 October 2017 14:23
To: Shukla, Santosh; Jacob, Jerin
Cc: dev@dpdk.org; Bhagavatula, Pavan
Subject: [dpdk-dev] [PATCH 1/3] net/octeontx: add support for event Rx adapter

<snip>


* Re: [PATCH 2/3] event/octeontx: add support for event Rx adapter
  2017-10-17  8:53 ` [PATCH 2/3] event/octeontx: " Pavan Nikhilesh
@ 2017-10-17 11:11   ` Rao, Nikhil
  2017-10-18  8:12     ` Pavan Nikhilesh Bhagavatula
  2017-10-24 14:27     ` Jerin Jacob
From: Rao, Nikhil @ 2017-10-17 11:11 UTC (permalink / raw)
  To: Pavan Nikhilesh, santosh.shukla, jerin.jacob; +Cc: dev

On 10/17/2017 2:23 PM, Pavan Nikhilesh wrote:
<snip>
>   static __rte_always_inline uint16_t
>   ssows_get_work(struct ssows *ws, struct rte_event *ev)
>   {
> @@ -62,9 +86,10 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
>   	ws->cur_tt = sched_type_queue & 0x3;
>   	ws->cur_grp = sched_type_queue >> 2;
>   	sched_type_queue = sched_type_queue << 38;
> -
>   	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
> -	ev->u64 = get_work1;
> +	ev->u64 = get_work1 && !ev->event_type ?

I think the code readability would be better if you compared event_type 
to RTE_EVENT_TYPE_ETHDEV.
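
Something along these lines, I suppose (illustrative only; the port
argument is elided here):

	ev->u64 = get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV ?
		(uint64_t)ssovf_octeontx_wqe_to_pkt(get_work1, port) : get_work1;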

Reviewed-by: Nikhil Rao <nikhil.rao@intel.com>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 2/3] event/octeontx: add support for event Rx adapter
  2017-10-17 11:11   ` Rao, Nikhil
@ 2017-10-18  8:12     ` Pavan Nikhilesh Bhagavatula
  2017-10-24 14:27     ` Jerin Jacob
  1 sibling, 0 replies; 22+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2017-10-18  8:12 UTC (permalink / raw)
  To: Rao, Nikhil; +Cc: dev

On Tue, Oct 17, 2017 at 04:41:44PM +0530, Rao, Nikhil wrote:

<snip>

> >  static __rte_always_inline uint16_t
> >  ssows_get_work(struct ssows *ws, struct rte_event *ev)
> >  {
> >@@ -62,9 +86,10 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
> >  	ws->cur_tt = sched_type_queue & 0x3;
> >  	ws->cur_grp = sched_type_queue >> 2;
> >  	sched_type_queue = sched_type_queue << 38;
> >-
> >  	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
> >-	ev->u64 = get_work1;
> >+	ev->u64 = get_work1 && !ev->event_type ?
>
> I think the code readability would be better if you compared event_type to
> RTE_EVENT_TYPE_ETHDEV.
>
Agreed,
Will send out a v2.

Thanks,
Pavan.
> Reviewed-by: Nikhil Rao <nikhil.rao@intel.com>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v2 1/3] net/octeontx: add support for event Rx adapter
  2017-10-17  8:53 [PATCH 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
                   ` (2 preceding siblings ...)
  2017-10-17  9:29 ` [PATCH 1/3] net/octeontx: add support for event Rx adapter Bhagavatula, Pavan
@ 2017-10-18  8:45 ` Pavan Nikhilesh
  2017-10-18  8:45   ` [PATCH v2 2/3] event/octeontx: " Pavan Nikhilesh
                     ` (4 more replies)
  3 siblings, 5 replies; 22+ messages in thread
From: Pavan Nikhilesh @ 2017-10-18  8:45 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, nikhil.rao; +Cc: dev, Pavan Nikhilesh

Add functions to modify and delete qos responsible for mapping eth queues
to event queues used for configuring event Rx adapter.
The mbox functions have been moved from octeontx_pkivf.c to
octeontx_pkivf.h to allow event_octeontx to access them.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---

 v2 changes:
 - Improve conditional statement readability (Nikhil).

 This series depends on http://dpdk.org/dev/patchwork/patch/30430
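
 For reference, a minimal sketch of how the new inline qos helpers are
 called (field values mirror the queue del path in patch 2/3; the nic
 pointer is whatever octeontx_nic instance the caller holds):

	pki_del_qos_t pki_qos;
	int ret;

	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	/* ret is non-negative on success, -EACCES if the mbox send failed */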

 drivers/net/octeontx/base/octeontx_pkivf.c |  65 ---------------
 drivers/net/octeontx/base/octeontx_pkivf.h | 126 ++++++++++++++++++++++++++++-
 drivers/net/octeontx/octeontx_ethdev.c     |   3 +-
 drivers/net/octeontx/octeontx_rxtx.c       | 108 +------------------------
 drivers/net/octeontx/octeontx_rxtx.h       |  89 ++++++++++++++++++++
 5 files changed, 216 insertions(+), 175 deletions(-)

diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index afae6a3..f9e4053 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -111,71 +111,6 @@ octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
 	return res;
 }

-int
-octeontx_pki_port_close(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_CLOSE;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_start(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_START;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_stop(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_STOP;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}

 int
 octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index 7cf8332..004dedc 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -240,10 +240,18 @@ typedef struct mbox_pki_port_modify_qos_entry {
 		uint8_t f_gaura:1;
 		uint8_t f_grptag_ok:1;
 		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct mbox_pki_qos_entry qos_entry;
 } mbox_pki_mod_qos_t;

+/* pki flow/style enable qos */
+typedef struct mbox_pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} mbox_pki_del_qos_t;
+
 /* PKI maximum constants */
 #define PKI_VF_MAX			(1)
 #define PKI_MAX_PKTLEN			(32768)
@@ -407,6 +415,12 @@ typedef struct pki_port_create_qos {
 } pki_qos_cfg_t;

 /* pki flow/style enable qos */
+typedef struct pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} pki_del_qos_t;
+
+/* pki flow/style enable qos */
 typedef struct pki_port_modify_qos_entry {
 	uint8_t port_type;
 	uint16_t index;
@@ -415,17 +429,125 @@ typedef struct pki_port_modify_qos_entry {
 		uint8_t f_grp_ok:1;
 		uint8_t f_grp_bad:1;
 		uint8_t f_gaura:1;
+		uint8_t f_grptag_ok:1;
+		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct pki_qos_entry qos_entry;
 } pki_mod_qos_t;

+static inline int
+octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_mod_qos_t q_cfg = *(mbox_pki_mod_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_mod_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_del_qos_t q_cfg = *(mbox_pki_del_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_del_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_close(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_CLOSE;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_start(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_START;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_stop(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_STOP;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
 int octeontx_pki_port_open(int port);
 int octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg);
 int octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg);
 int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg);
 int octeontx_pki_port_close(int port);
-int octeontx_pki_port_start(int port);
-int octeontx_pki_port_stop(int port);
 int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg);

 #endif /* __OCTEONTX_PKI_H__ */
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 82e38c2..86de5d1 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -930,6 +930,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			pki_hash.tag_slc = 1;
 			pki_hash.tag_dlf = 1;
 			pki_hash.tag_slf = 1;
+			pki_hash.tag_prt = 1;
 			octeontx_pki_port_hash_config(port, &pki_hash);
 		}

@@ -941,7 +942,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
 		pki_qos.num_entry = 1;
 		pki_qos.drop_policy = 0;
-		pki_qos.tag_type = 2L;
+		pki_qos.tag_type = 0L;
 		pki_qos.qos_entry[0].port_add = 0;
 		pki_qos.qos_entry[0].gaura = gaura;
 		pki_qos.qos_entry[0].ggrp_ok = ev_queues;
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 0b15833..99531cd 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -47,94 +47,6 @@
 #include "octeontx_rxtx.h"
 #include "octeontx_logs.h"

-/* Packet type table */
-#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
-
-static const uint32_t __rte_cache_aligned
-ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
-	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
-	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
-	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
-	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
-	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
-	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
-	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
-	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
-	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-};

 static __rte_always_inline uint16_t __hot
 __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
@@ -195,10 +107,8 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 uint16_t __hot
 octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-	struct rte_mbuf *mbuf;
 	struct octeontx_rxq *rxq;
 	struct rte_event ev;
-	octtx_wqe_t *wqe;
 	size_t count;
 	uint16_t valid_event;

@@ -210,23 +120,7 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 							1, 0);
 		if (!valid_event)
 			break;
-
-		wqe = (octtx_wqe_t *)(uintptr_t)ev.u64;
-		rte_prefetch_non_temporal(wqe);
-
-		/* Get mbuf from wqe */
-		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
-						OCTTX_PACKET_WQE_SKIP);
-		mbuf->packet_type =
-		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
-		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
-		mbuf->pkt_len = wqe->s.w1.len;
-		mbuf->data_len = mbuf->pkt_len;
-		mbuf->nb_segs = 1;
-		mbuf->ol_flags = 0;
-		mbuf->port = rxq->port_id;
-		rte_mbuf_refcnt_set(mbuf, 1);
-		rx_pkts[count++] = mbuf;
+		rx_pkts[count++] = (struct rte_mbuf *)ev.u64;
 	}

 	return count; /* return number of pkts received */
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 1f91532..382ff2b 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -39,6 +39,95 @@
 #define __hot	__attribute__((hot))
 #endif

+/* Packet type table */
+#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
+
+static const uint32_t __rte_cache_aligned
+ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
+	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
+	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
+	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
+	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
+	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,
+	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+};
+
 uint16_t
 octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

--
2.7.4

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH v2 2/3] event/octeontx: add support for event Rx adapter
  2017-10-18  8:45 ` [PATCH v2 " Pavan Nikhilesh
@ 2017-10-18  8:45   ` Pavan Nikhilesh
  2017-10-18  8:45   ` [PATCH v2 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
                     ` (3 subsequent siblings)
  4 siblings, 0 replies; 22+ messages in thread
From: Pavan Nikhilesh @ 2017-10-18  8:45 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, nikhil.rao; +Cc: dev, Pavan Nikhilesh

Add Rx adapter queue add and delete API for both generic eth_devices as
well as the HW-backed eth_octeontx, which supports direct event injection
to the event device.
The HW-injected event needs to be converted into an mbuf; previously this
was done in eth_octeontx during rx_burst, but it is now moved to
event_octeontx, as events from the Rx adapter are dequeued directly from
the event device.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Reviewed-by: Nikhil Rao <nikhil.rao@intel.com>
---
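
 For anyone trying this out, an illustrative application-side sequence;
 the ids and the event port config are placeholders supplied by the
 caller, not part of this patch:

	#include <string.h>
	#include <rte_eventdev.h>
	#include <rte_event_eth_rx_adapter.h>

	static int
	rx_adapter_setup(uint8_t adapter_id, uint8_t ev_id, uint8_t eth_port,
			 uint8_t ev_queue, struct rte_event_port_conf *pconf)
	{
		struct rte_event_eth_rx_adapter_queue_conf qconf;
		uint32_t caps;
		int ret;

		/* eth_octeontx reports RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT */
		ret = rte_event_eth_rx_adapter_caps_get(ev_id, eth_port, &caps);
		if (ret)
			return ret;

		memset(&qconf, 0, sizeof(qconf));
		qconf.ev.queue_id = ev_queue;
		qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC; /* PARALLEL -> -ENOTSUP */
		qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

		ret = rte_event_eth_rx_adapter_create(adapter_id, ev_id, pconf);
		if (ret)
			return ret;

		/* this PMD only accepts rx_queue_id == -1 (all Rx queues) */
		ret = rte_event_eth_rx_adapter_queue_add(adapter_id, eth_port,
							 -1, &qconf);
		if (ret)
			return ret;

		return rte_event_eth_rx_adapter_start(adapter_id);
	}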
 drivers/event/octeontx/Makefile       |   1 +
 drivers/event/octeontx/ssovf_evdev.c  | 126 ++++++++++++++++++++++++++++++++++
 drivers/event/octeontx/ssovf_evdev.h  |   1 +
 drivers/event/octeontx/ssovf_worker.h |  31 ++++++++-
 4 files changed, 156 insertions(+), 3 deletions(-)

diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index 08fc167..7f7b9b3 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -39,6 +39,7 @@ LIB = librte_pmd_octeontx_ssovf.a

 CFLAGS += $(WERROR_FLAGS)
 CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/

 EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map

diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index d829b49..7bdc85d 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -36,6 +36,8 @@
 #include <rte_debug.h>
 #include <rte_dev.h>
 #include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
 #include <rte_malloc.h>
@@ -395,6 +397,123 @@ ssows_dump(struct ssows *ws, FILE *f)
 	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
 }

+static int
+ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+	int ret;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	else
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
+
+	return 0;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
+		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	int ret = 0;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	pki_mod_qos_t pki_qos;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return -EINVAL;
+
+	if (rx_queue_id >= 0)
+		return -EINVAL;
+
+	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+		return -ENOTSUP;
+
+	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));
+
+	pki_qos.port_type = 0;
+	pki_qos.index = 0;
+	pki_qos.mmask.f_tag_type = 1;
+	pki_qos.mmask.f_port_add = 1;
+	pki_qos.mmask.f_grp_ok = 1;
+	pki_qos.mmask.f_grp_bad = 1;
+	pki_qos.mmask.f_grptag_ok = 1;
+	pki_qos.mmask.f_grptag_bad = 1;
+
+	pki_qos.tag_type = queue_conf->ev.sched_type;
+	pki_qos.qos_entry.port_add = 0;
+	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
+	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
+	pki_qos.qos_entry.grptag_bad = 0;
+	pki_qos.qos_entry.grptag_ok = 0;
+
+	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
+	if (ret < 0)
+		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
+				nic->port_id, queue_conf->ev.queue_id);
+
+	return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
+{
+	int ret = 0;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	pki_del_qos_t pki_qos;
+	RTE_SET_USED(dev);
+	RTE_SET_USED(rx_queue_id);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return -EINVAL;
+
+	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
+	pki_qos.port_type = 0;
+	pki_qos.index = 0;
+	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
+	if (ret < 0)
+		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
+				nic->port_id, rx_queue_id);
+	return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
+					const struct rte_eth_dev *eth_dev)
+{
+	int ret;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return 0;
+	octeontx_pki_port_start(nic->port_id);
+	return 0;
+}
+
+
+static int
+ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev)
+{
+	int ret;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return 0;
+	octeontx_pki_port_stop(nic->port_id);
+	return 0;
+}
+
 static void
 ssovf_dump(struct rte_eventdev *dev, FILE *f)
 {
@@ -488,6 +607,13 @@ static const struct rte_eventdev_ops ssovf_ops = {
 	.port_link        = ssovf_port_link,
 	.port_unlink      = ssovf_port_unlink,
 	.timeout_ticks    = ssovf_timeout_ticks,
+
+	.eth_rx_adapter_caps_get  = ssovf_eth_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
+	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
+	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+
 	.dump             = ssovf_dump,
 	.dev_start        = ssovf_start,
 	.dev_stop         = ssovf_stop,
diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 933c5a3..bbce492 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -38,6 +38,7 @@
 #include <rte_io.h>

 #include <octeontx_mbox.h>
+#include <octeontx_ethdev.h>

 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx

diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 8dc1264..57a3c12 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -34,9 +34,11 @@
 #include <rte_common.h>
 #include <rte_branch_prediction.h>

-#include "ssovf_evdev.h"
 #include <octeontx_mbox.h>

+#include "ssovf_evdev.h"
+#include "octeontx_rxtx.h"
+
 enum {
 	SSO_SYNC_ORDERED,
 	SSO_SYNC_ATOMIC,
@@ -50,6 +52,28 @@ enum {

 /* SSO Operations */

+static __rte_always_inline struct rte_mbuf *
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
+{
+	struct rte_mbuf *mbuf;
+	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
+	rte_prefetch_non_temporal(wqe);
+
+	/* Get mbuf from wqe */
+	mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
+			OCTTX_PACKET_WQE_SKIP);
+	mbuf->packet_type =
+		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
+	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
+	mbuf->pkt_len = wqe->s.w1.len;
+	mbuf->data_len = mbuf->pkt_len;
+	mbuf->nb_segs = 1;
+	mbuf->ol_flags = 0;
+	mbuf->port = port_id;
+	rte_mbuf_refcnt_set(mbuf, 1);
+	return mbuf;
+}
+
 static __rte_always_inline uint16_t
 ssows_get_work(struct ssows *ws, struct rte_event *ev)
 {
@@ -62,9 +86,10 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
 	ws->cur_tt = sched_type_queue & 0x3;
 	ws->cur_grp = sched_type_queue >> 2;
 	sched_type_queue = sched_type_queue << 38;
-
 	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
-	ev->u64 = get_work1;
+	ev->u64 = get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV ?
+		(uint64_t)ssovf_octeontx_wqe_to_pkt(get_work1,
+				(ev->event >> 20) & 0xF) : get_work1;
 	return !!get_work1;
 }

--
2.7.4
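
A note on the ssows_get_work() conversion in the patch above: in the
rte_event layout, event_type occupies bits [31:28] of ev->event, with
sched_type at bits [39:38] and queue_id at bits [47:40], which is what
the << 38 shift assembles. The (ev->event >> 20) & 0xF term appears to
recover the ingress port from the low bits of the sub_event_type field,
which the SSO hardware presumably fills because patch 1/3 enables
pki_hash.tag_prt. A sketch of that decode, assuming this layout (the
helper name is illustrative):

	#include <rte_eventdev.h>

	static inline uint8_t
	ev_ingress_port(const struct rte_event *ev)
	{
		/* low nibble of sub_event_type, as used by ssows_get_work() */
		return (ev->event >> 20) & 0xF;
	}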

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* [PATCH v2 3/3] doc: add event octeontx Rx adapter limitation
  2017-10-18  8:45 ` [PATCH v2 " Pavan Nikhilesh
  2017-10-18  8:45   ` [PATCH v2 2/3] event/octeontx: " Pavan Nikhilesh
@ 2017-10-18  8:45   ` Pavan Nikhilesh
  2017-10-18  8:52     ` Mcnamara, John
  2017-10-23 18:09   ` [PATCH v2 1/3] net/octeontx: add support for event Rx adapter Jerin Jacob
                     ` (2 subsequent siblings)
  4 siblings, 1 reply; 22+ messages in thread
From: Pavan Nikhilesh @ 2017-10-18  8:45 UTC (permalink / raw)
  To: jerin.jacob, santosh.shukla, nikhil.rao; +Cc: dev, Pavan Nikhilesh

Add limitation when using eth_octeontx as Rx adapter with event_octeontx.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 doc/guides/eventdevs/octeontx.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index b43d515..4412bfa 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -149,3 +149,8 @@ Burst mode support
 Burst mode is not supported. Dequeue and Enqueue functions accepts only single
 event at a time.
 
+Rx adapter support
+~~~~~~~~~~~~~~~~~~
+
+When eth_octeontx is used as Rx adapter, event schedule type
+RTE_SCHED_TYPE_PARALLEL is not supported.
-- 
2.7.4
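
The restriction documented above is enforced in the PMD's queue add path
in patch 2/3, which does:

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

so applications should request RTE_SCHED_TYPE_ATOMIC or
RTE_SCHED_TYPE_ORDERED for the adapter's event queue instead.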

^ permalink raw reply related	[flat|nested] 22+ messages in thread

* Re: [PATCH 3/3] doc: add event octeontx Rx adapter limitation
  2017-10-17  8:53 ` [PATCH 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
@ 2017-10-18  8:48   ` Mcnamara, John
  2017-10-18  9:06     ` Pavan Nikhilesh Bhagavatula
  0 siblings, 1 reply; 22+ messages in thread
From: Mcnamara, John @ 2017-10-18  8:48 UTC (permalink / raw)
  To: Pavan Nikhilesh, santosh.shukla, jerin.jacob; +Cc: dev



> -----Original Message-----
> From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Pavan Nikhilesh
> Sent: Tuesday, October 17, 2017 9:53 AM
> To: santosh.shukla@caviumnetworks.com; jerin.jacob@caviumnetworks.com
> Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH 3/3] doc: add event octeontx Rx adapter
> limitation
> 
> Add limitation when using eth_octeontx as Rx adapter with event_octeontx.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
>  doc/guides/eventdevs/octeontx.rst | 5 +++++
>  1 file changed, 5 insertions(+)
> 
> diff --git a/doc/guides/eventdevs/octeontx.rst
> b/doc/guides/eventdevs/octeontx.rst
> index b43d515..4412bfa 100644
> --- a/doc/guides/eventdevs/octeontx.rst
> +++ b/doc/guides/eventdevs/octeontx.rst
> @@ -149,3 +149,8 @@ Burst mode support
>  Burst mode is not supported. Dequeue and Enqueue functions accepts only
> single  event at a time.
> 
> +Rx adapter support
> +~~~~~~~~~~~~~~~~~~
> +
> +When eth_octeontx is used as Rx adapter event schedule type
> +RTE_SCHED_TYPE_PARALLEL is not supported.

Note, for future patches, it is best to add fixed formatting to variable
names and #defines like this: ``RTE_SCHED_TYPE_PARALLEL``. That isn't
worth a respin though, so:

Acked-by: John McNamara <john.mcnamara@intel.com>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v2 3/3] doc: add event octeontx Rx adapter limitation
  2017-10-18  8:45   ` [PATCH v2 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
@ 2017-10-18  8:52     ` Mcnamara, John
  0 siblings, 0 replies; 22+ messages in thread
From: Mcnamara, John @ 2017-10-18  8:52 UTC (permalink / raw)
  To: Pavan Nikhilesh, jerin.jacob, santosh.shukla, Rao, Nikhil; +Cc: dev

Acked-by: John McNamara <john.mcnamara@intel.com>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH 3/3] doc: add event octeontx Rx adapter limitation
  2017-10-18  8:48   ` Mcnamara, John
@ 2017-10-18  9:06     ` Pavan Nikhilesh Bhagavatula
  0 siblings, 0 replies; 22+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2017-10-18  9:06 UTC (permalink / raw)
  To: Mcnamara, John; +Cc: dev

On Wed, Oct 18, 2017 at 08:48:57AM +0000, Mcnamara, John wrote:
>
>
> > -----Original Message-----
> > From: dev [mailto:dev-bounces@dpdk.org] On Behalf Of Pavan Nikhilesh
> > Sent: Tuesday, October 17, 2017 9:53 AM
> > To: santosh.shukla@caviumnetworks.com; jerin.jacob@caviumnetworks.com
> > Cc: dev@dpdk.org; Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > Subject: [dpdk-dev] [PATCH 3/3] doc: add event octeontx Rx adapter
> > limitation
> >
> > Add limitation when using eth_octeontx as Rx adapter with event_octeontx.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > ---
> >  doc/guides/eventdevs/octeontx.rst | 5 +++++
> >  1 file changed, 5 insertions(+)
> >
> > diff --git a/doc/guides/eventdevs/octeontx.rst
> > b/doc/guides/eventdevs/octeontx.rst
> > index b43d515..4412bfa 100644
> > --- a/doc/guides/eventdevs/octeontx.rst
> > +++ b/doc/guides/eventdevs/octeontx.rst
> > @@ -149,3 +149,8 @@ Burst mode support
> >  Burst mode is not supported. Dequeue and Enqueue functions accepts only
> > single  event at a time.
> >
> > +Rx adapter support
> > +~~~~~~~~~~~~~~~~~~
> > +
> > +When eth_octeontx is used as Rx adapter event schedule type
> > +RTE_SCHED_TYPE_PARALLEL is not supported.
>
> Note, for future patches, it is best to add fixed formatting to variable
> names and #defines like this: `` RTE_SCHED_TYPE_PARALLEL``. That isn't
> worth a respin though, so:
>

Thanks for the heads-up. Will take care while sending future patches.
-Pavan.

> Acked-by: John McNamara <john.mcnamara@intel.com>
>
>

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v2 1/3] net/octeontx: add support for event Rx adapter
  2017-10-18  8:45 ` [PATCH v2 " Pavan Nikhilesh
  2017-10-18  8:45   ` [PATCH v2 2/3] event/octeontx: " Pavan Nikhilesh
  2017-10-18  8:45   ` [PATCH v2 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
@ 2017-10-23 18:09   ` Jerin Jacob
  2017-10-24  6:56     ` Pavan Nikhilesh Bhagavatula
  2017-10-24  8:10   ` [PATCH v3 " Pavan Nikhilesh
  2017-10-24 13:00   ` [PATCH v4 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
  4 siblings, 1 reply; 22+ messages in thread
From: Jerin Jacob @ 2017-10-23 18:09 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: santosh.shukla, nikhil.rao, dev

-----Original Message-----
> Date: Wed, 18 Oct 2017 14:15:40 +0530
> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> To: jerin.jacob@caviumnetworks.com, santosh.shukla@caviumnetworks.com,
>  nikhil.rao@intel.com
> Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v2 1/3] net/octeontx: add support for event Rx
>  adapter
> X-Mailer: git-send-email 2.7.4
> 
> Add functions to modify and delete qos responsible for mapping eth queues
> to event queues used for configuring event Rx adapter.
> The mbox functions have been moved from octeontx_pkivf.c to
> octeontx_pkivf.h to allow event_octeontx to access them.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> ---
> 
>  v2 changes:
>  - Improve conditional statement readability (Nikhil).
> 
>  This series depends on http://dpdk.org/dev/patchwork/patch/30430
> 
> 
>  static __rte_always_inline uint16_t __hot
>  __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
> @@ -195,10 +107,8 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
>  uint16_t __hot
>  octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  {
> -	struct rte_mbuf *mbuf;
>  	struct octeontx_rxq *rxq;
>  	struct rte_event ev;
> -	octtx_wqe_t *wqe;
>  	size_t count;
>  	uint16_t valid_event;
> 
> @@ -210,23 +120,7 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
>  							1, 0);
>  		if (!valid_event)
>  			break;
> -
> -		wqe = (octtx_wqe_t *)(uintptr_t)ev.u64;
> -		rte_prefetch_non_temporal(wqe);
> -
> -		/* Get mbuf from wqe */
> -		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
> -						OCTTX_PACKET_WQE_SKIP);
> -		mbuf->packet_type =
> -		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
> -		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
> -		mbuf->pkt_len = wqe->s.w1.len;
> -		mbuf->data_len = mbuf->pkt_len;
> -		mbuf->nb_segs = 1;
> -		mbuf->ol_flags = 0;
> -		mbuf->port = rxq->port_id;

IMO, you don't need to move the mbuf conversion inline function in 2/3.
If we instead do it in 1/3, the patch will be functionally correct at 1/3.

> -		rte_mbuf_refcnt_set(mbuf, 1);
> -		rx_pkts[count++] = mbuf;
> +		rx_pkts[count++] = (struct rte_mbuf *)ev.u64;

This will create a build error on 32-bit. You can avoid the typecast by
changing to ev.mbuf.

/export/dpdk-next-eventdev/drivers/net/octeontx/octeontx_rxtx.c: In function ‘octeontx_recv_pkts’:
/export/dpdk-next-eventdev/drivers/net/octeontx/octeontx_rxtx.c:123:22: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]
   rx_pkts[count++] = (struct rte_mbuf *)ev.u64;
                      ^
cc1: all warnings being treated as errors
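
A self-contained illustration of the suggested fix (struct rte_event
keeps u64 and the mbuf pointer in the same union, so a member read
replaces the cast; the helper name is illustrative):

	#include <rte_eventdev.h>
	#include <rte_mbuf.h>

	static inline struct rte_mbuf *
	event_to_mbuf(struct rte_event ev)
	{
		/* return (struct rte_mbuf *)ev.u64;  32-bit: int-to-pointer error */
		return ev.mbuf;	/* same bits, no cast; builds on 32- and 64-bit */
	}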

^ permalink raw reply	[flat|nested] 22+ messages in thread

* Re: [PATCH v2 1/3] net/octeontx: add support for event Rx adapter
  2017-10-23 18:09   ` [PATCH v2 1/3] net/octeontx: add support for event Rx adapter Jerin Jacob
@ 2017-10-24  6:56     ` Pavan Nikhilesh Bhagavatula
  0 siblings, 0 replies; 22+ messages in thread
From: Pavan Nikhilesh Bhagavatula @ 2017-10-24  6:56 UTC (permalink / raw)
  To: Jerin Jacob; +Cc: dev

On Mon, Oct 23, 2017 at 11:39:42PM +0530, Jerin Jacob wrote:
> -----Original Message-----
> > Date: Wed, 18 Oct 2017 14:15:40 +0530
> > From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > To: jerin.jacob@caviumnetworks.com, santosh.shukla@caviumnetworks.com,
> >  nikhil.rao@intel.com
> > Cc: dev@dpdk.org, Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > Subject: [dpdk-dev] [PATCH v2 1/3] net/octeontx: add support for event Rx
> >  adapter
> > X-Mailer: git-send-email 2.7.4
> >
> > Add functions to modify and delete qos responsible for mapping eth queues
> > to event queues used for configuring event Rx adapter.
> > The mbox functions have been moved from octeontx_pkivf.c to
> > octeontx_pkivf.h to allow event_octeontx to access them.
> >
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> > ---
> >
> >  v2 changes:
> >  - Improve conditional statement readability (Nikhil).
> >
> >  This series depends on http://dpdk.org/dev/patchwork/patch/30430
> >
> >
> >  static __rte_always_inline uint16_t __hot
> >  __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
> > @@ -195,10 +107,8 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
> >  uint16_t __hot
> >  octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> >  {
> > -	struct rte_mbuf *mbuf;
> >  	struct octeontx_rxq *rxq;
> >  	struct rte_event ev;
> > -	octtx_wqe_t *wqe;
> >  	size_t count;
> >  	uint16_t valid_event;
> >
> > @@ -210,23 +120,7 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
> >  							1, 0);
> >  		if (!valid_event)
> >  			break;
> > -
> > -		wqe = (octtx_wqe_t *)(uintptr_t)ev.u64;
> > -		rte_prefetch_non_temporal(wqe);
> > -
> > -		/* Get mbuf from wqe */
> > -		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
> > -						OCTTX_PACKET_WQE_SKIP);
> > -		mbuf->packet_type =
> > -		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
> > -		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
> > -		mbuf->pkt_len = wqe->s.w1.len;
> > -		mbuf->data_len = mbuf->pkt_len;
> > -		mbuf->nb_segs = 1;
> > -		mbuf->ol_flags = 0;
> > -		mbuf->port = rxq->port_id;
>
> IMO, you don't need to move the mbuf conversion inline function in 2/3.
> If we instead do it in 1/3, the patch will be functionally correct at 1/3.
>
> > -		rte_mbuf_refcnt_set(mbuf, 1);
> > -		rx_pkts[count++] = mbuf;
> > +		rx_pkts[count++] = (struct rte_mbuf *)ev.u64;
>
> This will create a build error on 32-bit. You can avoid the typecast by
> changing to ev.mbuf.
>
> /export/dpdk-next-eventdev/drivers/net/octeontx/octeontx_rxtx.c: In function ‘octeontx_recv_pkts’:
> /export/dpdk-next-eventdev/drivers/net/octeontx/octeontx_rxtx.c:123:22: error: cast to pointer from integer of different size [-Werror=int-to-pointer-cast]
>    rx_pkts[count++] = (struct rte_mbuf *)ev.u64;
>                       ^
> cc1: all warnings being treated as errors

Agreed, will move the mbuf conversion to 1/3, fix the 32-bit compilation
issues, and send a v3.

Thanks,
Pavan

^ permalink raw reply	[flat|nested] 22+ messages in thread

* [PATCH v3 1/3] net/octeontx: add support for event Rx adapter
  2017-10-18  8:45 ` [PATCH v2 " Pavan Nikhilesh
                     ` (2 preceding siblings ...)
  2017-10-23 18:09   ` [PATCH v2 1/3] net/octeontx: add support for event Rx adapter Jerin Jacob
@ 2017-10-24  8:10   ` Pavan Nikhilesh
  2017-10-24  8:10     ` [PATCH v3 2/3] event/octeontx: " Pavan Nikhilesh
  2017-10-24  8:10     ` [PATCH v3 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
  2017-10-24 13:00   ` [PATCH v4 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
  4 siblings, 2 replies; 22+ messages in thread
From: Pavan Nikhilesh @ 2017-10-24  8:10 UTC (permalink / raw)
  To: nikhil.rao, jerin.jacob, santosh.shukla; +Cc: dev, Pavan Bhagavatula

From: Pavan Bhagavatula <pbhagavatula@caviumnetworks.com>

Add functions to modify and delete qos responsible for mapping eth queues
to event queues used for configuring event Rx adapter.
The mbox functions have been moved from octeontx_pkivf.c to
octeontx_pkivf.h to allow event_octeontx to access them.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
 v3 changes:
 - move mbuf conversion from 2/3 to 1/3
 - fix 32-bit compilation error
 - fix documentation formatting

 v2 changes:
 - Improve conditional statement readability (Nikhil).

 This series depends on http://dpdk.org/dev/patchwork/patch/30430
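
 The documentation formatting fix refers, presumably, to the fixed-width
 markup John suggested on the v1 doc patch; in RST that reads:

	When eth_octeontx is used as Rx adapter, the event schedule type
	``RTE_SCHED_TYPE_PARALLEL`` is not supported.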

 drivers/event/octeontx/Makefile            |   1 +
 drivers/event/octeontx/ssovf_evdev.h       |   1 +
 drivers/event/octeontx/ssovf_worker.h      |  31 ++++++-
 drivers/net/octeontx/base/octeontx_pkivf.c |  65 ---------------
 drivers/net/octeontx/base/octeontx_pkivf.h | 126 ++++++++++++++++++++++++++++-
 drivers/net/octeontx/octeontx_ethdev.c     |   3 +-
 drivers/net/octeontx/octeontx_rxtx.c       | 108 +------------------------
 drivers/net/octeontx/octeontx_rxtx.h       |  89 ++++++++++++++++++++
 8 files changed, 246 insertions(+), 178 deletions(-)

diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index 08fc167..7f7b9b3 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -39,6 +39,7 @@ LIB = librte_pmd_octeontx_ssovf.a

 CFLAGS += $(WERROR_FLAGS)
 CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/

 EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map

diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 933c5a3..bbce492 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -38,6 +38,7 @@
 #include <rte_io.h>

 #include <octeontx_mbox.h>
+#include <octeontx_ethdev.h>

 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx

diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 8dc1264..57a3c12 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -34,9 +34,11 @@
 #include <rte_common.h>
 #include <rte_branch_prediction.h>

-#include "ssovf_evdev.h"
 #include <octeontx_mbox.h>

+#include "ssovf_evdev.h"
+#include "octeontx_rxtx.h"
+
 enum {
 	SSO_SYNC_ORDERED,
 	SSO_SYNC_ATOMIC,
@@ -50,6 +52,28 @@ enum {

 /* SSO Operations */

+static __rte_always_inline struct rte_mbuf *
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
+{
+	struct rte_mbuf *mbuf;
+	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
+	rte_prefetch_non_temporal(wqe);
+
+	/* Get mbuf from wqe */
+	mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
+			OCTTX_PACKET_WQE_SKIP);
+	mbuf->packet_type =
+		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
+	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
+	mbuf->pkt_len = wqe->s.w1.len;
+	mbuf->data_len = mbuf->pkt_len;
+	mbuf->nb_segs = 1;
+	mbuf->ol_flags = 0;
+	mbuf->port = port_id;
+	rte_mbuf_refcnt_set(mbuf, 1);
+	return mbuf;
+}
+
 static __rte_always_inline uint16_t
 ssows_get_work(struct ssows *ws, struct rte_event *ev)
 {
@@ -62,9 +86,10 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
 	ws->cur_tt = sched_type_queue & 0x3;
 	ws->cur_grp = sched_type_queue >> 2;
 	sched_type_queue = sched_type_queue << 38;
-
 	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
-	ev->u64 = get_work1;
+	ev->u64 = get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV ?
+		(uint64_t)ssovf_octeontx_wqe_to_pkt(get_work1,
+				(ev->event >> 20) & 0xF) : get_work1;
 	return !!get_work1;
 }

diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index afae6a3..f9e4053 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -111,71 +111,6 @@ octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
 	return res;
 }

-int
-octeontx_pki_port_close(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_CLOSE;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_start(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_START;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_stop(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_STOP;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}

 int
 octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index 7cf8332..004dedc 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -240,10 +240,18 @@ typedef struct mbox_pki_port_modify_qos_entry {
 		uint8_t f_gaura:1;
 		uint8_t f_grptag_ok:1;
 		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct mbox_pki_qos_entry qos_entry;
 } mbox_pki_mod_qos_t;

+/* pki flow/style enable qos */
+typedef struct mbox_pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} mbox_pki_del_qos_t;
+
 /* PKI maximum constants */
 #define PKI_VF_MAX			(1)
 #define PKI_MAX_PKTLEN			(32768)
@@ -407,6 +415,12 @@ typedef struct pki_port_create_qos {
 } pki_qos_cfg_t;

 /* pki flow/style enable qos */
+typedef struct pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} pki_del_qos_t;
+
+/* pki flow/style enable qos */
 typedef struct pki_port_modify_qos_entry {
 	uint8_t port_type;
 	uint16_t index;
@@ -415,17 +429,125 @@ typedef struct pki_port_modify_qos_entry {
 		uint8_t f_grp_ok:1;
 		uint8_t f_grp_bad:1;
 		uint8_t f_gaura:1;
+		uint8_t f_grptag_ok:1;
+		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct pki_qos_entry qos_entry;
 } pki_mod_qos_t;

+static inline int
+octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_mod_qos_t q_cfg = *(mbox_pki_mod_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_mod_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_del_qos_t q_cfg = *(mbox_pki_del_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_del_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_close(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_CLOSE;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_start(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_START;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_stop(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_STOP;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
 int octeontx_pki_port_open(int port);
 int octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg);
 int octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg);
 int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg);
 int octeontx_pki_port_close(int port);
-int octeontx_pki_port_start(int port);
-int octeontx_pki_port_stop(int port);
 int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg);

 #endif /* __OCTEONTX_PKI_H__ */
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 82e38c2..86de5d1 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -930,6 +930,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			pki_hash.tag_slc = 1;
 			pki_hash.tag_dlf = 1;
 			pki_hash.tag_slf = 1;
+			pki_hash.tag_prt = 1;
 			octeontx_pki_port_hash_config(port, &pki_hash);
 		}

@@ -941,7 +942,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
 		pki_qos.num_entry = 1;
 		pki_qos.drop_policy = 0;
-		pki_qos.tag_type = 2L;
+		pki_qos.tag_type = 0L;
 		pki_qos.qos_entry[0].port_add = 0;
 		pki_qos.qos_entry[0].gaura = gaura;
 		pki_qos.qos_entry[0].ggrp_ok = ev_queues;
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 0b15833..2b58423 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -47,94 +47,6 @@
 #include "octeontx_rxtx.h"
 #include "octeontx_logs.h"

-/* Packet type table */
-#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
-
-static const uint32_t __rte_cache_aligned
-ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
-	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
-	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
-	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
-	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
-	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
-	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
-	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
-	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
-	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-};

 static __rte_always_inline uint16_t __hot
 __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
@@ -195,10 +107,8 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 uint16_t __hot
 octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-	struct rte_mbuf *mbuf;
 	struct octeontx_rxq *rxq;
 	struct rte_event ev;
-	octtx_wqe_t *wqe;
 	size_t count;
 	uint16_t valid_event;

@@ -210,23 +120,7 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 							1, 0);
 		if (!valid_event)
 			break;
-
-		wqe = (octtx_wqe_t *)(uintptr_t)ev.u64;
-		rte_prefetch_non_temporal(wqe);
-
-		/* Get mbuf from wqe */
-		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
-						OCTTX_PACKET_WQE_SKIP);
-		mbuf->packet_type =
-		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
-		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
-		mbuf->pkt_len = wqe->s.w1.len;
-		mbuf->data_len = mbuf->pkt_len;
-		mbuf->nb_segs = 1;
-		mbuf->ol_flags = 0;
-		mbuf->port = rxq->port_id;
-		rte_mbuf_refcnt_set(mbuf, 1);
-		rx_pkts[count++] = mbuf;
+		rx_pkts[count++] = ev.mbuf;
 	}

 	return count; /* return number of pkts received */
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 1f91532..382ff2b 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -39,6 +39,95 @@
 #define __hot	__attribute__((hot))
 #endif

+/* Packet type table */
+#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
+
+static const uint32_t __rte_cache_aligned
+ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
+	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
+	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
+	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
+	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
+	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,
+	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+};
+
 uint16_t
 octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

--
2.7.4


* [PATCH v3 2/3] event/octeontx: add support for event Rx adapter
  2017-10-24  8:10   ` [PATCH v3 " Pavan Nikhilesh
@ 2017-10-24  8:10     ` Pavan Nikhilesh
  2017-10-24  8:10     ` [PATCH v3 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
  1 sibling, 0 replies; 22+ messages in thread
From: Pavan Nikhilesh @ 2017-10-24  8:10 UTC (permalink / raw)
  To: nikhil.rao, jerin.jacob, santosh.shukla; +Cc: dev, Pavan Nikhilesh

Add Rx adapter queue add and delete APIs for both generic eth devices and
the HW-backed eth_octeontx, which supports direct event injection into the
event device.
The HW-injected event needs to be converted into an mbuf. Previously this
was done in eth_octeontx during rx_burst; it is now moved to
event_octeontx, as events from the Rx adapter are dequeued directly from
the event device.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
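
For reference, a minimal sketch of the application-side setup that reaches
these callbacks (the device ids and ev_port_conf below are illustrative,
not part of this patch):

    struct rte_event_eth_rx_adapter_queue_conf qconf;
    struct rte_event_port_conf ev_port_conf; /* assume configured as for any event port */
    int ret;

    memset(&qconf, 0, sizeof(qconf));
    qconf.ev.queue_id = 0;                       /* target event queue */
    qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC; /* PARALLEL is rejected */
    qconf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

    ret = rte_event_eth_rx_adapter_create(0 /* adapter id */,
            0 /* event dev id */, &ev_port_conf);
    if (ret == 0)
        /* rx_queue_id must be -1: this PMD binds all Rx queues
         * of the port at once.
         */
        ret = rte_event_eth_rx_adapter_queue_add(0,
                0 /* eth port id */, -1, &qconf);
    if (ret == 0)
        ret = rte_event_eth_rx_adapter_start(0);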
 drivers/event/octeontx/ssovf_evdev.c | 126 +++++++++++++++++++++++++++++++++++
 1 file changed, 126 insertions(+)

diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index d829b49..7bdc85d 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -36,6 +36,8 @@
 #include <rte_debug.h>
 #include <rte_dev.h>
 #include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
 #include <rte_malloc.h>
@@ -395,6 +397,123 @@ ssows_dump(struct ssows *ws, FILE *f)
 	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
 }
 
+static int
+ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+	int ret;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	else
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
+
+	return 0;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
+		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	int ret = 0;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	pki_mod_qos_t pki_qos;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return -EINVAL;
+
+	if (rx_queue_id >= 0)
+		return -EINVAL;
+
+	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+		return -ENOTSUP;
+
+	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));
+
+	pki_qos.port_type = 0;
+	pki_qos.index = 0;
+	pki_qos.mmask.f_tag_type = 1;
+	pki_qos.mmask.f_port_add = 1;
+	pki_qos.mmask.f_grp_ok = 1;
+	pki_qos.mmask.f_grp_bad = 1;
+	pki_qos.mmask.f_grptag_ok = 1;
+	pki_qos.mmask.f_grptag_bad = 1;
+
+	pki_qos.tag_type = queue_conf->ev.sched_type;
+	pki_qos.qos_entry.port_add = 0;
+	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
+	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
+	pki_qos.qos_entry.grptag_bad = 0;
+	pki_qos.qos_entry.grptag_ok = 0;
+
+	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
+	if (ret < 0)
+		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
+				nic->port_id, queue_conf->ev.queue_id);
+
+	return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
+{
+	int ret = 0;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	pki_del_qos_t pki_qos;
+	RTE_SET_USED(dev);
+	RTE_SET_USED(rx_queue_id);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return -EINVAL;
+
+	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
+	pki_qos.port_type = 0;
+	pki_qos.index = 0;
+	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
+	if (ret < 0)
+		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
+				nic->port_id, rx_queue_id);
+	return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
+					const struct rte_eth_dev *eth_dev)
+{
+	int ret;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return 0;
+	octeontx_pki_port_start(nic->port_id);
+	return 0;
+}
+
+
+static int
+ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev)
+{
+	int ret;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return 0;
+	octeontx_pki_port_stop(nic->port_id);
+	return 0;
+}
+
 static void
 ssovf_dump(struct rte_eventdev *dev, FILE *f)
 {
@@ -488,6 +607,13 @@ static const struct rte_eventdev_ops ssovf_ops = {
 	.port_link        = ssovf_port_link,
 	.port_unlink      = ssovf_port_unlink,
 	.timeout_ticks    = ssovf_timeout_ticks,
+
+	.eth_rx_adapter_caps_get  = ssovf_eth_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
+	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
+	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+
 	.dump             = ssovf_dump,
 	.dev_start        = ssovf_start,
 	.dev_stop         = ssovf_stop,
-- 
2.7.4


* [PATCH v3 3/3] doc: add event octeontx Rx adapter limitation
  2017-10-24  8:10   ` [PATCH v3 " Pavan Nikhilesh
  2017-10-24  8:10     ` [PATCH v3 2/3] event/octeontx: " Pavan Nikhilesh
@ 2017-10-24  8:10     ` Pavan Nikhilesh
  1 sibling, 0 replies; 22+ messages in thread
From: Pavan Nikhilesh @ 2017-10-24  8:10 UTC (permalink / raw)
  To: nikhil.rao, jerin.jacob, santosh.shukla; +Cc: dev, Pavan Bhagavatula

From: Pavan Bhagavatula <pbhagavatula@caviumnetworks.com>

Add the limitation when using eth_octeontx as an Rx adapter with
event_octeontx.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: John McNamara <john.mcnamara@intel.com>
---
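
A snippet restating the limitation in code form (the qconf variable is
illustrative, not part of this patch):

    /* With eth_octeontx behind the Rx adapter, pick ATOMIC or ORDERED;
     * RTE_SCHED_TYPE_PARALLEL makes
     * rte_event_eth_rx_adapter_queue_add() return -ENOTSUP.
     */
    qconf.ev.sched_type = RTE_SCHED_TYPE_ATOMIC;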
 doc/guides/eventdevs/octeontx.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index b43d515..7e601a0 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -149,3 +149,8 @@ Burst mode support
 Burst mode is not supported. Dequeue and Enqueue functions accepts only single
 event at a time.
 
+Rx adapter support
+~~~~~~~~~~~~~~~~~~
+
+When eth_octeontx is used as an Rx adapter, the event schedule type
+``RTE_SCHED_TYPE_PARALLEL`` is not supported.
-- 
2.7.4


* [PATCH v4 1/3] net/octeontx: add support for event Rx adapter
  2017-10-18  8:45 ` [PATCH v2 " Pavan Nikhilesh
                     ` (3 preceding siblings ...)
  2017-10-24  8:10   ` [PATCH v3 " Pavan Nikhilesh
@ 2017-10-24 13:00   ` Pavan Nikhilesh
  2017-10-24 13:00     ` [PATCH v4 2/3] event/octeontx: " Pavan Nikhilesh
                       ` (2 more replies)
  4 siblings, 3 replies; 22+ messages in thread
From: Pavan Nikhilesh @ 2017-10-24 13:00 UTC (permalink / raw)
  To: nikhil.rao, jerin.jacob, santosh.shukla; +Cc: dev, Pavan Bhagavatula

From: Pavan Bhagavatula <pbhagavatula@caviumnetworks.com>

Add functions to modify and delete the QoS entries responsible for
mapping eth queues to event queues, used when configuring the event Rx
adapter.
The mbox functions have been moved from octeontx_pkivf.c to
octeontx_pkivf.h so that event_octeontx can access them.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---

 v4 changes:
 - fix 32bit compilation issue

 v3 changes:
 - move mbuf conversion from 2/3 to 1/3
 - fix 32bit compilation error
 - fix documentation formatting

 v2 changes:
 - Improve conditional statement readability (Nikhil).
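
 A usage sketch of the exported mbox helper (illustrative values; the
 in-tree caller is added in patch 2/3):

    pki_mod_qos_t qos;
    uint8_t evq = 0;   /* illustrative event queue */
    int port = 0;      /* illustrative PKI port index */
    int ret;

    /* Steer both OK and errored traffic of the PKI port to event
     * queue evq, tagged with the requested schedule type.
     */
    memset(&qos, 0, sizeof(pki_mod_qos_t));
    qos.mmask.f_tag_type = 1;
    qos.mmask.f_grp_ok = 1;
    qos.mmask.f_grp_bad = 1;
    qos.tag_type = RTE_SCHED_TYPE_ATOMIC; /* from queue_conf->ev.sched_type */
    qos.qos_entry.ggrp_ok = evq;          /* event queue for good packets */
    qos.qos_entry.ggrp_bad = evq;         /* and for errored packets */
    ret = octeontx_pki_port_modify_qos(port, &qos);
    /* a mailbox failure surfaces as -EACCES */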

 drivers/event/octeontx/Makefile            |   1 +
 drivers/event/octeontx/ssovf_evdev.h       |   1 +
 drivers/event/octeontx/ssovf_worker.h      |  35 +++++++-
 drivers/net/octeontx/base/octeontx_pkivf.c |  65 ---------------
 drivers/net/octeontx/base/octeontx_pkivf.h | 126 ++++++++++++++++++++++++++++-
 drivers/net/octeontx/octeontx_ethdev.c     |   3 +-
 drivers/net/octeontx/octeontx_rxtx.c       | 108 +------------------------
 drivers/net/octeontx/octeontx_rxtx.h       |  89 ++++++++++++++++++++
 8 files changed, 250 insertions(+), 178 deletions(-)

diff --git a/drivers/event/octeontx/Makefile b/drivers/event/octeontx/Makefile
index 50434a3..a9b7ccd 100644
--- a/drivers/event/octeontx/Makefile
+++ b/drivers/event/octeontx/Makefile
@@ -39,6 +39,7 @@ LIB = librte_pmd_octeontx_ssovf.a

 CFLAGS += $(WERROR_FLAGS)
 CFLAGS += -I$(RTE_SDK)/drivers/mempool/octeontx/
+CFLAGS += -I$(RTE_SDK)/drivers/net/octeontx/

 EXPORT_MAP := rte_pmd_octeontx_ssovf_version.map

diff --git a/drivers/event/octeontx/ssovf_evdev.h b/drivers/event/octeontx/ssovf_evdev.h
index 933c5a3..bbce492 100644
--- a/drivers/event/octeontx/ssovf_evdev.h
+++ b/drivers/event/octeontx/ssovf_evdev.h
@@ -38,6 +38,7 @@
 #include <rte_io.h>

 #include <octeontx_mbox.h>
+#include <octeontx_ethdev.h>

 #define EVENTDEV_NAME_OCTEONTX_PMD event_octeontx

diff --git a/drivers/event/octeontx/ssovf_worker.h b/drivers/event/octeontx/ssovf_worker.h
index 8dc1264..bf76ac8 100644
--- a/drivers/event/octeontx/ssovf_worker.h
+++ b/drivers/event/octeontx/ssovf_worker.h
@@ -34,9 +34,11 @@
 #include <rte_common.h>
 #include <rte_branch_prediction.h>

-#include "ssovf_evdev.h"
 #include <octeontx_mbox.h>

+#include "ssovf_evdev.h"
+#include "octeontx_rxtx.h"
+
 enum {
 	SSO_SYNC_ORDERED,
 	SSO_SYNC_ATOMIC,
@@ -50,6 +52,28 @@ enum {

 /* SSO Operations */

+static __rte_always_inline struct rte_mbuf *
+ssovf_octeontx_wqe_to_pkt(uint64_t work, uint16_t port_id)
+{
+	struct rte_mbuf *mbuf;
+	octtx_wqe_t *wqe = (octtx_wqe_t *)(uintptr_t)work;
+	rte_prefetch_non_temporal(wqe);
+
+	/* Get mbuf from wqe */
+	mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
+			OCTTX_PACKET_WQE_SKIP);
+	mbuf->packet_type =
+		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
+	mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
+	mbuf->pkt_len = wqe->s.w1.len;
+	mbuf->data_len = mbuf->pkt_len;
+	mbuf->nb_segs = 1;
+	mbuf->ol_flags = 0;
+	mbuf->port = port_id;
+	rte_mbuf_refcnt_set(mbuf, 1);
+	return mbuf;
+}
+
 static __rte_always_inline uint16_t
 ssows_get_work(struct ssows *ws, struct rte_event *ev)
 {
@@ -62,9 +86,14 @@ ssows_get_work(struct ssows *ws, struct rte_event *ev)
 	ws->cur_tt = sched_type_queue & 0x3;
 	ws->cur_grp = sched_type_queue >> 2;
 	sched_type_queue = sched_type_queue << 38;
-
 	ev->event = sched_type_queue | (get_work0 & 0xffffffff);
-	ev->u64 = get_work1;
+	if (get_work1 && ev->event_type == RTE_EVENT_TYPE_ETHDEV) {
+		ev->mbuf = ssovf_octeontx_wqe_to_pkt(get_work1,
+				(ev->event >> 20) & 0xF);
+	} else {
+		ev->u64 = get_work1;
+	}
+
 	return !!get_work1;
 }

diff --git a/drivers/net/octeontx/base/octeontx_pkivf.c b/drivers/net/octeontx/base/octeontx_pkivf.c
index afae6a3..f9e4053 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.c
+++ b/drivers/net/octeontx/base/octeontx_pkivf.c
@@ -111,71 +111,6 @@ octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg)
 	return res;
 }

-int
-octeontx_pki_port_close(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_CLOSE;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_start(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_START;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}
-
-int
-octeontx_pki_port_stop(int port)
-{
-	struct octeontx_mbox_hdr hdr;
-	int res;
-
-	mbox_pki_port_t ptype;
-	int len = sizeof(mbox_pki_port_t);
-	memset(&ptype, 0, len);
-	ptype.port_type = OCTTX_PORT_TYPE_NET;
-
-	hdr.coproc = OCTEONTX_PKI_COPROC;
-	hdr.msg = MBOX_PKI_PORT_STOP;
-	hdr.vfid = port;
-
-	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
-	if (res < 0)
-		return -EACCES;
-
-	return res;
-}

 int
 octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg)
diff --git a/drivers/net/octeontx/base/octeontx_pkivf.h b/drivers/net/octeontx/base/octeontx_pkivf.h
index 7cf8332..004dedc 100644
--- a/drivers/net/octeontx/base/octeontx_pkivf.h
+++ b/drivers/net/octeontx/base/octeontx_pkivf.h
@@ -240,10 +240,18 @@ typedef struct mbox_pki_port_modify_qos_entry {
 		uint8_t f_gaura:1;
 		uint8_t f_grptag_ok:1;
 		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct mbox_pki_qos_entry qos_entry;
 } mbox_pki_mod_qos_t;

+/* pki flow/style enable qos */
+typedef struct mbox_pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} mbox_pki_del_qos_t;
+
 /* PKI maximum constants */
 #define PKI_VF_MAX			(1)
 #define PKI_MAX_PKTLEN			(32768)
@@ -407,6 +415,12 @@ typedef struct pki_port_create_qos {
 } pki_qos_cfg_t;

 /* pki flow/style enable qos */
+typedef struct pki_port_delete_qos_entry {
+	uint8_t port_type;
+	uint16_t index;
+} pki_del_qos_t;
+
+/* pki flow/style enable qos */
 typedef struct pki_port_modify_qos_entry {
 	uint8_t port_type;
 	uint16_t index;
@@ -415,17 +429,125 @@ typedef struct pki_port_modify_qos_entry {
 		uint8_t f_grp_ok:1;
 		uint8_t f_grp_bad:1;
 		uint8_t f_gaura:1;
+		uint8_t f_grptag_ok:1;
+		uint8_t f_grptag_bad:1;
+		uint8_t f_tag_type:1;
 	} mmask;
+	uint8_t tag_type;
 	struct pki_qos_entry qos_entry;
 } pki_mod_qos_t;

+static inline int
+octeontx_pki_port_modify_qos(int port, pki_mod_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_mod_qos_t q_cfg = *(mbox_pki_mod_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_mod_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_MODIFY_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_delete_qos(int port, pki_del_qos_t *qos_cfg)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_del_qos_t q_cfg = *(mbox_pki_del_qos_t *)qos_cfg;
+	int len = sizeof(mbox_pki_del_qos_t);
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_DELETE_QOS;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &q_cfg, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_close(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_CLOSE;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_start(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_START;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
+static inline int
+octeontx_pki_port_stop(int port)
+{
+	struct octeontx_mbox_hdr hdr;
+	int res;
+
+	mbox_pki_port_t ptype;
+	int len = sizeof(mbox_pki_port_t);
+	memset(&ptype, 0, len);
+	ptype.port_type = OCTTX_PORT_TYPE_NET;
+
+	hdr.coproc = OCTEONTX_PKI_COPROC;
+	hdr.msg = MBOX_PKI_PORT_STOP;
+	hdr.vfid = port;
+
+	res = octeontx_ssovf_mbox_send(&hdr, &ptype, len, NULL, 0);
+	if (res < 0)
+		return -EACCES;
+
+	return res;
+}
+
 int octeontx_pki_port_open(int port);
 int octeontx_pki_port_hash_config(int port, pki_hash_cfg_t *hash_cfg);
 int octeontx_pki_port_pktbuf_config(int port, pki_pktbuf_cfg_t *buf_cfg);
 int octeontx_pki_port_create_qos(int port, pki_qos_cfg_t *qos_cfg);
 int octeontx_pki_port_close(int port);
-int octeontx_pki_port_start(int port);
-int octeontx_pki_port_stop(int port);
 int octeontx_pki_port_errchk_config(int port, pki_errchk_cfg_t *cfg);

 #endif /* __OCTEONTX_PKI_H__ */
diff --git a/drivers/net/octeontx/octeontx_ethdev.c b/drivers/net/octeontx/octeontx_ethdev.c
index 82e38c2..86de5d1 100644
--- a/drivers/net/octeontx/octeontx_ethdev.c
+++ b/drivers/net/octeontx/octeontx_ethdev.c
@@ -930,6 +930,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 			pki_hash.tag_slc = 1;
 			pki_hash.tag_dlf = 1;
 			pki_hash.tag_slf = 1;
+			pki_hash.tag_prt = 1;
 			octeontx_pki_port_hash_config(port, &pki_hash);
 		}

@@ -941,7 +942,7 @@ octeontx_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
 		pki_qos.qpg_qos = PKI_QPG_QOS_NONE;
 		pki_qos.num_entry = 1;
 		pki_qos.drop_policy = 0;
-		pki_qos.tag_type = 2L;
+		pki_qos.tag_type = 0L;
 		pki_qos.qos_entry[0].port_add = 0;
 		pki_qos.qos_entry[0].gaura = gaura;
 		pki_qos.qos_entry[0].ggrp_ok = ev_queues;
diff --git a/drivers/net/octeontx/octeontx_rxtx.c b/drivers/net/octeontx/octeontx_rxtx.c
index 0b15833..2b58423 100644
--- a/drivers/net/octeontx/octeontx_rxtx.c
+++ b/drivers/net/octeontx/octeontx_rxtx.c
@@ -47,94 +47,6 @@
 #include "octeontx_rxtx.h"
 #include "octeontx_logs.h"

-/* Packet type table */
-#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
-
-static const uint32_t __rte_cache_aligned
-ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
-	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
-	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
-	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
-	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
-	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
-	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
-	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
-	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
-	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
-	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
-	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
-	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
-	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
-	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
-	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
-	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
-	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
-				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
-
-};

 static __rte_always_inline uint16_t __hot
 __octeontx_xmit_pkts(void *lmtline_va, void *ioreg_va, int64_t *fc_status_va,
@@ -195,10 +107,8 @@ octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts)
 uint16_t __hot
 octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 {
-	struct rte_mbuf *mbuf;
 	struct octeontx_rxq *rxq;
 	struct rte_event ev;
-	octtx_wqe_t *wqe;
 	size_t count;
 	uint16_t valid_event;

@@ -210,23 +120,7 @@ octeontx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
 							1, 0);
 		if (!valid_event)
 			break;
-
-		wqe = (octtx_wqe_t *)(uintptr_t)ev.u64;
-		rte_prefetch_non_temporal(wqe);
-
-		/* Get mbuf from wqe */
-		mbuf = (struct rte_mbuf *)((uintptr_t)wqe -
-						OCTTX_PACKET_WQE_SKIP);
-		mbuf->packet_type =
-		ptype_table[wqe->s.w2.lcty][wqe->s.w2.lety][wqe->s.w2.lfty];
-		mbuf->data_off = RTE_PTR_DIFF(wqe->s.w3.addr, mbuf->buf_addr);
-		mbuf->pkt_len = wqe->s.w1.len;
-		mbuf->data_len = mbuf->pkt_len;
-		mbuf->nb_segs = 1;
-		mbuf->ol_flags = 0;
-		mbuf->port = rxq->port_id;
-		rte_mbuf_refcnt_set(mbuf, 1);
-		rx_pkts[count++] = mbuf;
+		rx_pkts[count++] = ev.mbuf;
 	}

 	return count; /* return number of pkts received */
diff --git a/drivers/net/octeontx/octeontx_rxtx.h b/drivers/net/octeontx/octeontx_rxtx.h
index 1f91532..382ff2b 100644
--- a/drivers/net/octeontx/octeontx_rxtx.h
+++ b/drivers/net/octeontx/octeontx_rxtx.h
@@ -39,6 +39,95 @@
 #define __hot	__attribute__((hot))
 #endif

+/* Packet type table */
+#define PTYPE_SIZE	OCCTX_PKI_LTYPE_LAST
+
+static const uint32_t __rte_cache_aligned
+ptype_table[PTYPE_SIZE][PTYPE_SIZE][PTYPE_SIZE] = {
+	[LC_NONE][LE_NONE][LF_NONE] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPSEC_ESP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L4_FRAG,
+	[LC_NONE][LE_NONE][LF_IPCOMP] = RTE_PTYPE_UNKNOWN,
+	[LC_NONE][LE_NONE][LF_TCP] = RTE_PTYPE_L4_TCP,
+	[LC_NONE][LE_NONE][LF_UDP] = RTE_PTYPE_L4_UDP,
+	[LC_NONE][LE_NONE][LF_GRE] = RTE_PTYPE_TUNNEL_GRE,
+	[LC_NONE][LE_NONE][LF_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_NONE][LE_NONE][LF_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_NONE][LE_NONE][LF_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP,
+	[LC_IPV4][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP,
+	[LC_IPV4][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV4_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV4_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV4_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV4_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV4_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV4_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV4_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+	[LC_IPV6][LE_NONE][LF_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6][LE_NONE][LF_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6][LE_NONE][LF_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6][LE_NONE][LF_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP,
+	[LC_IPV6][LE_NONE][LF_UDP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP,
+	[LC_IPV6][LE_NONE][LF_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE,
+	[LC_IPV6_OPT][LE_NONE][LF_NONE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_IPSEC_ESP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L3_IPV4,
+	[LC_IPV6_OPT][LE_NONE][LF_IPFRAG] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG,
+	[LC_IPV6_OPT][LE_NONE][LF_IPCOMP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN,
+	[LC_IPV6_OPT][LE_NONE][LF_TCP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP,
+	[LC_IPV6_OPT][LE_NONE][LF_GRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_GENEVE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GENEVE,
+	[LC_IPV6_OPT][LE_NONE][LF_UDP_VXLAN] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_VXLAN,
+	[LC_IPV6_OPT][LE_NONE][LF_NVGRE] =
+				RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_NVGRE,
+
+};
+
 uint16_t
 octeontx_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts);

--
2.7.4


* [PATCH v4 2/3] event/octeontx: add support for event Rx adapter
  2017-10-24 13:00   ` [PATCH v4 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
@ 2017-10-24 13:00     ` Pavan Nikhilesh
  2017-10-24 13:00     ` [PATCH v4 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
  2017-10-24 14:21     ` [PATCH v4 1/3] net/octeontx: add support for event Rx adapter Jerin Jacob
  2 siblings, 0 replies; 22+ messages in thread
From: Pavan Nikhilesh @ 2017-10-24 13:00 UTC (permalink / raw)
  To: nikhil.rao, jerin.jacob, santosh.shukla; +Cc: dev, Pavan Bhagavatula

From: Pavan Bhagavatula <pbhagavatula@caviumnetworks.com>

Add Rx adapter queue add and delete APIs for both generic eth devices and
the HW-backed eth_octeontx, which supports direct event injection into the
event device.
The HW-injected event needs to be converted into an mbuf. Previously this
was done in eth_octeontx during rx_burst; it is now moved to
event_octeontx, as events from the Rx adapter are dequeued directly from
the event device.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
---
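
For reference, a sketch of how an application can discover which path the
adapter will take (ids illustrative, not part of this patch):

    uint8_t evdev_id = 0, eth_port_id = 0; /* illustrative ids */
    uint32_t caps = 0;
    int ret;

    ret = rte_event_eth_rx_adapter_caps_get(evdev_id, eth_port_id, &caps);
    if (ret == 0 && (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
        printf("eth_octeontx: events are injected directly in HW\n");
    else
        printf("generic ethdev: the adapter polls rx_burst()\n");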
 drivers/event/octeontx/ssovf_evdev.c | 126 +++++++++++++++++++++++++++++++++++
 1 file changed, 126 insertions(+)

diff --git a/drivers/event/octeontx/ssovf_evdev.c b/drivers/event/octeontx/ssovf_evdev.c
index d829b49..7bdc85d 100644
--- a/drivers/event/octeontx/ssovf_evdev.c
+++ b/drivers/event/octeontx/ssovf_evdev.c
@@ -36,6 +36,8 @@
 #include <rte_debug.h>
 #include <rte_dev.h>
 #include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_event_eth_rx_adapter.h>
 #include <rte_lcore.h>
 #include <rte_log.h>
 #include <rte_malloc.h>
@@ -395,6 +397,123 @@ ssows_dump(struct ssows *ws, FILE *f)
 	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
 }
 
+static int
+ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, uint32_t *caps)
+{
+	int ret;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
+	else
+		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
+
+	return 0;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
+		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
+{
+	int ret = 0;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	pki_mod_qos_t pki_qos;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return -EINVAL;
+
+	if (rx_queue_id >= 0)
+		return -EINVAL;
+
+	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
+		return -ENOTSUP;
+
+	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));
+
+	pki_qos.port_type = 0;
+	pki_qos.index = 0;
+	pki_qos.mmask.f_tag_type = 1;
+	pki_qos.mmask.f_port_add = 1;
+	pki_qos.mmask.f_grp_ok = 1;
+	pki_qos.mmask.f_grp_bad = 1;
+	pki_qos.mmask.f_grptag_ok = 1;
+	pki_qos.mmask.f_grptag_bad = 1;
+
+	pki_qos.tag_type = queue_conf->ev.sched_type;
+	pki_qos.qos_entry.port_add = 0;
+	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
+	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
+	pki_qos.qos_entry.grptag_bad = 0;
+	pki_qos.qos_entry.grptag_ok = 0;
+
+	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
+	if (ret < 0)
+		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
+				nic->port_id, queue_conf->ev.queue_id);
+
+	return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
+{
+	int ret = 0;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	pki_del_qos_t pki_qos;
+	RTE_SET_USED(dev);
+	RTE_SET_USED(rx_queue_id);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return -EINVAL;
+
+	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
+	pki_qos.port_type = 0;
+	pki_qos.index = 0;
+	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
+	if (ret < 0)
+		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
+				nic->port_id, rx_queue_id);
+	return ret;
+}
+
+static int
+ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
+					const struct rte_eth_dev *eth_dev)
+{
+	int ret;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return 0;
+	octeontx_pki_port_start(nic->port_id);
+	return 0;
+}
+
+
+static int
+ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
+		const struct rte_eth_dev *eth_dev)
+{
+	int ret;
+	const struct octeontx_nic *nic = eth_dev->data->dev_private;
+	RTE_SET_USED(dev);
+
+	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
+	if (ret)
+		return 0;
+	octeontx_pki_port_stop(nic->port_id);
+	return 0;
+}
+
 static void
 ssovf_dump(struct rte_eventdev *dev, FILE *f)
 {
@@ -488,6 +607,13 @@ static const struct rte_eventdev_ops ssovf_ops = {
 	.port_link        = ssovf_port_link,
 	.port_unlink      = ssovf_port_unlink,
 	.timeout_ticks    = ssovf_timeout_ticks,
+
+	.eth_rx_adapter_caps_get  = ssovf_eth_rx_adapter_caps_get,
+	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
+	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
+	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
+	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,
+
 	.dump             = ssovf_dump,
 	.dev_start        = ssovf_start,
 	.dev_stop         = ssovf_stop,
-- 
2.7.4


* [PATCH v4 3/3] doc: add event octeontx Rx adapter limitation
  2017-10-24 13:00   ` [PATCH v4 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
  2017-10-24 13:00     ` [PATCH v4 2/3] event/octeontx: " Pavan Nikhilesh
@ 2017-10-24 13:00     ` Pavan Nikhilesh
  2017-10-24 14:21     ` [PATCH v4 1/3] net/octeontx: add support for event Rx adapter Jerin Jacob
  2 siblings, 0 replies; 22+ messages in thread
From: Pavan Nikhilesh @ 2017-10-24 13:00 UTC (permalink / raw)
  To: nikhil.rao, jerin.jacob, santosh.shukla; +Cc: dev, Pavan Bhagavatula

From: Pavan Bhagavatula <pbhagavatula@caviumnetworks.com>

Add the limitation when using eth_octeontx as an Rx adapter with
event_octeontx.

Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
Acked-by: John McNamara <john.mcnamara@intel.com>
---
 doc/guides/eventdevs/octeontx.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/doc/guides/eventdevs/octeontx.rst b/doc/guides/eventdevs/octeontx.rst
index b43d515..7e601a0 100644
--- a/doc/guides/eventdevs/octeontx.rst
+++ b/doc/guides/eventdevs/octeontx.rst
@@ -149,3 +149,8 @@ Burst mode support
 Burst mode is not supported. Dequeue and Enqueue functions accepts only single
 event at a time.
 
+Rx adapter support
+~~~~~~~~~~~~~~~~~~
+
+When eth_octeontx is used as an Rx adapter, the event schedule type
+``RTE_SCHED_TYPE_PARALLEL`` is not supported.
-- 
2.7.4


* Re: [PATCH v4 1/3] net/octeontx: add support for event Rx adapter
  2017-10-24 13:00   ` [PATCH v4 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
  2017-10-24 13:00     ` [PATCH v4 2/3] event/octeontx: " Pavan Nikhilesh
  2017-10-24 13:00     ` [PATCH v4 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
@ 2017-10-24 14:21     ` Jerin Jacob
  2 siblings, 0 replies; 22+ messages in thread
From: Jerin Jacob @ 2017-10-24 14:21 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: nikhil.rao, santosh.shukla, dev

-----Original Message-----
> Date: Tue, 24 Oct 2017 18:30:35 +0530
> From: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> To: nikhil.rao@intel.com, jerin.jacob@caviumnetworks.com,
>  santosh.shukla@caviumnetworks.com
> Cc: dev@dpdk.org, Pavan Bhagavatula <pbhagavatula@caviumnetworks.com>
> Subject: [dpdk-dev] [PATCH v4 1/3] net/octeontx: add support for event Rx
>  adapter
> X-Mailer: git-send-email 2.7.4
> 
> From: Pavan Bhagavatula <pbhagavatula@caviumnetworks.com>
> 
> Add functions to modify and delete qos responsible for mapping eth queues
> to event queues used for configuring event Rx adapter.
> The mbox functions have been moved from octeontx_pkivf.c to
> octeontx_pkivf.h to allow event_octeontx to access them.
> 
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>

Acked-by: Jerin Jacob <jerin.jacob@caviumnetworks.com>


* Re: [PATCH 2/3] event/octeontx: add support for event Rx adapter
  2017-10-17 11:11   ` Rao, Nikhil
  2017-10-18  8:12     ` Pavan Nikhilesh Bhagavatula
@ 2017-10-24 14:27     ` Jerin Jacob
  1 sibling, 0 replies; 22+ messages in thread
From: Jerin Jacob @ 2017-10-24 14:27 UTC (permalink / raw)
  To: Rao, Nikhil; +Cc: Pavan Nikhilesh, santosh.shukla, dev

-----Original Message-----
> Date: Tue, 17 Oct 2017 16:41:44 +0530
> From: "Rao, Nikhil" <nikhil.rao@intel.com>
> To: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>,
>  santosh.shukla@caviumnetworks.com, jerin.jacob@caviumnetworks.com
> CC: dev@dpdk.org
> Subject: Re: [PATCH 2/3] event/octeontx: add support for event Rx adapter
> User-Agent: Mozilla/5.0 (Windows NT 10.0; WOW64; rv:52.0) Gecko/20100101
>  Thunderbird/52.4.0
> 
> On 10/17/2017 2:23 PM, Pavan Nikhilesh wrote:
> > Add Rx adapter queue add and delete API for both generic eth_devices as
> > well as HW backed eth_octeontx which supports direct event injection to
> > event device.
> > The HW injected event needs to be converted into mbuf, previously this
> > was done in eth_octeontx during rx_burst now it is moved to
> > event_octeontx as events from Rx adapter are dequeued directly from
> > event device.
> > 
> > Signed-off-by: Pavan Nikhilesh <pbhagavatula@caviumnetworks.com>
> 
> I think the code readability would be better if you compared event_type to
> RTE_EVENT_TYPE_ETHDEV.
> 
> Reviewed by: Nikhil Rao <nikhil.rao@intel.com>

Fixed the missing "-" in Reviewed-by: and applied v4 series
to dpdk-next-eventdev/master. Thanks.


end of thread

Thread overview: 22+ messages
2017-10-17  8:53 [PATCH 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
2017-10-17  8:53 ` [PATCH 2/3] event/octeontx: " Pavan Nikhilesh
2017-10-17 11:11   ` Rao, Nikhil
2017-10-18  8:12     ` Pavan Nikhilesh Bhagavatula
2017-10-24 14:27     ` Jerin Jacob
2017-10-17  8:53 ` [PATCH 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
2017-10-18  8:48   ` Mcnamara, John
2017-10-18  9:06     ` Pavan Nikhilesh Bhagavatula
2017-10-17  9:29 ` [PATCH 1/3] net/octeontx: add support for event Rx adapter Bhagavatula, Pavan
2017-10-18  8:45 ` [PATCH v2 " Pavan Nikhilesh
2017-10-18  8:45   ` [PATCH v2 2/3] event/octeontx: " Pavan Nikhilesh
2017-10-18  8:45   ` [PATCH v2 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
2017-10-18  8:52     ` Mcnamara, John
2017-10-23 18:09   ` [PATCH v2 1/3] net/octeontx: add support for event Rx adapter Jerin Jacob
2017-10-24  6:56     ` Pavan Nikhilesh Bhagavatula
2017-10-24  8:10   ` [PATCH v3 " Pavan Nikhilesh
2017-10-24  8:10     ` [PATCH v3 2/3] event/octeontx: " Pavan Nikhilesh
2017-10-24  8:10     ` [PATCH v3 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
2017-10-24 13:00   ` [PATCH v4 1/3] net/octeontx: add support for event Rx adapter Pavan Nikhilesh
2017-10-24 13:00     ` [PATCH v4 2/3] event/octeontx: " Pavan Nikhilesh
2017-10-24 13:00     ` [PATCH v4 3/3] doc: add event octeontx Rx adapter limitation Pavan Nikhilesh
2017-10-24 14:21     ` [PATCH v4 1/3] net/octeontx: add support for event Rx adapter Jerin Jacob
