* [dpdk-dev] [PATCH 1/3] event/octeontx2: fix device reconfigure
@ 2020-06-29  1:33 pbhagavatula
  2020-06-29  1:33 ` [dpdk-dev] [PATCH 2/3] event/octeontx2: fix sub event type violation pbhagavatula
                   ` (2 more replies)
  0 siblings, 3 replies; 4+ messages in thread
From: pbhagavatula @ 2020-06-29  1:33 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh; +Cc: dev, stable

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

When the event device is reconfigured, preserve the event queue to event
port links and the event port status instead of resetting them.

Fixes: cd24e70258bd ("event/octeontx2: add device configure function")
Cc: stable@dpdk.org

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
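Notes (not part of the change): below is a minimal, illustrative sketch of
the application-side sequence this fix targets, assuming default queue and
port configurations. The device id, config values and helper name are made
up for the example and are not taken from this patch.

#include <rte_eventdev.h>

static int
reconfigure_keep_links(uint8_t dev_id)
{
	struct rte_event_dev_config conf = {
		.nb_event_queues = 2,
		.nb_event_ports = 1,
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 1,
		.nb_event_port_enqueue_depth = 1,
	};
	uint8_t queues[] = {0, 1};
	int rc;

	/* Initial configuration: two queues, one port, both queues linked. */
	rc = rte_event_dev_configure(dev_id, &conf);
	if (rc < 0)
		return rc;
	rte_event_queue_setup(dev_id, 0, NULL);	/* NULL: default queue conf */
	rte_event_queue_setup(dev_id, 1, NULL);
	rte_event_port_setup(dev_id, 0, NULL);	/* NULL: default port conf */
	rte_event_port_link(dev_id, 0, queues, NULL, 2);

	/* Reconfigure, e.g. to raise the event limit. With this fix the
	 * PMD keeps the port 0 <-> queue {0, 1} links and the port status
	 * instead of resetting them.
	 */
	conf.nb_events_limit = 8192;
	return rte_event_dev_configure(dev_id, &conf);
}
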
 drivers/event/octeontx2/otx2_evdev.c | 60 +++++++++++++++++++++++-----
 1 file changed, 50 insertions(+), 10 deletions(-)

diff --git a/drivers/event/octeontx2/otx2_evdev.c b/drivers/event/octeontx2/otx2_evdev.c
index 630073de5..b8b57c388 100644
--- a/drivers/event/octeontx2/otx2_evdev.c
+++ b/drivers/event/octeontx2/otx2_evdev.c
@@ -725,6 +725,46 @@ sso_clr_links(const struct rte_eventdev *event_dev)
 	}
 }
 
+static void
+sso_restore_links(const struct rte_eventdev *event_dev)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	uint16_t *links_map;
+	int i, j;
+
+	for (i = 0; i < dev->nb_event_ports; i++) {
+		links_map = event_dev->data->links_map;
+		/* Point links_map to this port specific area */
+		links_map += (i * RTE_EVENT_MAX_QUEUES_PER_DEV);
+		if (dev->dual_ws) {
+			struct otx2_ssogws_dual *ws;
+
+			ws = event_dev->data->ports[i];
+			for (j = 0; j < dev->nb_event_queues; j++) {
+				if (links_map[j] == 0xdead)
+					continue;
+				sso_port_link_modify((struct otx2_ssogws *)
+						&ws->ws_state[0], j, true);
+				sso_port_link_modify((struct otx2_ssogws *)
+						&ws->ws_state[1], j, true);
+				sso_func_trace("Restoring port %d queue %d "
+						"link", i, j);
+			}
+		} else {
+			struct otx2_ssogws *ws;
+
+			ws = event_dev->data->ports[i];
+			for (j = 0; j < dev->nb_event_queues; j++) {
+				if (links_map[j] == 0xdead)
+					continue;
+				sso_port_link_modify(ws, j, true);
+				sso_func_trace("Restoring port %d queue %d "
+						"link", i, j);
+			}
+		}
+	}
+}
+
 static void
 sso_set_port_ops(struct otx2_ssogws *ws, uintptr_t base)
 {
@@ -765,18 +805,15 @@ sso_configure_dual_ports(const struct rte_eventdev *event_dev)
 		struct otx2_ssogws_dual *ws;
 		uintptr_t base;
 
-		/* Free memory prior to re-allocation if needed */
 		if (event_dev->data->ports[i] != NULL) {
 			ws = event_dev->data->ports[i];
-			rte_free(ws);
-			ws = NULL;
-		}
-
-		/* Allocate event port memory */
-		ws = rte_zmalloc_socket("otx2_sso_ws",
+		} else {
+			/* Allocate event port memory */
+			ws = rte_zmalloc_socket("otx2_sso_ws",
 					sizeof(struct otx2_ssogws_dual),
 					RTE_CACHE_LINE_SIZE,
 					event_dev->data->socket_id);
+		}
 		if (ws == NULL) {
 			otx2_err("Failed to alloc memory for port=%d", i);
 			rc = -ENOMEM;
@@ -1061,8 +1098,11 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
 		return -EINVAL;
 	}
 
-	if (dev->configured)
+	if (dev->configured) {
 		sso_unregister_irqs(event_dev);
+		/* Clear any prior port-queue mapping. */
+		sso_clr_links(event_dev);
+	}
 
 	if (dev->nb_event_queues) {
 		/* Finit any previous queues. */
@@ -1097,8 +1137,8 @@ otx2_sso_configure(const struct rte_eventdev *event_dev)
 		goto teardown_hwggrp;
 	}
 
-	/* Clear any prior port-queue mapping. */
-	sso_clr_links(event_dev);
+	/* Restore any prior port-queue mapping. */
+	sso_restore_links(event_dev);
 	rc = sso_ggrp_alloc_xaq(dev);
 	if (rc < 0) {
 		otx2_err("Failed to alloc xaq to ggrp %d", rc);
-- 
2.17.1


* [dpdk-dev] [PATCH 2/3] event/octeontx2: fix sub event type violation
  2020-06-29  1:33 [dpdk-dev] [PATCH 1/3] event/octeontx2: fix device reconfigure pbhagavatula
@ 2020-06-29  1:33 ` pbhagavatula
  2020-06-29  1:33 ` [dpdk-dev] [PATCH 3/3] event/octeontx2: improve datapath memory locality pbhagavatula
  2020-06-30  5:42 ` [dpdk-dev] [PATCH 1/3] event/octeontx2: fix device reconfigure Jerin Jacob
  2 siblings, 0 replies; 4+ messages in thread
From: pbhagavatula @ 2020-06-29  1:33 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh; +Cc: dev, stable

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

The OCTEONTX2 event device uses sub_event_type to store the Ethernet
port identifier when work is received from the OCTEONTX2 Ethernet
device. This violates the event device spec, which requires
sub_event_type to be 0 at the initial receive stage.
Set sub_event_type back to 0 after copying out the port identifier.

Fixes: 0fe4accd8ec8 ("event/octeontx2: add Rx adapter fastpath ops")
Cc: stable@dpdk.org

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
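Notes (not part of the change): a hedged sketch of a worker dequeue loop
that relies on the behaviour this patch restores: Rx adapter events carry
RTE_EVENT_TYPE_ETHDEV with sub_event_type equal to 0, and the ingress port
is read from the mbuf rather than from sub_event_type. Function and
variable names here are assumptions made for the example.

#include <rte_eventdev.h>
#include <rte_mbuf.h>

static void
worker_poll_once(uint8_t dev_id, uint8_t port_id)
{
	struct rte_event ev;

	if (rte_event_dequeue_burst(dev_id, port_id, &ev, 1, 0) == 0)
		return;

	if (ev.event_type == RTE_EVENT_TYPE_ETHDEV) {
		/* Per the eventdev spec, sub_event_type is 0 at the initial
		 * receive stage; the ingress port comes from the mbuf.
		 */
		struct rte_mbuf *m = ev.mbuf;
		uint16_t in_port = m->port;

		(void)in_port;	/* process the packet from in_port ... */
	}
}
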
 drivers/event/octeontx2/otx2_worker_dual.h | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)

diff --git a/drivers/event/octeontx2/otx2_worker_dual.h b/drivers/event/octeontx2/otx2_worker_dual.h
index c88420eb4..60aa14cca 100644
--- a/drivers/event/octeontx2/otx2_worker_dual.h
+++ b/drivers/event/octeontx2/otx2_worker_dual.h
@@ -65,8 +65,11 @@ otx2_ssogws_dual_get_work(struct otx2_ssogws_state *ws,
 
 	if (event.sched_type != SSO_TT_EMPTY &&
 	    event.event_type == RTE_EVENT_TYPE_ETHDEV) {
-		otx2_wqe_to_mbuf(get_work1, mbuf, event.sub_event_type,
-				 (uint32_t) event.get_work0, flags, lookup_mem);
+		uint8_t port = event.sub_event_type;
+
+		event.sub_event_type = 0;
+		otx2_wqe_to_mbuf(get_work1, mbuf, port,
+				 event.flow_id, flags, lookup_mem);
 		/* Extracting tstamp, if PTP enabled. CGX will prepend the
 		 * timestamp at starting of packet data and it can be derieved
 		 * from WQE 9 dword which corresponds to SG iova.
-- 
2.17.1


* [dpdk-dev] [PATCH 3/3] event/octeontx2: improve datapath memory locality
  2020-06-29  1:33 [dpdk-dev] [PATCH 1/3] event/octeontx2: fix device reconfigure pbhagavatula
  2020-06-29  1:33 ` [dpdk-dev] [PATCH 2/3] event/octeontx2: fix sub event type violation pbhagavatula
@ 2020-06-29  1:33 ` pbhagavatula
  2020-06-30  5:42 ` [dpdk-dev] [PATCH 1/3] event/octeontx2: fix device reconfigure Jerin Jacob
  2 siblings, 0 replies; 4+ messages in thread
From: pbhagavatula @ 2020-06-29  1:33 UTC (permalink / raw)
  To: jerinj, Pavan Nikhilesh; +Cc: dev, stable

From: Pavan Nikhilesh <pbhagavatula@marvell.com>

When the OCTEONTX2 event device transmits a packet, it needs to access
the destination Ethernet device's Tx queue (TXq) data.
Currently, the TXq data is fetched through the rte_eth_devices global
array. Instead, store the TXq address inside the event port memory to
improve datapath memory locality.

Cc: stable@dpdk.org

Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>
---
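Notes (not part of the change): the casts added by this patch view a
trailing flexible array member as a [eth_port][tx_queue] table of TXq
pointers. The standalone sketch below shows only that pattern; the type,
function and macro names (MAX_QUEUES_PER_PORT standing in for
RTE_MAX_QUEUES_PER_PORT) are made up and it is not driver code.

#include <stdint.h>
#include <stdlib.h>

#define MAX_QUEUES_PER_PORT 16	/* stand-in for RTE_MAX_QUEUES_PER_PORT */

struct port_ctx {
	uint64_t hot_state;		/* fast-path fields come first */
	uint8_t tx_adptr_data[];	/* [eth_port][tx_queue] txq pointers */
};

static struct port_ctx *
port_ctx_alloc(uint16_t max_eth_port)
{
	/* One 64-bit slot per (eth_port, tx_queue) pair. */
	size_t tbl = sizeof(uint64_t) * (max_eth_port + 1) *
		     MAX_QUEUES_PER_PORT;

	return calloc(1, sizeof(struct port_ctx) + tbl);
}

static void
port_ctx_set_txq(struct port_ctx *p, uint16_t eth_port, uint16_t txq_id,
		 void *txq)
{
	/* Same shape of cast the patch uses: treat the byte array as a
	 * two-dimensional table indexed by port and queue.
	 */
	((uint64_t (*)[MAX_QUEUES_PER_PORT])&p->tx_adptr_data)
		[eth_port][txq_id] = (uint64_t)(uintptr_t)txq;
}

static void *
port_ctx_get_txq(const struct port_ctx *p, uint16_t eth_port,
		 uint16_t txq_id)
{
	const uint64_t (*tbl)[MAX_QUEUES_PER_PORT] =
		(const uint64_t (*)[MAX_QUEUES_PER_PORT])&p->tx_adptr_data;

	return (void *)(uintptr_t)tbl[eth_port][txq_id];
}

Keeping the table at the tail of the per-port structure lets the fast path
read the TXq pointer from memory it already owns instead of dereferencing
the global rte_eth_devices array, which is the locality gain the commit
message describes.
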
 drivers/event/octeontx2/otx2_evdev.h       |  5 ++
 drivers/event/octeontx2/otx2_evdev_adptr.c | 67 +++++++++++++++++++++-
 drivers/event/octeontx2/otx2_worker.c      | 15 +++--
 drivers/event/octeontx2/otx2_worker.h      | 21 ++++---
 drivers/event/octeontx2/otx2_worker_dual.c | 15 +++--
 5 files changed, 103 insertions(+), 20 deletions(-)

diff --git a/drivers/event/octeontx2/otx2_evdev.h b/drivers/event/octeontx2/otx2_evdev.h
index 3b477820f..873724dd4 100644
--- a/drivers/event/octeontx2/otx2_evdev.h
+++ b/drivers/event/octeontx2/otx2_evdev.h
@@ -141,6 +141,7 @@ struct otx2_sso_evdev {
 	uint64_t adptr_xae_cnt;
 	uint16_t rx_adptr_pool_cnt;
 	uint64_t *rx_adptr_pools;
+	uint16_t max_port_id;
 	uint16_t tim_adptr_ring_cnt;
 	uint16_t *timer_adptr_rings;
 	uint64_t *timer_adptr_sz;
@@ -185,6 +186,8 @@ struct otx2_ssogws {
 	uintptr_t grps_base[OTX2_SSO_MAX_VHGRP];
 	/* PTP timestamp */
 	struct otx2_timesync_info *tstamp;
+	/* Tx Fastpath data */
+	uint8_t tx_adptr_data[] __rte_cache_aligned;
 } __rte_cache_aligned;
 
 struct otx2_ssogws_state {
@@ -204,6 +207,8 @@ struct otx2_ssogws_dual {
 	uintptr_t grps_base[OTX2_SSO_MAX_VHGRP];
 	/* PTP timestamp */
 	struct otx2_timesync_info *tstamp;
+	/* Tx Fastpath data */
+	uint8_t tx_adptr_data[] __rte_cache_aligned;
 } __rte_cache_aligned;
 
 static inline struct otx2_sso_evdev *
diff --git a/drivers/event/octeontx2/otx2_evdev_adptr.c b/drivers/event/octeontx2/otx2_evdev_adptr.c
index 8bdcfa3ea..0a5d7924a 100644
--- a/drivers/event/octeontx2/otx2_evdev_adptr.c
+++ b/drivers/event/octeontx2/otx2_evdev_adptr.c
@@ -438,6 +438,60 @@ sso_sqb_aura_limit_edit(struct rte_mempool *mp, uint16_t nb_sqb_bufs)
 	return otx2_mbox_process(npa_lf->mbox);
 }
 
+static int
+sso_add_tx_queue_data(const struct rte_eventdev *event_dev,
+		      uint16_t eth_port_id, uint16_t tx_queue_id,
+		      struct otx2_eth_txq *txq)
+{
+	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
+	int i;
+
+	for (i = 0; i < event_dev->data->nb_ports; i++) {
+		dev->max_port_id = RTE_MAX(dev->max_port_id, eth_port_id);
+		if (dev->dual_ws) {
+			struct otx2_ssogws_dual *old_dws;
+			struct otx2_ssogws_dual *dws;
+
+			old_dws = event_dev->data->ports[i];
+			dws = rte_realloc_socket(old_dws,
+						 sizeof(struct otx2_ssogws_dual)
+						 + (sizeof(uint64_t) *
+						    (dev->max_port_id + 1) *
+						    RTE_MAX_QUEUES_PER_PORT),
+						 RTE_CACHE_LINE_SIZE,
+						 event_dev->data->socket_id);
+			if (dws == NULL)
+				return -ENOMEM;
+
+			((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
+			 )&dws->tx_adptr_data)[eth_port_id][tx_queue_id] =
+				(uint64_t)txq;
+			event_dev->data->ports[i] = dws;
+		} else {
+			struct otx2_ssogws *old_ws;
+			struct otx2_ssogws *ws;
+
+			old_ws = event_dev->data->ports[i];
+			ws = rte_realloc_socket(old_ws,
+						sizeof(struct otx2_ssogws_dual)
+						+ (sizeof(uint64_t) *
+						   (dev->max_port_id + 1) *
+						   RTE_MAX_QUEUES_PER_PORT),
+						RTE_CACHE_LINE_SIZE,
+						event_dev->data->socket_id);
+			if (ws == NULL)
+				return -ENOMEM;
+
+			((uint64_t (*)[RTE_MAX_QUEUES_PER_PORT]
+			 )&ws->tx_adptr_data)[eth_port_id][tx_queue_id] =
+				(uint64_t)txq;
+			event_dev->data->ports[i] = ws;
+		}
+	}
+
+	return 0;
+}
+
 int
 otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
 			      const struct rte_eth_dev *eth_dev,
@@ -446,18 +500,27 @@ otx2_sso_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *event_dev,
 	struct otx2_eth_dev *otx2_eth_dev = eth_dev->data->dev_private;
 	struct otx2_sso_evdev *dev = sso_pmd_priv(event_dev);
 	struct otx2_eth_txq *txq;
-	int i;
+	int i, ret;
 
 	RTE_SET_USED(id);
 	if (tx_queue_id < 0) {
 		for (i = 0 ; i < eth_dev->data->nb_tx_queues; i++) {
 			txq = eth_dev->data->tx_queues[i];
 			sso_sqb_aura_limit_edit(txq->sqb_pool,
-						OTX2_SSO_SQB_LIMIT);
+					OTX2_SSO_SQB_LIMIT);
+			ret = sso_add_tx_queue_data(event_dev,
+						    eth_dev->data->port_id, i,
+						    txq);
+			if (ret < 0)
+				return ret;
 		}
 	} else {
 		txq = eth_dev->data->tx_queues[tx_queue_id];
 		sso_sqb_aura_limit_edit(txq->sqb_pool, OTX2_SSO_SQB_LIMIT);
+		ret = sso_add_tx_queue_data(event_dev, eth_dev->data->port_id,
+					    tx_queue_id, txq);
+		if (ret < 0)
+			return ret;
 	}
 
 	dev->tx_offloads |= otx2_eth_dev->tx_offload_flags;
diff --git a/drivers/event/octeontx2/otx2_worker.c b/drivers/event/octeontx2/otx2_worker.c
index 88bac391c..1d427e4a3 100644
--- a/drivers/event/octeontx2/otx2_worker.c
+++ b/drivers/event/octeontx2/otx2_worker.c
@@ -268,7 +268,7 @@ otx2_ssogws_enq_fwd_burst(void *port, const struct rte_event ev[],
 }
 
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
-uint16_t __rte_hot								\
+uint16_t __rte_hot							\
 otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[],	\
 				  uint16_t nb_events)			\
 {									\
@@ -276,13 +276,16 @@ otx2_ssogws_tx_adptr_enq_ ## name(void *port, struct rte_event ev[],	\
 	uint64_t cmd[sz];						\
 									\
 	RTE_SET_USED(nb_events);					\
-	return otx2_ssogws_event_tx(ws, ev, cmd, flags);		\
+	return otx2_ssogws_event_tx(ws, ev, cmd, (const uint64_t	\
+				    (*)[RTE_MAX_QUEUES_PER_PORT])	\
+				    &ws->tx_adptr_data,			\
+				    flags);				\
 }
 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
-uint16_t __rte_hot								\
+uint16_t __rte_hot							\
 otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[],\
 				      uint16_t nb_events)		\
 {									\
@@ -290,8 +293,10 @@ otx2_ssogws_tx_adptr_enq_seg_ ## name(void *port, struct rte_event ev[],\
 	uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2];			\
 									\
 	RTE_SET_USED(nb_events);					\
-	return otx2_ssogws_event_tx(ws, ev, cmd, (flags) |		\
-				    NIX_TX_MULTI_SEG_F);		\
+	return otx2_ssogws_event_tx(ws, ev, cmd, (const uint64_t	\
+				    (*)[RTE_MAX_QUEUES_PER_PORT])	\
+				    &ws->tx_adptr_data,			\
+				    (flags) | NIX_TX_MULTI_SEG_F);	\
 }
 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
diff --git a/drivers/event/octeontx2/otx2_worker.h b/drivers/event/octeontx2/otx2_worker.h
index 5f5aa8746..924ff7ff4 100644
--- a/drivers/event/octeontx2/otx2_worker.h
+++ b/drivers/event/octeontx2/otx2_worker.h
@@ -260,10 +260,11 @@ otx2_ssogws_order(struct otx2_ssogws *ws, const uint8_t wait_flag)
 }
 
 static __rte_always_inline const struct otx2_eth_txq *
-otx2_ssogws_xtract_meta(struct rte_mbuf *m)
+otx2_ssogws_xtract_meta(struct rte_mbuf *m,
+			const uint64_t txq_data[][RTE_MAX_QUEUES_PER_PORT])
 {
-	return rte_eth_devices[m->port].data->tx_queues[
-			rte_event_eth_tx_adapter_txq_get(m)];
+	return (const struct otx2_eth_txq *)txq_data[m->port][
+					rte_event_eth_tx_adapter_txq_get(m)];
 }
 
 static __rte_always_inline void
@@ -276,20 +277,24 @@ otx2_ssogws_prepare_pkt(const struct otx2_eth_txq *txq, struct rte_mbuf *m,
 
 static __rte_always_inline uint16_t
 otx2_ssogws_event_tx(struct otx2_ssogws *ws, struct rte_event ev[],
-		     uint64_t *cmd, const uint32_t flags)
+		     uint64_t *cmd, const uint64_t
+		     txq_data[][RTE_MAX_QUEUES_PER_PORT],
+		     const uint32_t flags)
 {
 	struct rte_mbuf *m = ev[0].mbuf;
-	const struct otx2_eth_txq *txq = otx2_ssogws_xtract_meta(m);
-
-	rte_prefetch_non_temporal(txq);
+	const struct otx2_eth_txq *txq;
 
 	if ((flags & NIX_TX_OFFLOAD_SECURITY_F) &&
-	    (m->ol_flags & PKT_TX_SEC_OFFLOAD))
+	    (m->ol_flags & PKT_TX_SEC_OFFLOAD)) {
+		txq = otx2_ssogws_xtract_meta(m, txq_data);
 		return otx2_sec_event_tx(ws, ev, m, txq, flags);
+	}
 
+	rte_prefetch_non_temporal(&txq_data[m->port][0]);
 	/* Perform header writes before barrier for TSO */
 	otx2_nix_xmit_prepare_tso(m, flags);
 	otx2_ssogws_order(ws, !ev->sched_type);
+	txq = otx2_ssogws_xtract_meta(m, txq_data);
 	otx2_ssogws_prepare_pkt(txq, m, cmd, flags);
 
 	if (flags & NIX_TX_MULTI_SEG_F) {
diff --git a/drivers/event/octeontx2/otx2_worker_dual.c b/drivers/event/octeontx2/otx2_worker_dual.c
index 3d55d921b..946488eab 100644
--- a/drivers/event/octeontx2/otx2_worker_dual.c
+++ b/drivers/event/octeontx2/otx2_worker_dual.c
@@ -308,7 +308,7 @@ SSO_RX_ADPTR_ENQ_FASTPATH_FUNC
 #undef R
 
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
-uint16_t __rte_hot								\
+uint16_t __rte_hot							\
 otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port,			\
 				       struct rte_event ev[],		\
 				       uint16_t nb_events)		\
@@ -319,13 +319,16 @@ otx2_ssogws_dual_tx_adptr_enq_ ## name(void *port,			\
 	uint64_t cmd[sz];						\
 									\
 	RTE_SET_USED(nb_events);					\
-	return otx2_ssogws_event_tx(vws, ev, cmd, flags);		\
+	return otx2_ssogws_event_tx(vws, ev, cmd, (const uint64_t	\
+				    (*)[RTE_MAX_QUEUES_PER_PORT])	\
+				    ws->tx_adptr_data,			\
+				    flags);				\
 }
 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
 
 #define T(name, f6, f5, f4, f3, f2, f1, f0, sz, flags)			\
-uint16_t __rte_hot								\
+uint16_t __rte_hot							\
 otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port,			\
 					   struct rte_event ev[],	\
 					   uint16_t nb_events)		\
@@ -336,8 +339,10 @@ otx2_ssogws_dual_tx_adptr_enq_seg_ ## name(void *port,			\
 	uint64_t cmd[(sz) + NIX_TX_MSEG_SG_DWORDS - 2];			\
 									\
 	RTE_SET_USED(nb_events);					\
-	return otx2_ssogws_event_tx(vws, ev, cmd, (flags) |		\
-				    NIX_TX_MULTI_SEG_F);		\
+	return otx2_ssogws_event_tx(vws, ev, cmd, (const uint64_t	\
+				    (*)[RTE_MAX_QUEUES_PER_PORT])	\
+				    ws->tx_adptr_data,			\
+				    (flags) | NIX_TX_MULTI_SEG_F);	\
 }
 SSO_TX_ADPTR_ENQ_FASTPATH_FUNC
 #undef T
-- 
2.17.1


* Re: [dpdk-dev] [PATCH 1/3] event/octeontx2: fix device reconfigure
  2020-06-29  1:33 [dpdk-dev] [PATCH 1/3] event/octeontx2: fix device reconfigure pbhagavatula
  2020-06-29  1:33 ` [dpdk-dev] [PATCH 2/3] event/octeontx2: fix sub event type violation pbhagavatula
  2020-06-29  1:33 ` [dpdk-dev] [PATCH 3/3] event/octeontx2: improve datapath memory locality pbhagavatula
@ 2020-06-30  5:42 ` Jerin Jacob
  2 siblings, 0 replies; 4+ messages in thread
From: Jerin Jacob @ 2020-06-30  5:42 UTC (permalink / raw)
  To: Pavan Nikhilesh; +Cc: Jerin Jacob, dpdk-dev, dpdk stable

On Mon, Jun 29, 2020 at 7:03 AM <pbhagavatula@marvell.com> wrote:
>
> From: Pavan Nikhilesh <pbhagavatula@marvell.com>
>
> When the event device is reconfigured, preserve the event queue to event
> port links and the event port status instead of resetting them.
>
> Fixes: cd24e70258bd ("event/octeontx2: add device configure function")
> Cc: stable@dpdk.org
>
> Signed-off-by: Pavan Nikhilesh <pbhagavatula@marvell.com>


Series applied to dpdk-next-eventdev/master. Thanks.