All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] net/mlx5: fix erroneous index handling for Tx ring
@ 2017-05-06  1:20 Yongseok Koh
  2017-05-09 20:49 ` [PATCH v2 0/2] " Yongseok Koh
  0 siblings, 1 reply; 6+ messages in thread
From: Yongseok Koh @ 2017-05-06  1:20 UTC (permalink / raw)
  To: ferruh.yigit; +Cc: dev, adrien.mazarguil, nelio.laranjeiro, hhaim, Yongseok Koh

In case of resource deficiency on Tx, mlx5_tx_burst() breaks the loop
without rolling back consumed resources (txq->wqes[] and txq->elts[]). This
can make application crash because unposted mbufs can be freed while
processing completions. In regard to this, some error-prone/redundant
indexing has been cleaned as well.

Reported-by: Hanoch Haim <hhaim@cisco.com>
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 50 +++++++++++++++++++-------------------------
 1 file changed, 21 insertions(+), 29 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 6254228a9..d7176a422 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -493,7 +493,6 @@ uint16_t
 mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 {
 	struct txq *txq = (struct txq *)dpdk_txq;
-	uint16_t elts_head = txq->elts_head;
 	const unsigned int elts_n = 1 << txq->elts_n;
 	unsigned int i = 0;
 	unsigned int j = 0;
@@ -504,6 +503,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	uint16_t max_wqe;
 	unsigned int comp;
 	volatile struct mlx5_wqe_v *wqe = NULL;
+	volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
 	unsigned int segs_n = 0;
 	struct rte_mbuf *buf = NULL;
 	uint8_t *raw;
@@ -514,7 +514,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	rte_prefetch0(*pkts);
 	/* Start processing. */
 	txq_complete(txq);
-	max = (elts_n - (elts_head - txq->elts_tail));
+	max = (elts_n - (txq->elts_head - txq->elts_tail));
 	if (max > elts_n)
 		max -= elts_n;
 	max_wqe = (1u << txq->wqe_n) - (txq->wqe_ci - txq->wqe_pi);
@@ -524,8 +524,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		volatile rte_v128u32_t *dseg = NULL;
 		uint32_t length;
 		unsigned int ds = 0;
+		unsigned int sg = 0;
 		uintptr_t addr;
 		uint64_t naddr;
+		uint16_t elts_head = (txq->elts_head + i + j) & (elts_n - 1);
 		uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
 		uint16_t tso_header_sz = 0;
 		uint16_t ehdr;
@@ -536,7 +538,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #endif
 
 		/* first_seg */
-		buf = *(pkts++);
+		buf = *(pkts + i);
 		segs_n = buf->nb_segs;
 		/*
 		 * Make sure there is enough room to store this packet and
@@ -547,15 +549,13 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			break;
 		max -= segs_n;
 		--segs_n;
-		if (!segs_n)
-			--pkts_n;
 		if (unlikely(--max_wqe == 0))
 			break;
 		wqe = (volatile struct mlx5_wqe_v *)
 			tx_mlx5_wqe(txq, txq->wqe_ci);
 		rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
-		if (pkts_n > 1)
-			rte_prefetch0(*pkts);
+		if (pkts_n - i > 1)
+			rte_prefetch0(*(pkts + i + 1));
 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
 		length = DATA_LEN(buf);
 		ehdr = (((uint8_t *)addr)[1] << 8) |
@@ -567,14 +567,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			break;
 		/* Update element. */
 		(*txq->elts)[elts_head] = buf;
-		elts_head = (elts_head + 1) & (elts_n - 1);
 		/* Prefetch next buffer data. */
-		if (pkts_n > 1) {
-			volatile void *pkt_addr;
-
-			pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *);
-			rte_prefetch0(pkt_addr);
-		}
+		if (pkts_n - i > 1)
+			rte_prefetch0(
+			    rte_pktmbuf_mtod(*(pkts + i + 1), volatile void *));
 		/* Should we enable HW CKSUM offload */
 		if (buf->ol_flags &
 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
@@ -677,10 +673,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 					};
 					ds = 1;
 					total_length = 0;
-					pkts--;
-					pkts_n++;
-					elts_head = (elts_head - 1) &
-						    (elts_n - 1);
 					k++;
 					goto next_wqe;
 				}
@@ -813,14 +805,15 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			naddr,
 			naddr >> 32,
 		};
-		(*txq->elts)[elts_head] = buf;
 		elts_head = (elts_head + 1) & (elts_n - 1);
-		++j;
+		(*txq->elts)[elts_head] = buf;
+		++sg;
 		--segs_n;
+		/* Advance counter only if all segs are successfully posted. */
 		if (segs_n)
 			goto next_seg;
 		else
-			--pkts_n;
+			j += sg;
 next_pkt:
 		++i;
 		/* Initialize known and common part of the WQE structure. */
@@ -853,24 +846,24 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 next_wqe:
 		txq->wqe_ci += (ds + 3) / 4;
+		/* Save the last successful WQE for completion request */
+		last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		/* Increment sent bytes counter. */
 		txq->stats.obytes += total_length;
 #endif
-	} while (pkts_n);
+	} while (i < pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely((i + k) == 0))
 		return 0;
+	txq->elts_head = (txq->elts_head + i + j) & (elts_n - 1);
 	/* Check whether completion threshold has been reached. */
 	comp = txq->elts_comp + i + j + k;
 	if (comp >= MLX5_TX_COMP_THRESH) {
-		volatile struct mlx5_wqe_ctrl *w =
-			(volatile struct mlx5_wqe_ctrl *)wqe;
-
 		/* Request completion on last WQE. */
-		w->ctrl2 = htonl(8);
+		last_wqe->ctrl2 = htonl(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
-		w->ctrl3 = elts_head;
+		last_wqe->ctrl3 = txq->elts_head;
 		txq->elts_comp = 0;
 	} else {
 		txq->elts_comp = comp;
@@ -880,8 +873,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	txq->stats.opackets += i;
 #endif
 	/* Ring QP doorbell. */
-	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
-	txq->elts_head = elts_head;
+	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
 	return i;
 }
 
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v2 0/2] net/mlx5: fix erroneous index handling for Tx ring
  2017-05-06  1:20 [PATCH] net/mlx5: fix erroneous index handling for Tx ring Yongseok Koh
@ 2017-05-09 20:49 ` Yongseok Koh
  2017-05-09 20:49   ` [PATCH v2 1/2] " Yongseok Koh
                     ` (2 more replies)
  0 siblings, 3 replies; 6+ messages in thread
From: Yongseok Koh @ 2017-05-09 20:49 UTC (permalink / raw)
  To: ferruh.yigit; +Cc: dev, adrien.mazarguil, nelio.laranjeiro, hhaim, Yongseok Koh

This patchset is to fix a critical bug which can occur in case of resource
deficiency on Tx path. Flowing multi-segment packets can accelerate the
occurrence rate.

v2:
* Split the patch into two separate ones to make back-port easier.
* Added "Fixes:" tag and "CC: stable@dpdk.org" for the bug fix.

Yongseok Koh (2):
  net/mlx5: fix erroneous index handling for Tx ring
  net/mlx5: change error-prone code on Tx path

 drivers/net/mlx5/mlx5_rxtx.c | 51 +++++++++++++++++++-------------------------
 1 file changed, 22 insertions(+), 29 deletions(-)

-- 
2.11.0

^ permalink raw reply	[flat|nested] 6+ messages in thread

* [PATCH v2 1/2] net/mlx5: fix erroneous index handling for Tx ring
  2017-05-09 20:49 ` [PATCH v2 0/2] " Yongseok Koh
@ 2017-05-09 20:49   ` Yongseok Koh
  2017-05-09 20:49   ` [PATCH v2 2/2] net/mlx5: change error-prone code on Tx path Yongseok Koh
  2017-05-09 22:10   ` [PATCH v2 0/2] net/mlx5: fix erroneous index handling for Tx ring Adrien Mazarguil
  2 siblings, 0 replies; 6+ messages in thread
From: Yongseok Koh @ 2017-05-09 20:49 UTC (permalink / raw)
  To: ferruh.yigit
  Cc: dev, adrien.mazarguil, nelio.laranjeiro, hhaim, Yongseok Koh, stable

In case of resource deficiency on Tx, mlx5_tx_burst() breaks the loop
without rolling back consumed resources (txq->wqes[] and txq->elts[]). This
can make application crash because unposted mbufs can be freed while
processing completions. Other Tx functions don't have this issue.

Fixes: 3f13f8c23a7c ("net/mlx5: support hardware TSO")
Fixes: f04f1d51564b ("net/mlx5: fix Tx WQE corruption caused by starvation")
CC: stable@dpdk.org

Reported-by: Hanoch Haim <hhaim@cisco.com>
Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 25 ++++++++++++++-----------
 1 file changed, 14 insertions(+), 11 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 6254228a9..cf63434d5 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -504,6 +504,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	uint16_t max_wqe;
 	unsigned int comp;
 	volatile struct mlx5_wqe_v *wqe = NULL;
+	volatile struct mlx5_wqe_ctrl *last_wqe = NULL;
 	unsigned int segs_n = 0;
 	struct rte_mbuf *buf = NULL;
 	uint8_t *raw;
@@ -524,6 +525,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		volatile rte_v128u32_t *dseg = NULL;
 		uint32_t length;
 		unsigned int ds = 0;
+		unsigned int sg = 0; /* counter of additional segs attached. */
 		uintptr_t addr;
 		uint64_t naddr;
 		uint16_t pkt_inline_sz = MLX5_WQE_DWORD_SIZE + 2;
@@ -815,12 +817,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		};
 		(*txq->elts)[elts_head] = buf;
 		elts_head = (elts_head + 1) & (elts_n - 1);
-		++j;
-		--segs_n;
-		if (segs_n)
+		++sg;
+		/* Advance counter only if all segs are successfully posted. */
+		if (sg < segs_n) {
 			goto next_seg;
-		else
+		} else {
 			--pkts_n;
+			j += sg;
+		}
 next_pkt:
 		++i;
 		/* Initialize known and common part of the WQE structure. */
@@ -853,6 +857,8 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		}
 next_wqe:
 		txq->wqe_ci += (ds + 3) / 4;
+		/* Save the last successful WQE for completion request */
+		last_wqe = (volatile struct mlx5_wqe_ctrl *)wqe;
 #ifdef MLX5_PMD_SOFT_COUNTERS
 		/* Increment sent bytes counter. */
 		txq->stats.obytes += total_length;
@@ -861,16 +867,14 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely((i + k) == 0))
 		return 0;
+	txq->elts_head = (txq->elts_head + i + j) & (elts_n - 1);
 	/* Check whether completion threshold has been reached. */
 	comp = txq->elts_comp + i + j + k;
 	if (comp >= MLX5_TX_COMP_THRESH) {
-		volatile struct mlx5_wqe_ctrl *w =
-			(volatile struct mlx5_wqe_ctrl *)wqe;
-
 		/* Request completion on last WQE. */
-		w->ctrl2 = htonl(8);
+		last_wqe->ctrl2 = htonl(8);
 		/* Save elts_head in unused "immediate" field of WQE. */
-		w->ctrl3 = elts_head;
+		last_wqe->ctrl3 = txq->elts_head;
 		txq->elts_comp = 0;
 	} else {
 		txq->elts_comp = comp;
@@ -880,8 +884,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 	txq->stats.opackets += i;
 #endif
 	/* Ring QP doorbell. */
-	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)wqe);
-	txq->elts_head = elts_head;
+	mlx5_tx_dbrec(txq, (volatile struct mlx5_wqe *)last_wqe);
 	return i;
 }
 
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* [PATCH v2 2/2] net/mlx5: change error-prone code on Tx path
  2017-05-09 20:49 ` [PATCH v2 0/2] " Yongseok Koh
  2017-05-09 20:49   ` [PATCH v2 1/2] " Yongseok Koh
@ 2017-05-09 20:49   ` Yongseok Koh
  2017-05-09 22:10   ` [PATCH v2 0/2] net/mlx5: fix erroneous index handling for Tx ring Adrien Mazarguil
  2 siblings, 0 replies; 6+ messages in thread
From: Yongseok Koh @ 2017-05-09 20:49 UTC (permalink / raw)
  To: ferruh.yigit; +Cc: dev, adrien.mazarguil, nelio.laranjeiro, hhaim, Yongseok Koh

In the main loop of mlx5_tx_burst(), pointers/indexes are advanced at the
beginning. Therefore, those should be rolled back if checking resource
availability fails and breaks the loop. And some of them are even
redundant.

Signed-off-by: Yongseok Koh <yskoh@mellanox.com>
---
 drivers/net/mlx5/mlx5_rxtx.c | 34 ++++++++++++----------------------
 1 file changed, 12 insertions(+), 22 deletions(-)

diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index cf63434d5..de6e0fa4a 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -538,7 +538,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 #endif
 
 		/* first_seg */
-		buf = *(pkts++);
+		buf = *pkts;
 		segs_n = buf->nb_segs;
 		/*
 		 * Make sure there is enough room to store this packet and
@@ -549,15 +549,13 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			break;
 		max -= segs_n;
 		--segs_n;
-		if (!segs_n)
-			--pkts_n;
 		if (unlikely(--max_wqe == 0))
 			break;
 		wqe = (volatile struct mlx5_wqe_v *)
 			tx_mlx5_wqe(txq, txq->wqe_ci);
 		rte_prefetch0(tx_mlx5_wqe(txq, txq->wqe_ci + 1));
-		if (pkts_n > 1)
-			rte_prefetch0(*pkts);
+		if (pkts_n - i > 1)
+			rte_prefetch0(*(pkts + 1));
 		addr = rte_pktmbuf_mtod(buf, uintptr_t);
 		length = DATA_LEN(buf);
 		ehdr = (((uint8_t *)addr)[1] << 8) |
@@ -569,14 +567,10 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			break;
 		/* Update element. */
 		(*txq->elts)[elts_head] = buf;
-		elts_head = (elts_head + 1) & (elts_n - 1);
 		/* Prefetch next buffer data. */
-		if (pkts_n > 1) {
-			volatile void *pkt_addr;
-
-			pkt_addr = rte_pktmbuf_mtod(*pkts, volatile void *);
-			rte_prefetch0(pkt_addr);
-		}
+		if (pkts_n - i > 1)
+			rte_prefetch0(
+			    rte_pktmbuf_mtod(*(pkts + 1), volatile void *));
 		/* Should we enable HW CKSUM offload */
 		if (buf->ol_flags &
 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
@@ -679,10 +673,6 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 					};
 					ds = 1;
 					total_length = 0;
-					pkts--;
-					pkts_n++;
-					elts_head = (elts_head - 1) &
-						    (elts_n - 1);
 					k++;
 					goto next_wqe;
 				}
@@ -815,17 +805,17 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 			naddr,
 			naddr >> 32,
 		};
-		(*txq->elts)[elts_head] = buf;
 		elts_head = (elts_head + 1) & (elts_n - 1);
+		(*txq->elts)[elts_head] = buf;
 		++sg;
 		/* Advance counter only if all segs are successfully posted. */
-		if (sg < segs_n) {
+		if (sg < segs_n)
 			goto next_seg;
-		} else {
-			--pkts_n;
+		else
 			j += sg;
-		}
 next_pkt:
+		elts_head = (elts_head + 1) & (elts_n - 1);
+		++pkts;
 		++i;
 		/* Initialize known and common part of the WQE structure. */
 		if (tso) {
@@ -863,7 +853,7 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Increment sent bytes counter. */
 		txq->stats.obytes += total_length;
 #endif
-	} while (pkts_n);
+	} while (i < pkts_n);
 	/* Take a shortcut if nothing must be sent. */
 	if (unlikely((i + k) == 0))
 		return 0;
-- 
2.11.0

^ permalink raw reply related	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 0/2] net/mlx5: fix erroneous index handling for Tx ring
  2017-05-09 20:49 ` [PATCH v2 0/2] " Yongseok Koh
  2017-05-09 20:49   ` [PATCH v2 1/2] " Yongseok Koh
  2017-05-09 20:49   ` [PATCH v2 2/2] net/mlx5: change error-prone code on Tx path Yongseok Koh
@ 2017-05-09 22:10   ` Adrien Mazarguil
  2017-05-10 16:06     ` Thomas Monjalon
  2 siblings, 1 reply; 6+ messages in thread
From: Adrien Mazarguil @ 2017-05-09 22:10 UTC (permalink / raw)
  To: Yongseok Koh, Thomas Monjalon, Ferruh Yigit; +Cc: dev, nelio.laranjeiro, hhaim

On Tue, May 09, 2017 at 01:49:29PM -0700, Yongseok Koh wrote:
> This patchset is to fix a critical bug which can occur in case of resource
> deficiency on Tx path. Flowing multi-segment packets can accelerate the
> occurrence rate.
> 
> v2:
> * Split the patch into two separate ones to make back-port easier.
> * Added "Fixes:" tag and "CC: stable@dpdk.org" for the bug fix.
> 
> Yongseok Koh (2):
>   net/mlx5: fix erroneous index handling for Tx ring
>   net/mlx5: change error-prone code on Tx path
> 
>  drivers/net/mlx5/mlx5_rxtx.c | 51 +++++++++++++++++++-------------------------
>  1 file changed, 22 insertions(+), 29 deletions(-)

For the series:

Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>

Thanks for extracting the fix into a separate patch.

Ferruh, Thomas, is there still time to include this for 17.05 given the
seriousness of this bug?

-- 
Adrien Mazarguil
6WIND

^ permalink raw reply	[flat|nested] 6+ messages in thread

* Re: [PATCH v2 0/2] net/mlx5: fix erroneous index handling for Tx ring
  2017-05-09 22:10   ` [PATCH v2 0/2] net/mlx5: fix erroneous index handling for Tx ring Adrien Mazarguil
@ 2017-05-10 16:06     ` Thomas Monjalon
  0 siblings, 0 replies; 6+ messages in thread
From: Thomas Monjalon @ 2017-05-10 16:06 UTC (permalink / raw)
  To: Yongseok Koh; +Cc: dev, Adrien Mazarguil, Ferruh Yigit, nelio.laranjeiro, hhaim

10/05/2017 00:10, Adrien Mazarguil:
> On Tue, May 09, 2017 at 01:49:29PM -0700, Yongseok Koh wrote:
> > This patchset is to fix a critical bug which can occur in case of resource
> > deficiency on Tx path. Flowing multi-segment packets can accelerate the
> > occurrence rate.
> > 
> > v2:
> > * Split the patch into two separate ones to make back-port easier.
> > * Added "Fixes:" tag and "CC: stable@dpdk.org" for the bug fix.
> > 
> > Yongseok Koh (2):
> >   net/mlx5: fix erroneous index handling for Tx ring
> >   net/mlx5: change error-prone code on Tx path
> > 
> >  drivers/net/mlx5/mlx5_rxtx.c | 51 +++++++++++++++++++-------------------------
> >  1 file changed, 22 insertions(+), 29 deletions(-)
> 
> For the series:
> 
> Acked-by: Adrien Mazarguil <adrien.mazarguil@6wind.com>
> 
> Thanks for extracting the fix into a separate patch.
> 
> Ferruh, Thomas, is there still time to include this for 17.05 given the
> seriousness of this bug?

Applied, thanks

^ permalink raw reply	[flat|nested] 6+ messages in thread

end of thread, other threads:[~2017-05-10 16:06 UTC | newest]

Thread overview: 6+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2017-05-06  1:20 [PATCH] net/mlx5: fix erroneous index handling for Tx ring Yongseok Koh
2017-05-09 20:49 ` [PATCH v2 0/2] " Yongseok Koh
2017-05-09 20:49   ` [PATCH v2 1/2] " Yongseok Koh
2017-05-09 20:49   ` [PATCH v2 2/2] net/mlx5: change error-prone code on Tx path Yongseok Koh
2017-05-09 22:10   ` [PATCH v2 0/2] net/mlx5: fix erroneous index handling for Tx ring Adrien Mazarguil
2017-05-10 16:06     ` Thomas Monjalon

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.