* [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings
@ 2022-10-09 15:51 Marek Vasut
  2022-10-09 15:51 ` [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning Marek Vasut
                   ` (3 more replies)
  0 siblings, 4 replies; 8+ messages in thread
From: Marek Vasut @ 2022-10-09 15:51 UTC (permalink / raw)
  To: u-boot
  Cc: Marek Vasut, Joe Hershberger, Patrice Chotard, Patrick Delaunay,
	Ramon Fried, Stephen Warren

Separate TX and RX DMA rings to make their handling slightly clearer.
This is a preparatory patch for bulk RX descriptor flushing.
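
For context, the descriptor layout before and after this change, and the
resulting lookup, can be sketched as follows (the helper below simply
mirrors eqos_get_desc() from the patch; the layout diagram is
illustrative only):

/* Before: one allocation, with the RX ring placed after the TX ring:
 *
 *   descs:    [ TX0 .. TX(n-1) | RX0 .. RX(m-1) ]
 *
 * After: two independent allocations, each indexed from zero:
 *
 *   tx_descs: [ TX0 .. TX(n-1) ]
 *   rx_descs: [ RX0 .. RX(m-1) ]
 */
static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
				       unsigned int num, bool rx)
{
	/* Pick the ring base, then step by the aligned descriptor size. */
	return (rx ? eqos->rx_descs : eqos->tx_descs) +
	       (num * eqos->desc_size);
}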

Signed-off-by: Marek Vasut <marex@denx.de>
---
Cc: Joe Hershberger <joe.hershberger@ni.com>
Cc: Patrice Chotard <patrice.chotard@foss.st.com>
Cc: Patrick Delaunay <patrick.delaunay@foss.st.com>
Cc: Ramon Fried <rfried.dev@gmail.com>
Cc: Stephen Warren <swarren@nvidia.com>
---
 drivers/net/dwc_eth_qos.c | 33 ++++++++++++++++++++++-----------
 drivers/net/dwc_eth_qos.h |  3 ++-
 2 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/drivers/net/dwc_eth_qos.c b/drivers/net/dwc_eth_qos.c
index 001b028fa13..dde2c183b06 100644
--- a/drivers/net/dwc_eth_qos.c
+++ b/drivers/net/dwc_eth_qos.c
@@ -75,9 +75,6 @@
  */
 static void *eqos_alloc_descs(struct eqos_priv *eqos, unsigned int num)
 {
-	eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
-				(unsigned int)ARCH_DMA_MINALIGN);
-
 	return memalign(eqos->desc_size, num * eqos->desc_size);
 }
 
@@ -89,8 +86,8 @@ static void eqos_free_descs(void *descs)
 static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
 				       unsigned int num, bool rx)
 {
-	return eqos->descs +
-		((rx ? EQOS_DESCRIPTORS_TX : 0) + num) * eqos->desc_size;
+	return (rx ? eqos->rx_descs : eqos->tx_descs) +
+	       (num * eqos->desc_size);
 }
 
 void eqos_inval_desc_generic(void *desc)
@@ -1001,7 +998,8 @@ static int eqos_start(struct udevice *dev)
 
 	/* Set up descriptors */
 
-	memset(eqos->descs, 0, eqos->desc_size * EQOS_DESCRIPTORS_NUM);
+	memset(eqos->tx_descs, 0, eqos->desc_size * EQOS_DESCRIPTORS_TX);
+	memset(eqos->rx_descs, 0, eqos->desc_size * EQOS_DESCRIPTORS_RX);
 
 	for (i = 0; i < EQOS_DESCRIPTORS_TX; i++) {
 		struct eqos_desc *tx_desc = eqos_get_desc(eqos, i, false);
@@ -1234,13 +1232,23 @@ static int eqos_probe_resources_core(struct udevice *dev)
 
 	debug("%s(dev=%p):\n", __func__, dev);
 
-	eqos->descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_NUM);
-	if (!eqos->descs) {
-		debug("%s: eqos_alloc_descs() failed\n", __func__);
+	eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
+				(unsigned int)ARCH_DMA_MINALIGN);
+
+	eqos->tx_descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_TX);
+	if (!eqos->tx_descs) {
+		debug("%s: eqos_alloc_descs(tx) failed\n", __func__);
 		ret = -ENOMEM;
 		goto err;
 	}
 
+	eqos->rx_descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_RX);
+	if (!eqos->rx_descs) {
+		debug("%s: eqos_alloc_descs(rx) failed\n", __func__);
+		ret = -ENOMEM;
+		goto err_free_tx_descs;
+	}
+
 	eqos->tx_dma_buf = memalign(EQOS_BUFFER_ALIGN, EQOS_MAX_PACKET_SIZE);
 	if (!eqos->tx_dma_buf) {
 		debug("%s: memalign(tx_dma_buf) failed\n", __func__);
@@ -1276,7 +1284,9 @@ err_free_rx_dma_buf:
 err_free_tx_dma_buf:
 	free(eqos->tx_dma_buf);
 err_free_descs:
-	eqos_free_descs(eqos->descs);
+	eqos_free_descs(eqos->rx_descs);
+err_free_tx_descs:
+	eqos_free_descs(eqos->tx_descs);
 err:
 
 	debug("%s: returns %d\n", __func__, ret);
@@ -1292,7 +1302,8 @@ static int eqos_remove_resources_core(struct udevice *dev)
 	free(eqos->rx_pkt);
 	free(eqos->rx_dma_buf);
 	free(eqos->tx_dma_buf);
-	eqos_free_descs(eqos->descs);
+	eqos_free_descs(eqos->rx_descs);
+	eqos_free_descs(eqos->tx_descs);
 
 	debug("%s: OK\n", __func__);
 	return 0;
diff --git a/drivers/net/dwc_eth_qos.h b/drivers/net/dwc_eth_qos.h
index b35e7742634..e3e43c86d11 100644
--- a/drivers/net/dwc_eth_qos.h
+++ b/drivers/net/dwc_eth_qos.h
@@ -264,7 +264,8 @@ struct eqos_priv {
 	struct phy_device *phy;
 	ofnode phy_of_node;
 	u32 max_speed;
-	void *descs;
+	void *tx_descs;
+	void *rx_descs;
 	int tx_desc_idx, rx_desc_idx;
 	unsigned int desc_size;
 	void *tx_dma_buf;
-- 
2.35.1



* [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning
  2022-10-09 15:51 [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings Marek Vasut
@ 2022-10-09 15:51 ` Marek Vasut
  2022-10-10  7:07   ` Patrice CHOTARD
                     ` (2 more replies)
  2022-10-10  6:37 ` [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings Patrice CHOTARD
                   ` (2 subsequent siblings)
  3 siblings, 3 replies; 8+ messages in thread
From: Marek Vasut @ 2022-10-09 15:51 UTC (permalink / raw)
  To: u-boot
  Cc: Marek Vasut, Joe Hershberger, Patrice Chotard, Patrick Delaunay,
	Ramon Fried, Stephen Warren

Add a new desc_per_cacheline property which lets a platform run RX
descriptor cleanup once per cache line worth of received packets, that is,
every power-of-two packets, instead of after every packet. This is useful
on platforms where the inter-descriptor stride (axi_bus_width
(EQOS_AXI_WIDTH_n) * DMA DSL inter-descriptor word skip count + DMA
descriptor size) is smaller than the cache line size, which necessitates
packing multiple DMA descriptors into a single cache line.

In the case of TX descriptors, this is not a problem, since the driver
always does synchronous TX, i.e. the TX descriptor is always written,
flushed and polled for completion in eqos_send().

In the case of RX descriptors, it is necessary to update their status in
bulk, i.e. only after an entire cache line's worth of RX descriptors has
been used up to receive data.
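
To make the sizing math concrete, below is a minimal standalone sketch of
the desc_size / desc_per_cacheline computation added by this patch; the
constants (16-byte descriptors, 32-bit AXI bus, 64-byte cache lines) are
example values, not taken from any particular platform:

#include <stdio.h>

/* Example values only; in the driver these come from sizeof(struct
 * eqos_desc), eqos->config->axi_bus_width and ARCH_DMA_MINALIGN.
 */
#define DESC_SIZE	16	/* sizeof(struct eqos_desc) */
#define AXI_WIDTH	4	/* EQOS_AXI_WIDTH_32, in bytes */
#define DSL_MASK	7	/* EQOS_DMA_CH0_CONTROL_DSL_MASK */
#define CACHELINE	64	/* ARCH_DMA_MINALIGN */

int main(void)
{
	/* Maximum distance between neighboring descriptors, in bytes. */
	unsigned int desc_step = DESC_SIZE + DSL_MASK * AXI_WIDTH; /* 44 */
	unsigned int desc_size, desc_per_cacheline;

	if (desc_step < CACHELINE)
		/* Pack descriptors: round desc_step down to a power of
		 * two, equivalent to BIT(fls(desc_step) - 1) in the patch.
		 */
		desc_size = 1U << (31 - __builtin_clz(desc_step)); /* 32 */
	else
		/* The ALIGN(sizeof(struct eqos_desc), ...) path: one
		 * cache-line-aligned descriptor per line, as before.
		 */
		desc_size = CACHELINE;

	desc_per_cacheline = CACHELINE / desc_size; /* 2 */

	/* RX cleanup in eqos_free_pkt() then runs only when
	 * (rx_desc_idx & (desc_per_cacheline - 1)) == desc_per_cacheline - 1,
	 * i.e. once an entire cache line of descriptors has been consumed.
	 */
	printf("desc_step=%u desc_size=%u desc_per_cacheline=%u\n",
	       desc_step, desc_size, desc_per_cacheline);

	return 0;
}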

Signed-off-by: Marek Vasut <marex@denx.de>
---
Cc: Joe Hershberger <joe.hershberger@ni.com>
Cc: Patrice Chotard <patrice.chotard@foss.st.com>
Cc: Patrick Delaunay <patrick.delaunay@foss.st.com>
Cc: Ramon Fried <rfried.dev@gmail.com>
Cc: Stephen Warren <swarren@nvidia.com>
---
 drivers/net/dwc_eth_qos.c | 67 +++++++++++++++++++++++++--------------
 drivers/net/dwc_eth_qos.h |  2 ++
 2 files changed, 46 insertions(+), 23 deletions(-)

diff --git a/drivers/net/dwc_eth_qos.c b/drivers/net/dwc_eth_qos.c
index dde2c183b06..afc47b56ff5 100644
--- a/drivers/net/dwc_eth_qos.c
+++ b/drivers/net/dwc_eth_qos.c
@@ -75,7 +75,7 @@
  */
 static void *eqos_alloc_descs(struct eqos_priv *eqos, unsigned int num)
 {
-	return memalign(eqos->desc_size, num * eqos->desc_size);
+	return memalign(ARCH_DMA_MINALIGN, num * eqos->desc_size);
 }
 
 static void eqos_free_descs(void *descs)
@@ -92,7 +92,7 @@ static struct eqos_desc *eqos_get_desc(struct eqos_priv *eqos,
 
 void eqos_inval_desc_generic(void *desc)
 {
-	unsigned long start = (unsigned long)desc;
+	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
 	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
 				  ARCH_DMA_MINALIGN);
 
@@ -101,7 +101,7 @@ void eqos_inval_desc_generic(void *desc)
 
 void eqos_flush_desc_generic(void *desc)
 {
-	unsigned long start = (unsigned long)desc;
+	unsigned long start = (unsigned long)desc & ~(ARCH_DMA_MINALIGN - 1);
 	unsigned long end = ALIGN(start + sizeof(struct eqos_desc),
 				  ARCH_DMA_MINALIGN);
 
@@ -1185,6 +1185,7 @@ static int eqos_recv(struct udevice *dev, int flags, uchar **packetp)
 static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
 {
 	struct eqos_priv *eqos = dev_get_priv(dev);
+	u32 idx, idx_mask = eqos->desc_per_cacheline - 1;
 	uchar *packet_expected;
 	struct eqos_desc *rx_desc;
 
@@ -1200,24 +1201,30 @@ static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
 
 	eqos->config->ops->eqos_inval_buffer(packet, length);
 
-	rx_desc = eqos_get_desc(eqos, eqos->rx_desc_idx, true);
-
-	rx_desc->des0 = 0;
-	mb();
-	eqos->config->ops->eqos_flush_desc(rx_desc);
-	eqos->config->ops->eqos_inval_buffer(packet, length);
-	rx_desc->des0 = (u32)(ulong)packet;
-	rx_desc->des1 = 0;
-	rx_desc->des2 = 0;
-	/*
-	 * Make sure that if HW sees the _OWN write below, it will see all the
-	 * writes to the rest of the descriptor too.
-	 */
-	mb();
-	rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
-	eqos->config->ops->eqos_flush_desc(rx_desc);
-
-	writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
+	if ((eqos->rx_desc_idx & idx_mask) == idx_mask) {
+		for (idx = eqos->rx_desc_idx - idx_mask;
+		     idx <= eqos->rx_desc_idx;
+		     idx++) {
+			rx_desc = eqos_get_desc(eqos, idx, true);
+			rx_desc->des0 = 0;
+			mb();
+			eqos->config->ops->eqos_flush_desc(rx_desc);
+			eqos->config->ops->eqos_inval_buffer(packet, length);
+			rx_desc->des0 = (u32)(ulong)(eqos->rx_dma_buf +
+					     (idx * EQOS_MAX_PACKET_SIZE));
+			rx_desc->des1 = 0;
+			rx_desc->des2 = 0;
+			/*
+			 * Make sure that if HW sees the _OWN write below,
+			 * it will see all the writes to the rest of the
+			 * descriptor too.
+			 */
+			mb();
+			rx_desc->des3 = EQOS_DESC3_OWN | EQOS_DESC3_BUF1V;
+			eqos->config->ops->eqos_flush_desc(rx_desc);
+		}
+		writel((ulong)rx_desc, &eqos->dma_regs->ch0_rxdesc_tail_pointer);
+	}
 
 	eqos->rx_desc_idx++;
 	eqos->rx_desc_idx %= EQOS_DESCRIPTORS_RX;
@@ -1228,12 +1235,26 @@ static int eqos_free_pkt(struct udevice *dev, uchar *packet, int length)
 static int eqos_probe_resources_core(struct udevice *dev)
 {
 	struct eqos_priv *eqos = dev_get_priv(dev);
+	unsigned int desc_step;
 	int ret;
 
 	debug("%s(dev=%p):\n", __func__, dev);
 
-	eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
-				(unsigned int)ARCH_DMA_MINALIGN);
+	/* Maximum distance between neighboring descriptors, in Bytes. */
+	desc_step = sizeof(struct eqos_desc) +
+		    EQOS_DMA_CH0_CONTROL_DSL_MASK * eqos->config->axi_bus_width;
+	if (desc_step < ARCH_DMA_MINALIGN) {
+		/*
+		 * The EQoS hardware implementation cannot place one descriptor
+		 * per cacheline; it is necessary to place multiple descriptors
+		 * per cacheline in memory and do cache management carefully.
+		 */
+		eqos->desc_size = BIT(fls(desc_step) - 1);
+	} else {
+		eqos->desc_size = ALIGN(sizeof(struct eqos_desc),
+					(unsigned int)ARCH_DMA_MINALIGN);
+	}
+	eqos->desc_per_cacheline = ARCH_DMA_MINALIGN / eqos->desc_size;
 
 	eqos->tx_descs = eqos_alloc_descs(eqos, EQOS_DESCRIPTORS_TX);
 	if (!eqos->tx_descs) {
diff --git a/drivers/net/dwc_eth_qos.h b/drivers/net/dwc_eth_qos.h
index e3e43c86d11..8fccd6f0572 100644
--- a/drivers/net/dwc_eth_qos.h
+++ b/drivers/net/dwc_eth_qos.h
@@ -162,6 +162,7 @@ struct eqos_dma_regs {
 #define EQOS_DMA_SYSBUS_MODE_BLEN4			BIT(1)
 
 #define EQOS_DMA_CH0_CONTROL_DSL_SHIFT			18
+#define EQOS_DMA_CH0_CONTROL_DSL_MASK			0x7
 #define EQOS_DMA_CH0_CONTROL_PBLX8			BIT(16)
 
 #define EQOS_DMA_CH0_TX_CONTROL_TXPBL_SHIFT		16
@@ -268,6 +269,7 @@ struct eqos_priv {
 	void *rx_descs;
 	int tx_desc_idx, rx_desc_idx;
 	unsigned int desc_size;
+	unsigned int desc_per_cacheline;
 	void *tx_dma_buf;
 	void *rx_dma_buf;
 	void *rx_pkt;
-- 
2.35.1



* Re: [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings
  2022-10-09 15:51 [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings Marek Vasut
  2022-10-09 15:51 ` [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning Marek Vasut
@ 2022-10-10  6:37 ` Patrice CHOTARD
  2022-10-16 18:15 ` Ramon Fried
  2022-11-28 19:50 ` Tom Rini
  3 siblings, 0 replies; 8+ messages in thread
From: Patrice CHOTARD @ 2022-10-10  6:37 UTC (permalink / raw)
  To: Marek Vasut, u-boot
  Cc: Joe Hershberger, Patrick Delaunay, Ramon Fried, Stephen Warren

Hi Marek

On 10/9/22 17:51, Marek Vasut wrote:
> Separate TX and RX DMA rings to make their handling slightly clearer.
> This is a preparatory patch for bulk RX descriptor flushing.
> 
> Signed-off-by: Marek Vasut <marex@denx.de>
> 
> [...]

Reviewed-by: Patrice Chotard <patrice.chotard@foss.st.com>

Thanks
Patrice


* Re: [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning
  2022-10-09 15:51 ` [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning Marek Vasut
@ 2022-10-10  7:07   ` Patrice CHOTARD
  2022-10-16 18:16   ` Ramon Fried
  2022-11-28 19:50   ` Tom Rini
  2 siblings, 0 replies; 8+ messages in thread
From: Patrice CHOTARD @ 2022-10-10  7:07 UTC (permalink / raw)
  To: Marek Vasut, u-boot
  Cc: Joe Hershberger, Patrick Delaunay, Ramon Fried, Stephen Warren

Hi Marek

On 10/9/22 17:51, Marek Vasut wrote:
> Add a new desc_per_cacheline property which lets a platform run RX
> descriptor cleanup once per cache line worth of received packets, that is,
> every power-of-two packets, instead of after every packet. This is useful
> on platforms where the inter-descriptor stride (axi_bus_width
> (EQOS_AXI_WIDTH_n) * DMA DSL inter-descriptor word skip count + DMA
> descriptor size) is smaller than the cache line size, which necessitates
> packing multiple DMA descriptors into a single cache line.
> 
> In the case of TX descriptors, this is not a problem, since the driver
> always does synchronous TX, i.e. the TX descriptor is always written,
> flushed and polled for completion in eqos_send().
> 
> In the case of RX descriptors, it is necessary to update their status in
> bulk, i.e. only after an entire cache line's worth of RX descriptors has
> been used up to receive data.
> 
> Signed-off-by: Marek Vasut <marex@denx.de>
> 
> [...]
Reviewed-by: Patrice Chotard <patrice.chotard@foss.st.com>

Thanks
Patrice


* Re: [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings
  2022-10-09 15:51 [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings Marek Vasut
  2022-10-09 15:51 ` [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning Marek Vasut
  2022-10-10  6:37 ` [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings Patrice CHOTARD
@ 2022-10-16 18:15 ` Ramon Fried
  2022-11-28 19:50 ` Tom Rini
  3 siblings, 0 replies; 8+ messages in thread
From: Ramon Fried @ 2022-10-16 18:15 UTC (permalink / raw)
  To: Marek Vasut
  Cc: u-boot, Joe Hershberger, Patrice Chotard, Patrick Delaunay,
	Stephen Warren

On Sun, Oct 9, 2022 at 6:51 PM Marek Vasut <marex@denx.de> wrote:
>
> Separate TX and RX DMA rings to make their handling slightly clearer.
> This is a preparatory patch for bulk RX descriptor flushing.
>
> Signed-off-by: Marek Vasut <marex@denx.de>
>
> [...]
>
Reviewed-by: Ramon Fried <rfried.dev@gmail.com>


* Re: [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning
  2022-10-09 15:51 ` [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning Marek Vasut
  2022-10-10  7:07   ` Patrice CHOTARD
@ 2022-10-16 18:16   ` Ramon Fried
  2022-11-28 19:50   ` Tom Rini
  2 siblings, 0 replies; 8+ messages in thread
From: Ramon Fried @ 2022-10-16 18:16 UTC (permalink / raw)
  To: Marek Vasut
  Cc: u-boot, Joe Hershberger, Patrice Chotard, Patrick Delaunay,
	Stephen Warren

On Sun, Oct 9, 2022 at 6:51 PM Marek Vasut <marex@denx.de> wrote:
>
> Add a new desc_per_cacheline property which lets a platform run RX
> descriptor cleanup once per cache line worth of received packets, that is,
> every power-of-two packets, instead of after every packet. This is useful
> on platforms where the inter-descriptor stride (axi_bus_width
> (EQOS_AXI_WIDTH_n) * DMA DSL inter-descriptor word skip count + DMA
> descriptor size) is smaller than the cache line size, which necessitates
> packing multiple DMA descriptors into a single cache line.
>
> In the case of TX descriptors, this is not a problem, since the driver
> always does synchronous TX, i.e. the TX descriptor is always written,
> flushed and polled for completion in eqos_send().
>
> In the case of RX descriptors, it is necessary to update their status in
> bulk, i.e. only after an entire cache line's worth of RX descriptors has
> been used up to receive data.
>
> Signed-off-by: Marek Vasut <marex@denx.de>
>
> [...]
>
Reviewed-by: Ramon Fried <rfried.dev@gmail.com>


* Re: [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings
  2022-10-09 15:51 [PATCH 1/2] net: dwc_eth_qos: Split TX and RX DMA rings Marek Vasut
                   ` (2 preceding siblings ...)
  2022-10-16 18:15 ` Ramon Fried
@ 2022-11-28 19:50 ` Tom Rini
  3 siblings, 0 replies; 8+ messages in thread
From: Tom Rini @ 2022-11-28 19:50 UTC (permalink / raw)
  To: Marek Vasut
  Cc: u-boot, Joe Hershberger, Patrice Chotard, Patrick Delaunay,
	Ramon Fried, Stephen Warren


On Sun, Oct 09, 2022 at 05:51:45PM +0200, Marek Vasut wrote:

> Separate TX and RX DMA rings to make their handling slightly clearer.
> This is a preparatory patch for bulk RX descriptor flushing.
> 
> Signed-off-by: Marek Vasut <marex@denx.de>
> Reviewed-by: Patrice Chotard <patrice.chotard@foss.st.com>
> Reviewed-by: Ramon Fried <rfried.dev@gmail.com>

Applied to u-boot/master, thanks!

-- 
Tom



* Re: [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning
  2022-10-09 15:51 ` [PATCH 2/2] net: dwc_eth_qos: Add support for bulk RX descriptor cleaning Marek Vasut
  2022-10-10  7:07   ` Patrice CHOTARD
  2022-10-16 18:16   ` Ramon Fried
@ 2022-11-28 19:50   ` Tom Rini
  2 siblings, 0 replies; 8+ messages in thread
From: Tom Rini @ 2022-11-28 19:50 UTC (permalink / raw)
  To: Marek Vasut
  Cc: u-boot, Joe Hershberger, Patrice Chotard, Patrick Delaunay,
	Ramon Fried, Stephen Warren


On Sun, Oct 09, 2022 at 05:51:46PM +0200, Marek Vasut wrote:

> Add a new desc_per_cacheline property which lets a platform run RX
> descriptor cleanup once per cache line worth of received packets, that is,
> every power-of-two packets, instead of after every packet. This is useful
> on platforms where the inter-descriptor stride (axi_bus_width
> (EQOS_AXI_WIDTH_n) * DMA DSL inter-descriptor word skip count + DMA
> descriptor size) is smaller than the cache line size, which necessitates
> packing multiple DMA descriptors into a single cache line.
> 
> In the case of TX descriptors, this is not a problem, since the driver
> always does synchronous TX, i.e. the TX descriptor is always written,
> flushed and polled for completion in eqos_send().
> 
> In the case of RX descriptors, it is necessary to update their status in
> bulk, i.e. only after an entire cache line's worth of RX descriptors has
> been used up to receive data.
> 
> Signed-off-by: Marek Vasut <marex@denx.de>
> Reviewed-by: Patrice Chotard <patrice.chotard@foss.st.com>
> Reviewed-by: Ramon Fried <rfried.dev@gmail.com>

Applied to u-boot/master, thanks!

-- 
Tom


