From: Joakim Zhang <qiangqing.zhang@nxp.com>
To: peppe.cavallaro@st.com, alexandre.torgue@st.com,
	joabreu@synopsys.com, davem@davemloft.net, kuba@kernel.org
Cc: netdev@vger.kernel.org, linux-imx@nxp.com
Subject: [PATCH V4 net 5/5] net: stmmac: re-init rx buffers when mac resume back
Date: Thu,  4 Feb 2021 19:21:44 +0800
Message-ID: <20210204112144.24163-6-qiangqing.zhang@nxp.com>
In-Reply-To: <20210204112144.24163-1-qiangqing.zhang@nxp.com>

During suspend/resume stress testing, we found that descriptors written
back by the DMA could contain unexpected values, e.g.:
	003 [0xc4310030]: 0x0 0x40 0x0 0xb5010040

We can see that the desc3 write-back value is 0xb5010040, i.e. the
descriptor is still owned by the DMA, so the driver never recycles this
buffer. A fatal bus error is then triggered when the DMA tries to use
this descriptor again. To fix this, re-initialize all RX buffers when
the MAC resumes.
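
For reference, the ownership information lives in bit 31 (OWN) of RDES3
in the dwmac4 descriptor layout, which is why 0xb5010040 reads as
DMA-owned. A minimal sketch of that check, assuming the dwmac4 layout
(illustration only, not the driver's exact code path):

	/* RDES3 value written back for descriptor 003 above */
	u32 rdes3 = 0xb5010040;

	/* Bit 31 is the OWN bit: while it stays set, software must not
	 * touch the descriptor, so the buffer is never recycled and the
	 * RX ring eventually stalls.
	 */
	bool owned_by_dma = rdes3 & BIT(31);	/* evaluates to true here */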

Signed-off-by: Joakim Zhang <qiangqing.zhang@nxp.com>
---
 .../net/ethernet/stmicro/stmmac/stmmac_main.c | 87 ++++++++++++++++++-
 1 file changed, 86 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 04ba77775e52..e8e9e5b8c62d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1379,6 +1379,91 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 	}
 }
 
+/**
+ * stmmac_reinit_rx_buffers - reinit the RX descriptor buffers.
+ * @priv: driver private structure
+ * Description: this function is called at resume time to re-allocate the RX
+ * buffers of every queue, perform the DMA mapping and init the descriptors.
+ */
+static int stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
+{
+	u32 rx_count = priv->plat->rx_queues_to_use;
+	u32 queue;
+	int i;
+
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		for (i = 0; i < priv->dma_rx_size; i++) {
+			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+
+			if (buf->page) {
+				page_pool_recycle_direct(rx_q->page_pool, buf->page);
+				buf->page = NULL;
+			}
+
+			if (priv->sph && buf->sec_page) {
+				page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
+				buf->sec_page = NULL;
+			}
+		}
+	}
+
+	for (queue = 0; queue < rx_count; queue++) {
+		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
+
+		for (i = 0; i < priv->dma_rx_size; i++) {
+			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
+			struct dma_desc *p;
+
+			if (priv->extend_desc)
+				p = &((rx_q->dma_erx + i)->basic);
+			else
+				p = rx_q->dma_rx + i;
+
+			if (!buf->page) {
+				buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
+				if (!buf->page)
+					goto err_reinit_rx_buffers;
+
+				buf->addr = page_pool_get_dma_addr(buf->page);
+			}
+
+			if (priv->sph && !buf->sec_page) {
+				buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
+				if (!buf->sec_page)
+					goto err_reinit_rx_buffers;
+
+				buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
+			}
+
+			stmmac_set_desc_addr(priv, p, buf->addr);
+			if (priv->sph)
+				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
+			else
+				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
+			if (priv->dma_buf_sz == BUF_SIZE_16KiB)
+				stmmac_init_desc3(priv, p);
+		}
+	}
+
+	return 0;
+
+err_reinit_rx_buffers:
+	while (queue >= 0) {
+		while (--i >= 0)
+			stmmac_free_rx_buffer(priv, queue, i);
+
+		if (queue == 0)
+			break;
+
+		i = priv->dma_rx_size;
+		queue--;
+	}
+
+	return -ENOMEM;
+}
+
 /**
  * init_dma_rx_desc_rings - init the RX descriptor rings
  * @dev: net device structure
@@ -5340,7 +5425,7 @@ int stmmac_resume(struct device *dev)
 	mutex_lock(&priv->lock);
 
 	stmmac_reset_queues_param(priv);
-
+	stmmac_reinit_rx_buffers(priv);
 	stmmac_free_tx_skbufs(priv);
 	stmmac_clear_descriptors(priv);
 
-- 
2.17.1

