linux-kernel.vger.kernel.org archive mirror
* [PATCH] net: mvneta: use the correct napi pointer
@ 2018-08-09 12:02 Jisheng Zhang
  2018-08-09 12:13 ` Andrew Lunn
From: Jisheng Zhang @ 2018-08-09 12:02 UTC (permalink / raw)
  To: Thomas Petazzoni, David S. Miller
  Cc: netdev, linux-kernel, Marek Behún, Tomas Hlavacek,
	Andrew Lunn, linux-arm-kernel

If neta_armada3700 is true, the mvneta_pcpu_port's napi is invalid and
pp->napi should be used instead. Fix mvneta_config_rss() accordingly.
Although mvneta_rx_hwbm() and mvneta_rx_swbm() could be fixed in the
same manner, the napi parameter of mvneta_poll() is always correct, so
we simply pass that napi down to mvneta_rx_hwbm() and mvneta_rx_swbm().
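
The essence of the rx-path part is sketched below (simplified from the
diff, not the literal driver code): the rx routines no longer look up
the per-cpu napi themselves but receive it from mvneta_poll(), which is
always handed the correct instance by the core.

	/* Before: wrong on Armada 3700, where only pp->napi exists. */
	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
	napi_gro_receive(&port->napi, skb);

	/* After: mvneta_poll() passes its own napi argument down ... */
	rx_done = mvneta_rx_swbm(pp, napi, budget, &pp->rxqs[rx_queue]);

	/* ... and the rx routines use that parameter directly. */
	napi_gro_receive(napi, skb);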

Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
---
 drivers/net/ethernet/marvell/mvneta.c | 55 ++++++++++++++++-----------
 1 file changed, 32 insertions(+), 23 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0ad2f3f7da85..74b701fed5ef 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1901,10 +1901,9 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
 }
 
 /* Main rx processing when using software buffer management */
-static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
-			  struct mvneta_rx_queue *rxq)
+static int mvneta_rx_swbm(struct mvneta_port *pp, struct napi_struct *napi,
+			  int rx_todo, struct mvneta_rx_queue *rxq)
 {
-	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 	struct net_device *dev = pp->dev;
 	int rx_done;
 	u32 rcvd_pkts = 0;
@@ -1959,7 +1958,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 
 			skb->protocol = eth_type_trans(skb, dev);
 			mvneta_rx_csum(pp, rx_status, skb);
-			napi_gro_receive(&port->napi, skb);
+			napi_gro_receive(napi, skb);
 
 			rcvd_pkts++;
 			rcvd_bytes += rx_bytes;
@@ -2001,7 +2000,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 
 		mvneta_rx_csum(pp, rx_status, skb);
 
-		napi_gro_receive(&port->napi, skb);
+		napi_gro_receive(napi, skb);
 	}
 
 	if (rcvd_pkts) {
@@ -2020,10 +2019,9 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
 }
 
 /* Main rx processing when using hardware buffer management */
-static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
-			  struct mvneta_rx_queue *rxq)
+static int mvneta_rx_hwbm(struct mvneta_port *pp, struct napi_struct *napi,
+			  int rx_todo, struct mvneta_rx_queue *rxq)
 {
-	struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports);
 	struct net_device *dev = pp->dev;
 	int rx_done;
 	u32 rcvd_pkts = 0;
@@ -2085,7 +2083,7 @@ static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
 
 			skb->protocol = eth_type_trans(skb, dev);
 			mvneta_rx_csum(pp, rx_status, skb);
-			napi_gro_receive(&port->napi, skb);
+			napi_gro_receive(napi, skb);
 
 			rcvd_pkts++;
 			rcvd_bytes += rx_bytes;
@@ -2129,7 +2127,7 @@ static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo,
 
 		mvneta_rx_csum(pp, rx_status, skb);
 
-		napi_gro_receive(&port->napi, skb);
+		napi_gro_receive(napi, skb);
 	}
 
 	if (rcvd_pkts) {
@@ -2722,9 +2720,11 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
 	if (rx_queue) {
 		rx_queue = rx_queue - 1;
 		if (pp->bm_priv)
-			rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]);
+			rx_done = mvneta_rx_hwbm(pp, napi, budget,
+						 &pp->rxqs[rx_queue]);
 		else
-			rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
+			rx_done = mvneta_rx_swbm(pp, napi, budget,
+						 &pp->rxqs[rx_queue]);
 	}
 
 	if (rx_done < budget) {
@@ -4018,13 +4018,18 @@ static int  mvneta_config_rss(struct mvneta_port *pp)
 
 	on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
 
-	/* We have to synchronise on the napi of each CPU */
-	for_each_online_cpu(cpu) {
-		struct mvneta_pcpu_port *pcpu_port =
-			per_cpu_ptr(pp->ports, cpu);
+	if (!pp->neta_armada3700) {
+		/* We have to synchronise on the napi of each CPU */
+		for_each_online_cpu(cpu) {
+			struct mvneta_pcpu_port *pcpu_port =
+				per_cpu_ptr(pp->ports, cpu);
 
-		napi_synchronize(&pcpu_port->napi);
-		napi_disable(&pcpu_port->napi);
+			napi_synchronize(&pcpu_port->napi);
+			napi_disable(&pcpu_port->napi);
+		}
+	} else {
+		napi_synchronize(&pp->napi);
+		napi_disable(&pp->napi);
 	}
 
 	pp->rxq_def = pp->indir[0];
@@ -4041,12 +4046,16 @@ static int  mvneta_config_rss(struct mvneta_port *pp)
 	mvneta_percpu_elect(pp);
 	spin_unlock(&pp->lock);
 
-	/* We have to synchronise on the napi of each CPU */
-	for_each_online_cpu(cpu) {
-		struct mvneta_pcpu_port *pcpu_port =
-			per_cpu_ptr(pp->ports, cpu);
+	if (!pp->neta_armada3700) {
+		/* We have to synchronise on the napi of each CPU */
+		for_each_online_cpu(cpu) {
+			struct mvneta_pcpu_port *pcpu_port =
+				per_cpu_ptr(pp->ports, cpu);
 
-		napi_enable(&pcpu_port->napi);
+			napi_enable(&pcpu_port->napi);
+		}
+	} else {
+		napi_enable(&pp->napi);
 	}
 
 	netif_tx_start_all_queues(pp->dev);
-- 
2.18.0



* Re: [PATCH] net: mvneta: use the correct napi pointer
  2018-08-09 12:02 [PATCH] net: mvneta: use the correct napi pointer Jisheng Zhang
@ 2018-08-09 12:13 ` Andrew Lunn
  2018-08-10  2:40   ` Jisheng Zhang
From: Andrew Lunn @ 2018-08-09 12:13 UTC (permalink / raw)
  To: Jisheng Zhang
  Cc: Thomas Petazzoni, David S. Miller, netdev, linux-kernel,
	Marek Behún, Tomas Hlavacek, linux-arm-kernel

On Thu, Aug 09, 2018 at 08:02:42PM +0800, Jisheng Zhang wrote:
> If neta_armada3700 is true, the mvneta_pcpu_port's napi is invalid and
> pp->napi should be used instead. Fix mvneta_config_rss() accordingly.
> Although mvneta_rx_hwbm() and mvneta_rx_swbm() could be fixed in the
> same manner, the napi parameter of mvneta_poll() is always correct, so
> we simply pass that napi down to mvneta_rx_hwbm() and mvneta_rx_swbm().
> 
> Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>

Hi Jisheng

How does this differ from

commit 7a86f05faf112463cfbbdfd222012e247de461a1
Author: Andrew Lunn <andrew@lunn.ch>
Date:   Wed Jul 18 18:10:50 2018 +0200

    net: ethernet: mvneta: Fix napi structure mixup on armada 3700
    
    The mvneta Ethernet driver is used on a few different Marvell SoCs.
    Some SoCs have per cpu interrupts for Ethernet events. Some SoCs have
    a single interrupt, independent of the CPU. The driver handles this by
    having a per CPU napi structure when there are per CPU interrupts, and
    a global napi structure when there is a single interrupt.
    
    When the napi core calls mvneta_poll(), it passes the napi
    instance. This was not being propagated through the call chain, and
    instead the per-cpu napi instance was passed to napi_gro_receive()
    call. This breaks when there is a single global napi instance.
    
    Signed-off-by: Andrew Lunn <andrew@lunn.ch>
    Fixes: 2636ac3cc2b4 ("net: mvneta: Add network support for Armada 3700 SoC")
    Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>

which is already in net-next, and I hope net?
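
For reference, the two napi layouts that commit describes look roughly
like this (a simplified sketch showing only the napi-related fields; the
real driver structures have more members):

	/* Per-CPU interrupts: one napi per CPU, in the per-cpu port data. */
	struct mvneta_pcpu_port {
		struct napi_struct	napi;
		/* other fields omitted */
	};

	/* Single shared interrupt (Armada 3700): one global napi instead. */
	struct mvneta_port {
		struct mvneta_pcpu_port __percpu *ports;
		struct napi_struct	napi;	/* used when neta_armada3700 is set */
		/* other fields omitted */
	};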

      Andrew


* Re: [PATCH] net: mvneta: use the correct napi pointer
  2018-08-09 12:13 ` Andrew Lunn
@ 2018-08-10  2:40   ` Jisheng Zhang
From: Jisheng Zhang @ 2018-08-10  2:40 UTC (permalink / raw)
  To: Andrew Lunn
  Cc: Thomas Petazzoni, David S. Miller, netdev, linux-kernel,
	Marek Behún, Tomas Hlavacek, linux-arm-kernel

Hi,

On Thu, 9 Aug 2018 14:13:08 +0200 Andrew Lunn wrote:

> On Thu, Aug 09, 2018 at 08:02:42PM +0800, Jisheng Zhang wrote:
> > If neta_armada3700 is true, the mvneta_pcpu_port's napi is invalid and
> > pp->napi should be used instead. Fix mvneta_config_rss() accordingly.
> > Although mvneta_rx_hwbm() and mvneta_rx_swbm() could be fixed in the
> > same manner, the napi parameter of mvneta_poll() is always correct, so
> > we simply pass that napi down to mvneta_rx_hwbm() and mvneta_rx_swbm().
> > 
> > Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>  
> 
> Hi Jisheng
> 
> How does this differ from

Oops, I haven't been monitoring the net-next tree recently; glad to see it's
fixed. But mvneta_config_rss() also needs the fix. Since your patch is already
merged, I will submit a separate patch to fix mvneta_config_rss().
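
For completeness, that follow-up would make mvneta_config_rss() pick the
napi instance the same way, along the lines of the rss hunk in the patch
at the top of this thread (a sketch, not the final patch):

	if (!pp->neta_armada3700) {
		/* We have to synchronise on the napi of each CPU */
		for_each_online_cpu(cpu) {
			struct mvneta_pcpu_port *pcpu_port =
				per_cpu_ptr(pp->ports, cpu);

			napi_synchronize(&pcpu_port->napi);
			napi_disable(&pcpu_port->napi);
		}
	} else {
		napi_synchronize(&pp->napi);
		napi_disable(&pp->napi);
	}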

Thanks a lot,
Jisheng

> 
> commit 7a86f05faf112463cfbbdfd222012e247de461a1
> Author: Andrew Lunn <andrew@lunn.ch>
> Date:   Wed Jul 18 18:10:50 2018 +0200
> 
>     net: ethernet: mvneta: Fix napi structure mixup on armada 3700
>     
>     The mvneta Ethernet driver is used on a few different Marvell SoCs.
>     Some SoCs have per cpu interrupts for Ethernet events. Some SoCs have
>     a single interrupt, independent of the CPU. The driver handles this by
>     having a per CPU napi structure when there are per CPU interrupts, and
>     a global napi structure when there is a single interrupt.
>     
>     When the napi core calls mvneta_poll(), it passes the napi
>     instance. This was not being propagated through the call chain, and
>     instead the per-cpu napi instance was passed to napi_gro_receive()
>     call. This breaks when there is a single global napi instance.
>     
>     Signed-off-by: Andrew Lunn <andrew@lunn.ch>
>     Fixes: 2636ac3cc2b4 ("net: mvneta: Add network support for Armada 3700 SoC")
>     Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
>     Signed-off-by: David S. Miller <davem@davemloft.net>
> 
> which is already in net-next, and I hope net?
> 
>       Andrew


