* [PATCH v2 0/2] net: mvneta: improve suspend/resume
@ 2018-03-30 10:33 ` Jisheng Zhang
From: Jisheng Zhang @ 2018-03-30 10:33 UTC (permalink / raw)
  To: David Miller, Thomas Petazzoni; +Cc: netdev, linux-arm-kernel, linux-kernel

This series optimizes the mvneta suspend/resume implementation so
that only the necessary actions are taken during each transition.

Since v1:
 - unify the ret check
 - preserve the existing suspend/resume behavior as much as possible
 - split txq deinit into SW and HW parts as well
 - adjust the location of mvneta_stop_dev()

I did not carry Thomas's Ack on patch 1, because v2 adds new code to
split the txq deinit into two parts.

Jisheng Zhang (2):
  net: mvneta: split rxq/txq init and txq deinit into SW and HW parts
  net: mvneta: improve suspend/resume

 drivers/net/ethernet/marvell/mvneta.c | 156 +++++++++++++++++++++++++++-------
 1 file changed, 127 insertions(+), 29 deletions(-)

-- 
2.16.3

* [PATCH v2 1/2] net: mvneta: split rxq/txq init and txq deinit into SW and HW parts
  2018-03-30 10:33 ` Jisheng Zhang
@ 2018-03-30 10:34   ` Jisheng Zhang
From: Jisheng Zhang @ 2018-03-30 10:34 UTC (permalink / raw)
  To: David Miller, Thomas Petazzoni; +Cc: netdev, linux-arm-kernel, linux-kernel

This prepares for the suspend/resume improvement in the next patch:
during resume, the SW parts can be skipped entirely.

As for rxq handling during suspend, we want to drop packets by
calling mvneta_rxq_drop_pkts(), which is both a SW and a HW
operation, so the rxq deinit is not split.
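
A minimal sketch of what the split buys us (the helper name
example_resume_txqs() is hypothetical; the actual resume changes come
in the next patch): a resume path can reprogram only the HW half
while keeping the buffers that the SW half allocated.

	static void example_resume_txqs(struct mvneta_port *pp)
	{
		int queue;

		for (queue = 0; queue < txq_number; queue++) {
			struct mvneta_tx_queue *txq = &pp->txqs[queue];

			/* The descriptor ring and tx_skb array survived
			 * suspend; only the queue registers need to be
			 * reprogrammed.
			 */
			txq->next_desc_to_proc = 0;
			mvneta_txq_hw_init(pp, txq);
		}
	}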

Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
---
 drivers/net/ethernet/marvell/mvneta.c | 85 +++++++++++++++++++++++++++--------
 1 file changed, 66 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 30aab9bf77cc..f96815853108 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -2796,10 +2796,8 @@ static void mvneta_rx_reset(struct mvneta_port *pp)
 
 /* Rx/Tx queue initialization/cleanup methods */
 
-/* Create a specified RX queue */
-static int mvneta_rxq_init(struct mvneta_port *pp,
-			   struct mvneta_rx_queue *rxq)
-
+static int mvneta_rxq_sw_init(struct mvneta_port *pp,
+			      struct mvneta_rx_queue *rxq)
 {
 	rxq->size = pp->rx_ring_size;
 
@@ -2812,6 +2810,12 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 
 	rxq->last_desc = rxq->size - 1;
 
+	return 0;
+}
+
+static void mvneta_rxq_hw_init(struct mvneta_port *pp,
+			       struct mvneta_rx_queue *rxq)
+{
 	/* Set Rx descriptors queue starting address */
 	mvreg_write(pp, MVNETA_RXQ_BASE_ADDR_REG(rxq->id), rxq->descs_phys);
 	mvreg_write(pp, MVNETA_RXQ_SIZE_REG(rxq->id), rxq->size);
@@ -2835,6 +2839,20 @@ static int mvneta_rxq_init(struct mvneta_port *pp,
 		mvneta_rxq_short_pool_set(pp, rxq);
 		mvneta_rxq_non_occup_desc_add(pp, rxq, rxq->size);
 	}
+}
+
+/* Create a specified RX queue */
+static int mvneta_rxq_init(struct mvneta_port *pp,
+			   struct mvneta_rx_queue *rxq)
+
+{
+	int ret;
+
+	ret = mvneta_rxq_sw_init(pp, rxq);
+	if (ret < 0)
+		return ret;
+
+	mvneta_rxq_hw_init(pp, rxq);
 
 	return 0;
 }
@@ -2857,9 +2875,8 @@ static void mvneta_rxq_deinit(struct mvneta_port *pp,
 	rxq->descs_phys        = 0;
 }
 
-/* Create and initialize a tx queue */
-static int mvneta_txq_init(struct mvneta_port *pp,
-			   struct mvneta_tx_queue *txq)
+static int mvneta_txq_sw_init(struct mvneta_port *pp,
+			      struct mvneta_tx_queue *txq)
 {
 	int cpu;
 
@@ -2872,7 +2889,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	txq->tx_stop_threshold = txq->size - MVNETA_MAX_SKB_DESCS;
 	txq->tx_wake_threshold = txq->tx_stop_threshold / 2;
 
-
 	/* Allocate memory for TX descriptors */
 	txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
 					txq->size * MVNETA_DESC_ALIGNED_SIZE,
@@ -2882,14 +2898,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 
 	txq->last_desc = txq->size - 1;
 
-	/* Set maximum bandwidth for enabled TXQs */
-	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
-	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
-
-	/* Set Tx descriptors queue starting address */
-	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
-	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
-
 	txq->tx_skb = kmalloc_array(txq->size, sizeof(*txq->tx_skb),
 				    GFP_KERNEL);
 	if (!txq->tx_skb) {
@@ -2910,7 +2918,6 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 				  txq->descs, txq->descs_phys);
 		return -ENOMEM;
 	}
-	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
 
 	/* Setup XPS mapping */
 	if (txq_number > 1)
@@ -2923,9 +2930,38 @@ static int mvneta_txq_init(struct mvneta_port *pp,
 	return 0;
 }
 
+static void mvneta_txq_hw_init(struct mvneta_port *pp,
+			       struct mvneta_tx_queue *txq)
+{
+	/* Set maximum bandwidth for enabled TXQs */
+	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0x03ffffff);
+	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0x3fffffff);
+
+	/* Set Tx descriptors queue starting address */
+	mvreg_write(pp, MVNETA_TXQ_BASE_ADDR_REG(txq->id), txq->descs_phys);
+	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), txq->size);
+
+	mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal);
+}
+
+/* Create and initialize a tx queue */
+static int mvneta_txq_init(struct mvneta_port *pp,
+			   struct mvneta_tx_queue *txq)
+{
+	int ret;
+
+	ret = mvneta_txq_sw_init(pp, txq);
+	if (ret < 0)
+		return ret;
+
+	mvneta_txq_hw_init(pp, txq);
+
+	return 0;
+}
+
 /* Free allocated resources when mvneta_txq_init() fails to allocate memory*/
-static void mvneta_txq_deinit(struct mvneta_port *pp,
-			      struct mvneta_tx_queue *txq)
+static void mvneta_txq_sw_deinit(struct mvneta_port *pp,
+				 struct mvneta_tx_queue *txq)
 {
 	struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
 
@@ -2946,7 +2982,11 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 	txq->last_desc         = 0;
 	txq->next_desc_to_proc = 0;
 	txq->descs_phys        = 0;
+}
 
+static void mvneta_txq_hw_deinit(struct mvneta_port *pp,
+				 struct mvneta_tx_queue *txq)
+{
 	/* Set minimum bandwidth for disabled TXQs */
 	mvreg_write(pp, MVETH_TXQ_TOKEN_CFG_REG(txq->id), 0);
 	mvreg_write(pp, MVETH_TXQ_TOKEN_COUNT_REG(txq->id), 0);
@@ -2956,6 +2996,13 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
 	mvreg_write(pp, MVNETA_TXQ_SIZE_REG(txq->id), 0);
 }
 
+static void mvneta_txq_deinit(struct mvneta_port *pp,
+			      struct mvneta_tx_queue *txq)
+{
+	mvneta_txq_sw_deinit(pp, txq);
+	mvneta_txq_hw_deinit(pp, txq);
+}
+
 /* Cleanup all Tx queues */
 static void mvneta_cleanup_txqs(struct mvneta_port *pp)
 {
-- 
2.16.3

* [PATCH v2 2/2] net: mvneta: improve suspend/resume
  2018-03-30 10:33 ` Jisheng Zhang
@ 2018-03-30 10:36   ` Jisheng Zhang
From: Jisheng Zhang @ 2018-03-30 10:36 UTC (permalink / raw)
  To: David Miller, Thomas Petazzoni; +Cc: netdev, linux-arm-kernel, linux-kernel

The current suspend/resume implementation reuses mvneta_open() and
mvneta_close(), but it can be optimized to perform only the
necessary actions during suspend/resume.

One obvious problem with the current implementation is that, after
hundreds of system suspend/resume cycles, resuming mvneta can fail
because the DMA coherent memory has become fragmented. With this
patch, the unnecessary memory allocation/freeing is avoided.
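
For reference, this is roughly the per-cycle coherent allocation the
old path repeated on every resume, sketched from mvneta_txq_sw_init()
(the wrapper name old_style_resume_alloc() is only for illustration):

	static int old_style_resume_alloc(struct mvneta_port *pp,
					  struct mvneta_tx_queue *txq)
	{
		/* Re-done on every resume via mvneta_open(); after many
		 * cycles this coherent allocation can fail once the pool
		 * is fragmented.
		 */
		txq->descs = dma_alloc_coherent(pp->dev->dev.parent,
						txq->size * MVNETA_DESC_ALIGNED_SIZE,
						&txq->descs_phys, GFP_KERNEL);
		return txq->descs ? 0 : -ENOMEM;
	}

With this patch, suspend keeps those buffers and only quiesces the
hardware, so resume merely has to reprogram the queue registers.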

Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
---
 drivers/net/ethernet/marvell/mvneta.c | 71 ++++++++++++++++++++++++++++++-----
 1 file changed, 61 insertions(+), 10 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index f96815853108..cb7fce99ed6d 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4586,16 +4586,43 @@ static int mvneta_remove(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int mvneta_suspend(struct device *device)
 {
+	int queue;
 	struct net_device *dev = dev_get_drvdata(device);
 	struct mvneta_port *pp = netdev_priv(dev);
 
-	rtnl_lock();
-	if (netif_running(dev))
-		mvneta_stop(dev);
-	rtnl_unlock();
+	if (!netif_running(dev))
+		goto clean_exit;
+
+	if (!pp->neta_armada3700) {
+		spin_lock(&pp->lock);
+		pp->is_stopped = true;
+		spin_unlock(&pp->lock);
+
+		cpuhp_state_remove_instance_nocalls(online_hpstate,
+						    &pp->node_online);
+		cpuhp_state_remove_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+						    &pp->node_dead);
+	}
+
+	mvneta_stop_dev(pp);
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+		mvneta_rxq_drop_pkts(pp, rxq);
+	}
+
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+		mvneta_txq_hw_deinit(pp, txq);
+	}
+
+clean_exit:
 	netif_device_detach(dev);
 	clk_disable_unprepare(pp->clk_bus);
 	clk_disable_unprepare(pp->clk);
+
 	return 0;
 }
 
@@ -4604,7 +4631,7 @@ static int mvneta_resume(struct device *device)
 	struct platform_device *pdev = to_platform_device(device);
 	struct net_device *dev = dev_get_drvdata(device);
 	struct mvneta_port *pp = netdev_priv(dev);
-	int err;
+	int err, queue;
 
 	clk_prepare_enable(pp->clk);
 	if (!IS_ERR(pp->clk_bus))
@@ -4626,12 +4653,36 @@ static int mvneta_resume(struct device *device)
 	}
 
 	netif_device_attach(dev);
-	rtnl_lock();
-	if (netif_running(dev)) {
-		mvneta_open(dev);
-		mvneta_set_rx_mode(dev);
+
+	if (!netif_running(dev))
+		return 0;
+
+	for (queue = 0; queue < rxq_number; queue++) {
+		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
+
+		rxq->next_desc_to_proc = 0;
+		mvneta_rxq_hw_init(pp, rxq);
 	}
-	rtnl_unlock();
+
+	for (queue = 0; queue < txq_number; queue++) {
+		struct mvneta_tx_queue *txq = &pp->txqs[queue];
+
+		txq->next_desc_to_proc = 0;
+		mvneta_txq_hw_init(pp, txq);
+	}
+
+	if (!pp->neta_armada3700) {
+		spin_lock(&pp->lock);
+		pp->is_stopped = false;
+		spin_unlock(&pp->lock);
+		cpuhp_state_add_instance_nocalls(online_hpstate,
+						 &pp->node_online);
+		cpuhp_state_add_instance_nocalls(CPUHP_NET_MVNETA_DEAD,
+						 &pp->node_dead);
+	}
+
+	mvneta_start_dev(pp);
+	mvneta_set_rx_mode(dev);
 
 	return 0;
 }
-- 
2.16.3

* Re: [PATCH v2 2/2] net: mvneta: improve suspend/resume
  2018-03-30 10:36   ` Jisheng Zhang
@ 2018-03-30 10:43     ` Russell King - ARM Linux
From: Russell King - ARM Linux @ 2018-03-30 10:43 UTC (permalink / raw)
  To: Jisheng Zhang
  Cc: David Miller, Thomas Petazzoni, netdev, linux-kernel, linux-arm-kernel

On Fri, Mar 30, 2018 at 06:36:15PM +0800, Jisheng Zhang wrote:
> The current suspend/resume implementation reuses mvneta_open() and
> mvneta_close(), but it can be optimized to perform only the
> necessary actions during suspend/resume.
> 
> One obvious problem with the current implementation is that, after
> hundreds of system suspend/resume cycles, resuming mvneta can fail
> because the DMA coherent memory has become fragmented. With this
> patch, the unnecessary memory allocation/freeing is avoided.

I don't think you've properly tested this.  Please ensure that you test
patches with the appropriate debug options enabled.

> @@ -4586,16 +4586,43 @@ static int mvneta_remove(struct platform_device *pdev)
>  #ifdef CONFIG_PM_SLEEP
>  static int mvneta_suspend(struct device *device)
>  {
> +	int queue;
>  	struct net_device *dev = dev_get_drvdata(device);
>  	struct mvneta_port *pp = netdev_priv(dev);
>  
> -	rtnl_lock();
> -	if (netif_running(dev))
> -		mvneta_stop(dev);
> -	rtnl_unlock();
...
> +	mvneta_stop_dev(pp);

You're removing the rtnl_lock() that I introduced in 3b8bc67413de
("net: mvneta: ensure PM paths take the rtnl lock") which is necessary
to provide phylink with consistent locking.  mvneta_stop_dev() calls
phylink_stop() which will check that the rtnl lock is held, and will
print a warning if it isn't.

Your patch will cause a regression here.

> +
> +	for (queue = 0; queue < rxq_number; queue++) {
> +		struct mvneta_rx_queue *rxq = &pp->rxqs[queue];
> +
> +		mvneta_rxq_drop_pkts(pp, rxq);
> +	}
> +
> +	for (queue = 0; queue < txq_number; queue++) {
> +		struct mvneta_tx_queue *txq = &pp->txqs[queue];
> +
> +		mvneta_txq_hw_deinit(pp, txq);
> +	}
> +
> +clean_exit:
>  	netif_device_detach(dev);
>  	clk_disable_unprepare(pp->clk_bus);
>  	clk_disable_unprepare(pp->clk);
> +
>  	return 0;
>  }
>  
> @@ -4604,7 +4631,7 @@ static int mvneta_resume(struct device *device)
>  	struct platform_device *pdev = to_platform_device(device);
>  	struct net_device *dev = dev_get_drvdata(device);
>  	struct mvneta_port *pp = netdev_priv(dev);
> -	int err;
> +	int err, queue;
>  
>  	clk_prepare_enable(pp->clk);
>  	if (!IS_ERR(pp->clk_bus))
> @@ -4626,12 +4653,36 @@ static int mvneta_resume(struct device *device)
>  	}
>  
>  	netif_device_attach(dev);
> -	rtnl_lock();
> -	if (netif_running(dev)) {
> -		mvneta_open(dev);
> -		mvneta_set_rx_mode(dev);
...
>  	}
> -	rtnl_unlock();
...
> +	mvneta_start_dev(pp);

Same applies here.
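
For example, an untested sketch of the locking shape that would keep
phylink happy while still avoiding the descriptor free/alloc on the
suspend side:

	if (netif_running(dev)) {
		rtnl_lock();
		mvneta_stop_dev(pp);	/* calls phylink_stop(), needs rtnl */
		rtnl_unlock();

		/* ... then drop rx packets and mvneta_txq_hw_deinit() ... */
	}

and the resume side would need to wrap mvneta_start_dev() and
mvneta_set_rx_mode() in the same way.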

-- 
RMK's Patch system: http://www.armlinux.org.uk/developer/patches/
FTTC broadband for 0.8mile line in suburbia: sync at 8.8Mbps down 630kbps up
According to speedtest.net: 8.21Mbps down 510kbps up
