linux-kernel.vger.kernel.org archive mirror
From: Gregory CLEMENT <gregory.clement@bootlin.com>
To: "David S. Miller" <davem@davemloft.net>,
	linux-kernel@vger.kernel.org, netdev@vger.kernel.org
Cc: "Thomas Petazzoni" <thomas.petazzoni@bootlin.com>,
	linux-arm-kernel@lists.infradead.org,
	"Jason Cooper" <jason@lakedaemon.net>,
	"Andrew Lunn" <andrew@lunn.ch>,
	"Sebastian Hesselbarth" <sebastian.hesselbarth@gmail.com>,
	"Gregory CLEMENT" <gregory.clement@bootlin.com>,
	"Yelena Krivosheev" <yelena@marvell.com>,
	"Nadav Haklai" <nadavh@marvell.com>,
	"Marcin Wojtas" <mw@semihalf.com>,
	"Dmitri Epshtein" <dima@marvell.com>,
	"Antoine Tenart" <antoine.tenart@bootlin.com>,
	"Miquèl Raynal" <miquel.raynal@bootlin.com>,
	"Maxime Chevallier" <maxime.chevallier@bootlin.com>
Subject: [PATCH net-next 1/6] net: mvneta: remove data pointer usage from device_node structure
Date: Fri,  6 Jul 2018 15:19:44 +0200
Message-ID: <20180706131949.2684-2-gregory.clement@bootlin.com>
In-Reply-To: <20180706131949.2684-1-gregory.clement@bootlin.com>

One year ago, Rob Herring wanted to remove the data pointer from the
device_node structure [1]. The mvneta driver seemed to be the only one
still using (or rather abusing) it. However, Rob's proposal to remove
this pointer from the driver introduced a regression; at the time I
tested and fixed an alternative approach, but it was never submitted as
a proper patch.

Now here it is: instead of using the device_node structure's ->data
pointer, we store the BM private data as the driver data of the BM
platform_device. The core mvneta code can retrieve it by looking up
which platform_device corresponds to the BM device tree node using
of_find_device_by_node(), and then fetching its driver data.

[1] https://www.spinics.net/lists/netdev/msg445197.html
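
For reference, the lookup pattern this relies on looks roughly like the
sketch below (illustrative only, not part of the patch: the lookup_bm()
helper and its error handling are made up for the example, and the usual
<linux/of_platform.h> / <linux/platform_device.h> includes are assumed):

	/* Resolve the "buffer-manager" phandle to the BM driver data.
	 * of_find_device_by_node() takes a reference on the returned
	 * platform_device, which must be dropped later with
	 * platform_device_put() once the consumer is done with it.
	 */
	static struct mvneta_bm *lookup_bm(struct device_node *dn)
	{
		struct device_node *bm_node;
		struct platform_device *bm_pdev;
		struct mvneta_bm *bm;

		bm_node = of_parse_phandle(dn, "buffer-manager", 0);
		if (!bm_node)
			return NULL;

		bm_pdev = of_find_device_by_node(bm_node);
		of_node_put(bm_node);
		if (!bm_pdev)
			return NULL;

		bm = platform_get_drvdata(bm_pdev);
		if (!bm)
			/* No driver bound (yet): drop the reference. */
			platform_device_put(bm_pdev);

		return bm;
	}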

Signed-off-by: Gregory CLEMENT <gregory.clement@bootlin.com>
---
 drivers/net/ethernet/marvell/mvneta.c    | 18 ++++++++++++------
 drivers/net/ethernet/marvell/mvneta_bm.c | 13 +++++++++++++
 drivers/net/ethernet/marvell/mvneta_bm.h |  5 +++++
 3 files changed, 30 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 0ad2f3f7da85..26d68add184f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -4460,12 +4460,16 @@ static int mvneta_probe(struct platform_device *pdev)
 
 	/* Obtain access to BM resources if enabled and already initialized */
 	bm_node = of_parse_phandle(dn, "buffer-manager", 0);
-	if (bm_node && bm_node->data) {
-		pp->bm_priv = bm_node->data;
-		err = mvneta_bm_port_init(pdev, pp);
-		if (err < 0) {
-			dev_info(&pdev->dev, "use SW buffer management\n");
-			pp->bm_priv = NULL;
+	if (bm_node) {
+		pp->bm_priv = mvneta_bm_get(bm_node);
+		if (pp->bm_priv) {
+			err = mvneta_bm_port_init(pdev, pp);
+			if (err < 0) {
+				dev_info(&pdev->dev,
+					 "use SW buffer management\n");
+				mvneta_bm_put(pp->bm_priv);
+				pp->bm_priv = NULL;
+			}
 		}
 	}
 	of_node_put(bm_node);
@@ -4526,6 +4530,7 @@ static int mvneta_probe(struct platform_device *pdev)
 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
 				       1 << pp->id);
+		mvneta_bm_put(pp->bm_priv);
 	}
 err_free_stats:
 	free_percpu(pp->stats);
@@ -4563,6 +4568,7 @@ static int mvneta_remove(struct platform_device *pdev)
 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id);
 		mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short,
 				       1 << pp->id);
+		mvneta_bm_put(pp->bm_priv);
 	}
 
 	return 0;
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 466939f8f0cf..01e3152e76c8 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_platform.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
 #include <net/hwbm.h>
@@ -392,6 +393,18 @@ static void mvneta_bm_put_sram(struct mvneta_bm *priv)
 		      MVNETA_BM_BPPI_SIZE);
 }
 
+struct mvneta_bm *mvneta_bm_get(struct device_node *node)
+{
+	struct platform_device *pdev = of_find_device_by_node(node);
+
+	return pdev ? platform_get_drvdata(pdev) : NULL;
+}
+
+void mvneta_bm_put(struct mvneta_bm *priv)
+{
+	platform_device_put(priv->pdev);
+}
+
 static int mvneta_bm_probe(struct platform_device *pdev)
 {
 	struct device_node *dn = pdev->dev.of_node;
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index a32de432800c..9358626e51ec 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -134,6 +134,9 @@ void *mvneta_frag_alloc(unsigned int frag_size);
 void mvneta_frag_free(unsigned int frag_size, void *data);
 
 #if IS_ENABLED(CONFIG_MVNETA_BM)
+struct mvneta_bm *mvneta_bm_get(struct device_node *node);
+void mvneta_bm_put(struct mvneta_bm *priv);
+
 void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
 			    struct mvneta_bm_pool *bm_pool, u8 port_map);
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
@@ -178,5 +181,7 @@ static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv,
 static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv,
 					struct mvneta_bm_pool *bm_pool)
 { return 0; }
+static inline struct mvneta_bm *mvneta_bm_get(struct device_node *node) { return NULL; }
+static inline void mvneta_bm_put(struct mvneta_bm *priv) {}
 #endif /* CONFIG_MVNETA_BM */
 #endif
-- 
2.17.1



Thread overview: 10+ messages
2018-07-06 13:19 [PATCH net-next 0/6] Few improvements on mvneta Gregory CLEMENT
2018-07-06 13:19 ` Gregory CLEMENT [this message]
2018-07-07 10:45   ` [PATCH net-next 1/6] net: mvneta: remove data pointer usage from device_node structure kbuild test robot
2018-07-06 13:19 ` [PATCH net-next 2/6] net: mvneta: increase number of buffers in RX and TX queue Gregory CLEMENT
2018-07-06 13:19 ` [PATCH net-next 3/6] net: mvneta: discriminate error cause for missed packet Gregory CLEMENT
2018-07-07  2:09   ` David Miller
2018-07-06 13:19 ` [PATCH net-next 4/6] net: mvneta: Allocate page for the descriptor Gregory CLEMENT
2018-07-06 13:19 ` [PATCH net-next 5/6] net: mvneta: Verify hardware checksum only when offload checksum feature is set Gregory CLEMENT
2018-07-07 14:58   ` Marcin Wojtas
2018-07-06 13:19 ` [PATCH net-next 6/6] net: mvneta: Improve the buffer allocation method for SWBM Gregory CLEMENT
