From: Gregory CLEMENT <gregory.clement@free-electrons.com>
To: "David S. Miller" <davem@davemloft.net>,
	linux-kernel@vger.kernel.org, netdev@vger.kernel.org,
	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>,
	Florian Fainelli <f.fainelli@gmail.com>
Cc: Jason Cooper <jason@lakedaemon.net>, Andrew Lunn <andrew@lunn.ch>,
	Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>,
	Gregory CLEMENT <gregory.clement@free-electrons.com>,
	linux-arm-kernel@lists.infradead.org,
	Lior Amsalem <alior@marvell.com>,
	Nadav Haklai <nadavh@marvell.com>,
	Marcin Wojtas <mw@semihalf.com>,
	Simon Guinot <simon.guinot@sequanux.org>,
	Russell King - ARM Linux <linux@arm.linux.org.uk>,
	Willy Tarreau <w@1wt.eu>, Timor Kardashov <timork@marvell.com>,
	Dmitri Epshtein <dima@marvell.com>,
	Sebastian Careba <nitroshift@yahoo.com>
Subject: [PATCH v5 net-next 10/10] net: mvneta: Use the new hwbm framework
Date: Thu, 10 Mar 2016 11:09:18 +0100
Message-ID: <1457604558-1121-11-git-send-email-gregory.clement@free-electrons.com>
In-Reply-To: <1457604558-1121-1-git-send-email-gregory.clement@free-electrons.com>

Now that the hardware buffer management framework has been introduced,
let's use it. The driver-private allocation helpers (mvneta_buf_alloc()
and mvneta_bm_bufs_add()) are replaced by the generic hwbm_pool_add()
and hwbm_pool_refill(), the per-pool bookkeeping fields (size, buf_num,
frag_size) move into the embedded struct hwbm_pool, and the driver now
only supplies the mvneta_bm_construct() callback, which DMA-maps each
buffer the framework allocates.

Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
---
 drivers/net/ethernet/marvell/Kconfig     |   1 +
 drivers/net/ethernet/marvell/mvneta.c    |  18 +++--
 drivers/net/ethernet/marvell/mvneta_bm.c | 125 ++++++++-----------------------
 drivers/net/ethernet/marvell/mvneta_bm.h |  17 ++---
 4 files changed, 49 insertions(+), 112 deletions(-)
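
For reference, this is roughly the pattern a driver follows after the
conversion. The sketch below is illustrative only, pieced together from
the hunks in this patch; struct my_pool, my_construct() and
my_pool_setup() are hypothetical stand-ins for their mvneta
counterparts, and error handling is reduced to the minimum.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>
#include <net/hwbm.h>

/* Hypothetical driver pool embedding the generic one, mirroring how
 * struct mvneta_bm_pool embeds struct hwbm_pool in this patch.
 */
struct my_pool {
	struct hwbm_pool hwbm_pool;
	struct device *dev;	/* device used for DMA mapping */
	u32 buf_size;		/* DMA-visible part of each buffer */
};

/* The construct callback is invoked by the framework for every buffer
 * it allocates (from hwbm_pool_add() or hwbm_pool_refill()); the
 * driver finishes the buffer, here by DMA-mapping it for reception.
 */
static int my_construct(struct hwbm_pool *hwbm_pool, void *buf)
{
	struct my_pool *pool = hwbm_pool->priv;
	dma_addr_t phys_addr;

	phys_addr = dma_map_single(pool->dev, buf, pool->buf_size,
				   DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(pool->dev, phys_addr)))
		return -ENOMEM;

	/* ... give phys_addr to the hardware-managed pool here ... */
	return 0;
}

/* Fill in the generic pool descriptor, then let the framework allocate
 * and construct the initial buffers; returns the number actually added.
 */
static int my_pool_setup(struct my_pool *pool, u32 buf_size, int nbufs)
{
	pool->buf_size = buf_size;
	pool->hwbm_pool.size = nbufs;
	pool->hwbm_pool.frag_size = SKB_DATA_ALIGN(buf_size) +
		SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	pool->hwbm_pool.construct = my_construct;
	pool->hwbm_pool.priv = pool;

	return hwbm_pool_add(&pool->hwbm_pool, nbufs, GFP_ATOMIC);
}

On the RX path, a consumed buffer is then returned to the pool with
hwbm_pool_refill(&pool->hwbm_pool, GFP_ATOMIC), as the refill hunk in
mvneta.c below does.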

diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index ac6605c62f46..62d80fddbe34 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -43,6 +43,7 @@ config MVMDIO
 config MVNETA_BM
 	tristate "Marvell Armada 38x/XP network interface BM support"
 	depends on MVNETA
+	select HWBM
 	---help---
 	  This driver supports auxiliary block of the network
 	  interface units in the Marvell ARMADA XP and ARMADA 38x SoC
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index ade5b0b961d3..daf94a82c9f5 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -30,6 +30,7 @@
 #include <linux/phy.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <net/hwbm.h>
 #include "mvneta_bm.h"
 #include <net/ip.h>
 #include <net/ipv6.h>
@@ -1021,11 +1022,12 @@ static int mvneta_bm_port_init(struct platform_device *pdev,
 static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
 {
 	struct mvneta_bm_pool *bm_pool = pp->pool_long;
+	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
 	int num;
 
 	/* Release all buffers from long pool */
 	mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id);
-	if (bm_pool->buf_num) {
+	if (hwbm_pool->buf_num) {
 		WARN(1, "cannot free all buffers in pool %d\n",
 		     bm_pool->id);
 		goto bm_mtu_err;
@@ -1033,14 +1035,14 @@ static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu)
 
 	bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu);
 	bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size);
-	bm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
-			  SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
+	hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
+			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size));
 
 	/* Fill entire long pool */
-	num = mvneta_bm_bufs_add(pp->bm_priv, bm_pool, bm_pool->size);
-	if (num != bm_pool->size) {
+	num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+	if (num != hwbm_pool->size) {
 		WARN(1, "pool %d: %d of %d allocated\n",
-		     bm_pool->id, num, bm_pool->size);
+		     bm_pool->id, num, hwbm_pool->size);
 		goto bm_mtu_err;
 	}
 	mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id);
@@ -2028,14 +2030,14 @@ err_drop_frame:
 		}
 
 		/* Refill processing */
-		err = mvneta_bm_pool_refill(pp->bm_priv, bm_pool);
+		err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC);
 		if (err) {
 			netdev_err(dev, "Linux processing - Can't refill\n");
 			rxq->missed++;
 			goto err_drop_frame_ret_pool;
 		}
 
-		frag_size = bm_pool->frag_size;
+		frag_size = bm_pool->hwbm_pool.frag_size;
 
 		skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size);
 
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.c b/drivers/net/ethernet/marvell/mvneta_bm.c
index 8c968e7d2d8f..01fccec632ec 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.c
+++ b/drivers/net/ethernet/marvell/mvneta_bm.c
@@ -10,16 +10,17 @@
  * warranty of any kind, whether express or implied.
  */
 
-#include <linux/kernel.h>
+#include <linux/clk.h>
 #include <linux/genalloc.h>
-#include <linux/platform_device.h>
-#include <linux/netdevice.h>
-#include <linux/skbuff.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
 #include <linux/mbus.h>
 #include <linux/module.h>
-#include <linux/io.h>
+#include <linux/netdevice.h>
 #include <linux/of.h>
-#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <net/hwbm.h>
 #include "mvneta_bm.h"
 
 #define MVNETA_BM_DRIVER_NAME "mvneta_bm"
@@ -88,17 +89,13 @@ static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id,
 	mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val);
 }
 
-/* Allocate skb for BM pool */
-void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       dma_addr_t *buf_phys_addr)
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf)
 {
-	void *buf;
+	struct mvneta_bm_pool *bm_pool =
+		(struct mvneta_bm_pool *)hwbm_pool->priv;
+	struct mvneta_bm *priv = bm_pool->priv;
 	dma_addr_t phys_addr;
 
-	buf = mvneta_frag_alloc(bm_pool->frag_size);
-	if (!buf)
-		return NULL;
-
 	/* In order to update buf_cookie field of RX descriptor properly,
 	 * BM hardware expects buf virtual address to be placed in the
 	 * first four bytes of mapped buffer.
@@ -106,75 +103,13 @@ void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 	*(u32 *)buf = (u32)buf;
 	phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size,
 				   DMA_FROM_DEVICE);
-	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) {
-		mvneta_frag_free(bm_pool->frag_size, buf);
-		return NULL;
-	}
-	*buf_phys_addr = phys_addr;
-
-	return buf;
-}
-
-/* Refill processing for HW buffer management */
-int mvneta_bm_pool_refill(struct mvneta_bm *priv,
-			  struct mvneta_bm_pool *bm_pool)
-{
-	dma_addr_t buf_phys_addr;
-	void *buf;
-
-	buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr);
-	if (!buf)
+	if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr)))
 		return -ENOMEM;
 
-	mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr);
-
+	mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill);
-
-/* Allocate buffers for the pool */
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num)
-{
-	int err, i;
-
-	if (bm_pool->buf_num == bm_pool->size) {
-		dev_dbg(&priv->pdev->dev, "pool %d already filled\n",
-			bm_pool->id);
-		return bm_pool->buf_num;
-	}
-
-	if (buf_num < 0 ||
-	    (buf_num + bm_pool->buf_num > bm_pool->size)) {
-		dev_err(&priv->pdev->dev,
-			"cannot allocate %d buffers for pool %d\n",
-			buf_num, bm_pool->id);
-		return 0;
-	}
-
-	for (i = 0; i < buf_num; i++) {
-		err = mvneta_bm_pool_refill(priv, bm_pool);
-		if (err < 0)
-			break;
-	}
-
-	/* Update BM driver with number of buffers added to pool */
-	bm_pool->buf_num += i;
-
-	dev_dbg(&priv->pdev->dev,
-		"%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n",
-		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-		bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size,
-		bm_pool->frag_size);
-
-	dev_dbg(&priv->pdev->dev,
-		"%s pool %d: %d of %d buffers added\n",
-		bm_pool->type == MVNETA_BM_SHORT ? "short" : "long",
-		bm_pool->id, i, buf_num);
-
-	return i;
-}
-EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add);
+EXPORT_SYMBOL_GPL(mvneta_bm_construct);
 
 /* Create pool */
 static int mvneta_bm_pool_create(struct mvneta_bm *priv,
@@ -183,8 +118,7 @@ static int mvneta_bm_pool_create(struct mvneta_bm *priv,
 	struct platform_device *pdev = priv->pdev;
 	u8 target_id, attr;
 	int size_bytes, err;
-
-	size_bytes = sizeof(u32) * bm_pool->size;
+	size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size;
 	bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
 						&bm_pool->phys_addr,
 						GFP_KERNEL);
@@ -245,11 +179,16 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 
 	/* Allocate buffers in case BM pool hasn't been used yet */
 	if (new_pool->type == MVNETA_BM_FREE) {
+		struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool;
+
+		new_pool->priv = priv;
 		new_pool->type = type;
 		new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size);
-		new_pool->frag_size =
+		hwbm_pool->frag_size =
 			SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) +
 			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
+		hwbm_pool->construct = mvneta_bm_construct;
+		hwbm_pool->priv = new_pool;
 
 		/* Create new pool */
 		err = mvneta_bm_pool_create(priv, new_pool);
@@ -260,10 +199,10 @@ struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
 		}
 
 		/* Allocate buffers for this pool */
-		num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size);
-		if (num != new_pool->size) {
+		num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC);
+		if (num != hwbm_pool->size) {
 			WARN(1, "pool %d: %d of %d allocated\n",
-			     new_pool->id, num, new_pool->size);
+			     new_pool->id, num, hwbm_pool->size);
 			return NULL;
 		}
 	}
@@ -284,7 +223,7 @@ void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 
 	mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
-	for (i = 0; i < bm_pool->buf_num; i++) {
+	for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) {
 		dma_addr_t buf_phys_addr;
 		u32 *vaddr;
 
@@ -303,13 +242,13 @@ void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 
 		dma_unmap_single(&priv->pdev->dev, buf_phys_addr,
 				 bm_pool->buf_size, DMA_FROM_DEVICE);
-		mvneta_frag_free(bm_pool->frag_size, vaddr);
+		hwbm_buf_free(&bm_pool->hwbm_pool, vaddr);
 	}
 
 	mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK);
 
 	/* Update BM driver with number of buffers removed from pool */
-	bm_pool->buf_num -= i;
+	bm_pool->hwbm_pool.buf_num -= i;
 }
 EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
 
@@ -317,6 +256,7 @@ EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free);
 void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
 			    struct mvneta_bm_pool *bm_pool, u8 port_map)
 {
+	struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool;
 	bm_pool->port_map &= ~port_map;
 	if (bm_pool->port_map)
 		return;
@@ -324,11 +264,12 @@ void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
 	bm_pool->type = MVNETA_BM_FREE;
 
 	mvneta_bm_bufs_free(priv, bm_pool, port_map);
-	if (bm_pool->buf_num)
+	if (hwbm_pool->buf_num)
 		WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
 
 	if (bm_pool->virt_addr) {
-		dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size,
+		dma_free_coherent(&priv->pdev->dev,
+				  sizeof(u32) * hwbm_pool->size,
 				  bm_pool->virt_addr, bm_pool->phys_addr);
 		bm_pool->virt_addr = NULL;
 	}
@@ -381,10 +322,10 @@ static void mvneta_bm_pools_init(struct mvneta_bm *priv)
 				 MVNETA_BM_POOL_CAP_ALIGN));
 			size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN);
 		}
-		bm_pool->size = size;
+		bm_pool->hwbm_pool.size = size;
 
 		mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i),
-				bm_pool->size);
+				bm_pool->hwbm_pool.size);
 
 		/* Obtain custom pkt_size from DT */
 		sprintf(prop, "pool%d,pkt-size", i);
diff --git a/drivers/net/ethernet/marvell/mvneta_bm.h b/drivers/net/ethernet/marvell/mvneta_bm.h
index db239e061ab0..e74fd44a92f7 100644
--- a/drivers/net/ethernet/marvell/mvneta_bm.h
+++ b/drivers/net/ethernet/marvell/mvneta_bm.h
@@ -108,20 +108,15 @@ struct mvneta_bm {
 };
 
 struct mvneta_bm_pool {
+	struct hwbm_pool hwbm_pool;
 	/* Pool number in the range 0-3 */
 	u8 id;
 	enum mvneta_bm_type type;
 
-	/* Buffer Pointers Pool External (BPPE) size in number of bytes */
-	int size;
-	/* Number of buffers used by this pool */
-	int buf_num;
-	/* Pool buffer size */
-	int buf_size;
 	/* Packet size */
 	int pkt_size;
-	/* Single frag size */
-	u32 frag_size;
+	/* Size of the buffer accessed through DMA */
+	u32 buf_size;
 
 	/* BPPE virtual base address */
 	u32 *virt_addr;
@@ -143,8 +138,7 @@ void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
 			    struct mvneta_bm_pool *bm_pool, u8 port_map);
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 			 u8 port_map);
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num);
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf);
 int mvneta_bm_pool_refill(struct mvneta_bm *priv,
 			  struct mvneta_bm_pool *bm_pool);
 struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
@@ -170,8 +164,7 @@ void mvneta_bm_pool_destroy(struct mvneta_bm *priv,
 			    struct mvneta_bm_pool *bm_pool, u8 port_map) {}
 void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
 			 u8 port_map) {}
-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool,
-		       int buf_num) { return 0; }
+int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; }
 int mvneta_bm_pool_refill(struct mvneta_bm *priv,
 			  struct mvneta_bm_pool *bm_pool) {return 0; }
 struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id,
-- 
2.5.0

Thread overview: 12+ messages
2016-03-10 10:09 [PATCH v5 net-next 00/10] API set for HW Buffer management Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 01/10] misc: sram: add optional ioremap without write combining Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 02/10] ARM: dts: armada-38x: add buffer manager nodes Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 03/10] ARM: dts: armada-38x: enable buffer manager support on Armada 38x boards Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 04/10] ARM: dts: armada-xp: add buffer manager nodes Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 05/10] ARM: dts: armada-xp: enable buffer manager support on Armada XP boards Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 06/10] ARM: dts: armada-xp-openblocks-ax3-4: Add BM support Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 07/10] bus: mvebu-mbus: provide api for obtaining IO and DRAM window information Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 08/10] net: mvneta: bm: add support for hardware buffer management Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 09/10] net: add a hardware buffer management helper API Gregory CLEMENT
2016-03-10 10:09 ` [PATCH v5 net-next 10/10] net: mvneta: Use the new hwbm framework Gregory CLEMENT [this message]
2016-03-14  2:02 ` [PATCH v5 net-next 00/10] API set for HW Buffer management David Miller
