* [PATCH v3 0/5] net: ethernet: ti: cpsw: add cpdma multi-queue support
@ 2016-08-15 23:25 Ivan Khoronzhuk
  2016-08-15 23:25 ` [PATCH v3 1/5] net: ethernet: ti: davinci_cpdma: split descs num between all channels Ivan Khoronzhuk
                   ` (4 more replies)
  0 siblings, 5 replies; 13+ messages in thread
From: Ivan Khoronzhuk @ 2016-08-15 23:25 UTC (permalink / raw)
  To: davem, netdev, mugunthanvnm, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar, Ivan Khoronzhuk

This series is intended to allow the cpsw driver to use the cpdma
ability of the h/w shaper to send/receive data with up to 8 tx and
8 rx queues. The series doesn't contain an interface to configure the
h/w shaper itself; it contains only the multi-queue support part and
the ability to configure the number of tx/rx queues with ethtool. It
also doesn't contain the mapping of incoming traffic to rx queues, as
that can depend on usage and requires a separate interface for setup.

The default shaper mode is priority mode. The h/w shaper configuration
will be added with a separate patch series. This series doesn't affect
net throughput.

Tested on:
am572x-idk, 1Gbps link
am335x-boneblack, 100Mbps link.

A simple example for splitting traffic on queues:

#check how many queues are supported and active:
$ ethtool -l eth0

#increase the number of active rx and tx queues;
#by default there is 1 rx and 1 tx queue.
#Any combination of 0 < rx <= 8 and 0 < tx <= 8 can be set
$ ethtool -L eth0 rx 8 tx 8

#set multi-queue-aware queuing discipline
$ tc qdisc add dev eth0 root handle 1: multiq

#send packets with dst ip 172.22.39.12 to queue #5, which can be
#prioritized or throughput-limited by the h/w shaper.
$ tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
    match ip dst 172.22.39.12 \
    action skbedit queue_mapping 5

Based on: net-next/master
V1: https://lkml.org/lkml/2016/6/30/603

Since v2:
- added a new patch to avoid a warning during ctrl stop

  net: ethernet: ti: cpsw: add ethtool channels support
- enable the ctrl only if at least one interface is running

Since v1:
- removed the cpdma_check_free_desc function
- removed pm_runtime calls, as they are now done in the ethtool begin/complete callbacks
- removed the driver version change; it can be done later
- corrected setup of channels for dual_emac mode with ethtool

Ivan Khoronzhuk (5):
  net: ethernet: ti: davinci_cpdma: split descs num between all channels
  net: ethernet: ti: davinci_cpdma: fix locking while ctrl_stop
  net: ethernet: ti: cpsw: add multi queue support
  net: ethernet: ti: davinci_cpdma: move cpdma channel struct macros
    to internals
  net: ethernet: ti: cpsw: add ethtool channels support

 drivers/net/ethernet/ti/cpsw.c          | 488 +++++++++++++++++++++++++-------
 drivers/net/ethernet/ti/davinci_cpdma.c |  73 ++++-
 drivers/net/ethernet/ti/davinci_cpdma.h |  13 +-
 drivers/net/ethernet/ti/davinci_emac.c  |   8 +-
 4 files changed, 459 insertions(+), 123 deletions(-)

-- 
1.9.1

* [PATCH v3 1/5] net: ethernet: ti: davinci_cpdma: split descs num between all channels
  2016-08-15 23:25 [PATCH v3 0/5] net: ethernet: ti: cpsw: add cpdma multi-queue support Ivan Khoronzhuk
@ 2016-08-15 23:25 ` Ivan Khoronzhuk
  2016-08-15 23:25 ` [PATCH v3 2/5] net: ethernet: ti: davinci_cpdma: fix locking while ctrl_stop Ivan Khoronzhuk
                   ` (3 subsequent siblings)
  4 siblings, 0 replies; 13+ messages in thread
From: Ivan Khoronzhuk @ 2016-08-15 23:25 UTC (permalink / raw)
  To: davem, netdev, mugunthanvnm, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar, Ivan Khoronzhuk

Tx channels share the same pool of descriptors, so one channel can
block another if the pool is emptied by it, whereas the shaper should
be what decides which channel is allowed to send packets. To avoid
such interference of one channel with another, let every channel have
its own slice of the pool.
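
To illustrate the arithmetic (a standalone sketch, not driver code;
the pool size is a made-up number): an equal split gives each of N
channels pool/N descriptors, so a stalled channel can exhaust at most
its own slice.

	#include <stdio.h>

	/* Standalone illustration: each of chan_num channels gets an
	 * equal slice of a hypothetical 256-entry descriptor pool.
	 */
	int main(void)
	{
		int num_desc = 256;	/* made-up pool size */
		int chan_num;

		for (chan_num = 1; chan_num <= 8; chan_num++)
			printf("%d channel(s) -> %d descs per channel\n",
			       chan_num, num_desc / chan_num);
		return 0;
	}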

Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
---
 drivers/net/ethernet/ti/cpsw.c          | 61 ++++++++++++++++++++-------------
 drivers/net/ethernet/ti/davinci_cpdma.c | 46 +++++++++++++++++++++++--
 drivers/net/ethernet/ti/davinci_cpdma.h |  2 +-
 3 files changed, 83 insertions(+), 26 deletions(-)

diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index b4d3b41..a4c1538 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1212,6 +1212,40 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
 	}
 }
 
+static int cpsw_fill_rx_channels(struct net_device *ndev)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct sk_buff *skb;
+	int ch_buf_num;
+	int i, ret;
+
+	ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch);
+	for (i = 0; i < ch_buf_num; i++) {
+		skb = __netdev_alloc_skb_ip_align(ndev,
+						  cpsw->rx_packet_max,
+						  GFP_KERNEL);
+		if (!skb) {
+			cpsw_err(priv, ifup, "cannot allocate skb\n");
+			return -ENOMEM;
+		}
+
+		ret = cpdma_chan_submit(cpsw->rxch, skb, skb->data,
+					skb_tailroom(skb), 0);
+		if (ret < 0) {
+			cpsw_err(priv, ifup,
+				 "cannot submit skb to rx channel, error %d\n",
+				 ret);
+			kfree_skb(skb);
+			return ret;
+		}
+	}
+
+	cpsw_info(priv, ifup, "submitted %d rx descriptors\n", ch_buf_num);
+
+	return ch_buf_num;
+}
+
 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
 {
 	u32 slave_port;
@@ -1232,7 +1266,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
 	struct cpsw_common *cpsw = priv->cpsw;
-	int i, ret;
+	int ret;
 	u32 reg;
 
 	ret = pm_runtime_get_sync(cpsw->dev);
@@ -1264,8 +1298,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
 				  ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
 
 	if (!cpsw_common_res_usage_state(cpsw)) {
-		int buf_num;
-
 		/* setup tx dma to fixed prio and zero offset */
 		cpdma_control_set(cpsw->dma, CPDMA_TX_PRIO_FIXED, 1);
 		cpdma_control_set(cpsw->dma, CPDMA_RX_BUFFER_OFFSET, 0);
@@ -1292,26 +1324,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
 			enable_irq(cpsw->irqs_table[0]);
 		}
 
-		buf_num = cpdma_chan_get_rx_buf_num(cpsw->dma);
-		for (i = 0; i < buf_num; i++) {
-			struct sk_buff *skb;
-
-			ret = -ENOMEM;
-			skb = __netdev_alloc_skb_ip_align(priv->ndev,
-					cpsw->rx_packet_max, GFP_KERNEL);
-			if (!skb)
-				goto err_cleanup;
-			ret = cpdma_chan_submit(cpsw->rxch, skb, skb->data,
-						skb_tailroom(skb), 0);
-			if (ret < 0) {
-				kfree_skb(skb);
-				goto err_cleanup;
-			}
-		}
-		/* continue even if we didn't manage to submit all
-		 * receive descs
-		 */
-		cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
+		ret = cpsw_fill_rx_channels(ndev);
+		if (ret < 0)
+			goto err_cleanup;
 
 		if (cpts_register(cpsw->dev, cpsw->cpts,
 				  cpsw->data.cpts_clock_mult,
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index cf72b33..167fd65 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -104,6 +104,7 @@ struct cpdma_ctlr {
 	struct cpdma_desc_pool	*pool;
 	spinlock_t		lock;
 	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
+	int chan_num;
 };
 
 struct cpdma_chan {
@@ -256,6 +257,7 @@ struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
 	ctlr->state = CPDMA_STATE_IDLE;
 	ctlr->params = *params;
 	ctlr->dev = params->dev;
+	ctlr->chan_num = 0;
 	spin_lock_init(&ctlr->lock);
 
 	ctlr->pool = cpdma_desc_pool_create(ctlr->dev,
@@ -399,6 +401,31 @@ void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
 
+/**
+ * cpdma_chan_split_pool - Splits ctlr pool between all channels.
+ * Has to be called under ctlr lock
+ */
+static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
+{
+	struct cpdma_desc_pool *pool = ctlr->pool;
+	struct cpdma_chan *chan;
+	int ch_desc_num;
+	int i;
+
+	if (!ctlr->chan_num)
+		return;
+
+	/* calculate average size of pool slice */
+	ch_desc_num = pool->num_desc / ctlr->chan_num;
+
+	/* split ctlr pool */
+	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
+		chan = ctlr->channels[i];
+		if (chan)
+			chan->desc_num = ch_desc_num;
+	}
+}
+
 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 				     cpdma_handler_fn handler)
 {
@@ -447,14 +474,25 @@ struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 	spin_lock_init(&chan->lock);
 
 	ctlr->channels[chan_num] = chan;
+	ctlr->chan_num++;
+
+	cpdma_chan_split_pool(ctlr);
+
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return chan;
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_create);
 
-int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr)
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
 {
-	return ctlr->pool->num_desc / 2;
+	unsigned long flags;
+	int desc_num;
+
+	spin_lock_irqsave(&chan->lock, flags);
+	desc_num = chan->desc_num;
+	spin_unlock_irqrestore(&chan->lock, flags);
+
+	return desc_num;
 }
 EXPORT_SYMBOL_GPL(cpdma_chan_get_rx_buf_num);
 
@@ -471,6 +509,10 @@ int cpdma_chan_destroy(struct cpdma_chan *chan)
 	if (chan->state != CPDMA_STATE_IDLE)
 		cpdma_chan_stop(chan);
 	ctlr->channels[chan->chan_num] = NULL;
+	ctlr->chan_num--;
+
+	cpdma_chan_split_pool(ctlr);
+
 	spin_unlock_irqrestore(&ctlr->lock, flags);
 	return 0;
 }
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 4b46cd6..9119b43 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -80,7 +80,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
 
 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
 				     cpdma_handler_fn handler);
-int cpdma_chan_get_rx_buf_num(struct cpdma_ctlr *ctlr);
+int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan);
 int cpdma_chan_destroy(struct cpdma_chan *chan);
 int cpdma_chan_start(struct cpdma_chan *chan);
 int cpdma_chan_stop(struct cpdma_chan *chan);
-- 
1.9.1

* [PATCH v3 2/5] net: ethernet: ti: davinci_cpdma: fix locking while ctrl_stop
  2016-08-15 23:25 [PATCH v3 0/5] net: ethernet: ti: cpsw: add cpdma multi-queue support Ivan Khoronzhuk
  2016-08-15 23:25 ` [PATCH v3 1/5] net: ethernet: ti: davinci_cpdma: split descs num between all channels Ivan Khoronzhuk
@ 2016-08-15 23:25 ` Ivan Khoronzhuk
  2016-08-17  5:42     ` Mugunthan V N
  2016-08-15 23:25 ` [PATCH v3 3/5] net: ethernet: ti: cpsw: add multi queue support Ivan Khoronzhuk
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 13+ messages in thread
From: Ivan Khoronzhuk @ 2016-08-15 23:25 UTC (permalink / raw)
  To: davem, netdev, mugunthanvnm, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar, Ivan Khoronzhuk

Interrupts shouldn't be disabled while receiving skbs. But during
ctrl_stop the channels are stopped under the ctlr lock, and all
remaining packets are handled with netif_receive_skb():

spin_lock_irqsave
    cpdma_ctlr_stop
        cpdma_chan_stop
            __cpdma_chan_free
                cpsw_rx_handler
                    netif_receive_skb

So, split the locking while the ctrl is stopped, so that interrupts
are still enabled while skbs are handled. Otherwise this can cause a
WARN_ONCE in rare cases when the ctrl is stopping while not all
packets have been handled by the NAPIs.
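
The resulting pattern, sketched outside the driver (chan_stop_all()
and irq_mask_all() are hypothetical stand-ins; only the lock
choreography matters):

	spin_lock_irqsave(&ctlr->lock, flags);
	ctlr->state = CPDMA_STATE_TEARDOWN;
	spin_unlock_irqrestore(&ctlr->lock, flags);

	/* teardown may end up in netif_receive_skb(), so run it with
	 * the lock dropped and interrupts enabled
	 */
	chan_stop_all(ctlr);

	spin_lock_irqsave(&ctlr->lock, flags);
	irq_mask_all(ctlr);	/* the INTMASKCLEAR writes */
	spin_unlock_irqrestore(&ctlr->lock, flags);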

Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
---
 drivers/net/ethernet/ti/davinci_cpdma.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 167fd65..ffb32af 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -334,12 +334,14 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
 	}
 
 	ctlr->state = CPDMA_STATE_TEARDOWN;
+	spin_unlock_irqrestore(&ctlr->lock, flags);
 
 	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
 		if (ctlr->channels[i])
 			cpdma_chan_stop(ctlr->channels[i]);
 	}
 
+	spin_lock_irqsave(&ctlr->lock, flags);
 	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
 	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
 
-- 
1.9.1

* [PATCH v3 3/5] net: ethernet: ti: cpsw: add multi queue support
  2016-08-15 23:25 [PATCH v3 0/5] net: ethernet: ti: cpsw: add cpdma multi-queue support Ivan Khoronzhuk
  2016-08-15 23:25 ` [PATCH v3 1/5] net: ethernet: ti: davinci_cpdma: split descs num between all channels Ivan Khoronzhuk
  2016-08-15 23:25 ` [PATCH v3 2/5] net: ethernet: ti: davinci_cpdma: fix locking while ctrl_stop Ivan Khoronzhuk
@ 2016-08-15 23:25 ` Ivan Khoronzhuk
  2016-08-15 23:25 ` [PATCH v3 4/5] net: ethernet: ti: davinci_cpdma: move cpdma channel struct macros to internals Ivan Khoronzhuk
  2016-08-15 23:25 ` [PATCH v3 5/5] net: ethernet: ti: cpsw: add ethtool channels support Ivan Khoronzhuk
  4 siblings, 0 replies; 13+ messages in thread
From: Ivan Khoronzhuk @ 2016-08-15 23:25 UTC (permalink / raw)
  To: davem, netdev, mugunthanvnm, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar, Ivan Khoronzhuk

The cpsw h/w supports up to 8 tx and 8 rx channels. This patch adds
multi-queue support to the driver only; shaper configuration will
be added with a separate patch series. The default shaper mode is,
as before, priority mode, but with corrected priority order: 0 is
the highest priority, 7 the lowest.

The poll function handles all unprocessed channels until all of
them are free, beginning with the highest priority channel.

In dual_emac mode the channels are shared between the two network
devices, as with the default single-queue mode.

The statistics for every channel can be read with:
$ ethtool -S ethX
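
In simplified form, the poll's walk over the pending-channel bitmask
looks roughly like this (a sketch: process_one() is a hypothetical
stand-in for cpdma_chan_process(), and the real driver additionally
re-reads the h/w state once the map is exhausted):

	/* Bit N of ch_map is set when channel N has unprocessed
	 * descriptors; the remaining budget is passed down so the
	 * channels share it, highest priority (lowest bit) first.
	 */
	static int poll_channels(u32 ch_map, int budget)
	{
		int ch, done = 0;

		for (ch = 0; ch_map && done < budget; ch_map >>= 1, ch++) {
			if (!(ch_map & 1))
				continue;
			done += process_one(ch, budget - done);
		}

		return done;	/* done < budget: all pending work drained */
	}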

Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
---
 drivers/net/ethernet/ti/cpsw.c          | 302 +++++++++++++++++++++-----------
 drivers/net/ethernet/ti/davinci_cpdma.c |  12 ++
 drivers/net/ethernet/ti/davinci_cpdma.h |   2 +
 3 files changed, 211 insertions(+), 105 deletions(-)

diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index a4c1538..05f36fc 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -124,7 +124,7 @@ do {								\
 
 #define RX_PRIORITY_MAPPING	0x76543210
 #define TX_PRIORITY_MAPPING	0x33221100
-#define CPDMA_TX_PRIORITY_MAP	0x76543210
+#define CPDMA_TX_PRIORITY_MAP	0x01234567
 
 #define CPSW_VLAN_AWARE		BIT(1)
 #define CPSW_ALE_VLAN_AWARE	1
@@ -144,6 +144,7 @@ do {								\
 		((cpsw->data.dual_emac) ? priv->emac_port :	\
 		cpsw->data.active_slave)
 #define IRQ_NUM			2
+#define CPSW_MAX_QUEUES		8
 
 static int debug_level;
 module_param(debug_level, int, 0);
@@ -379,13 +380,15 @@ struct cpsw_common {
 	int				rx_packet_max;
 	struct cpsw_slave		*slaves;
 	struct cpdma_ctlr		*dma;
-	struct cpdma_chan		*txch, *rxch;
+	struct cpdma_chan		*txch[CPSW_MAX_QUEUES];
+	struct cpdma_chan		*rxch[CPSW_MAX_QUEUES];
 	struct cpsw_ale			*ale;
 	bool				quirk_irq;
 	bool				rx_irq_disabled;
 	bool				tx_irq_disabled;
 	u32 irqs_table[IRQ_NUM];
 	struct cpts			*cpts;
+	int				rx_ch_num, tx_ch_num;
 };
 
 struct cpsw_priv {
@@ -457,35 +460,26 @@ static const struct cpsw_stats cpsw_gstrings_stats[] = {
 	{ "Rx Start of Frame Overruns", CPSW_STAT(rxsofoverruns) },
 	{ "Rx Middle of Frame Overruns", CPSW_STAT(rxmofoverruns) },
 	{ "Rx DMA Overruns", CPSW_STAT(rxdmaoverruns) },
-	{ "Rx DMA chan: head_enqueue", CPDMA_RX_STAT(head_enqueue) },
-	{ "Rx DMA chan: tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
-	{ "Rx DMA chan: pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
-	{ "Rx DMA chan: misqueued", CPDMA_RX_STAT(misqueued) },
-	{ "Rx DMA chan: desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
-	{ "Rx DMA chan: pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
-	{ "Rx DMA chan: runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
-	{ "Rx DMA chan: runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
-	{ "Rx DMA chan: empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
-	{ "Rx DMA chan: busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
-	{ "Rx DMA chan: good_dequeue", CPDMA_RX_STAT(good_dequeue) },
-	{ "Rx DMA chan: requeue", CPDMA_RX_STAT(requeue) },
-	{ "Rx DMA chan: teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
-	{ "Tx DMA chan: head_enqueue", CPDMA_TX_STAT(head_enqueue) },
-	{ "Tx DMA chan: tail_enqueue", CPDMA_TX_STAT(tail_enqueue) },
-	{ "Tx DMA chan: pad_enqueue", CPDMA_TX_STAT(pad_enqueue) },
-	{ "Tx DMA chan: misqueued", CPDMA_TX_STAT(misqueued) },
-	{ "Tx DMA chan: desc_alloc_fail", CPDMA_TX_STAT(desc_alloc_fail) },
-	{ "Tx DMA chan: pad_alloc_fail", CPDMA_TX_STAT(pad_alloc_fail) },
-	{ "Tx DMA chan: runt_receive_buf", CPDMA_TX_STAT(runt_receive_buff) },
-	{ "Tx DMA chan: runt_transmit_buf", CPDMA_TX_STAT(runt_transmit_buff) },
-	{ "Tx DMA chan: empty_dequeue", CPDMA_TX_STAT(empty_dequeue) },
-	{ "Tx DMA chan: busy_dequeue", CPDMA_TX_STAT(busy_dequeue) },
-	{ "Tx DMA chan: good_dequeue", CPDMA_TX_STAT(good_dequeue) },
-	{ "Tx DMA chan: requeue", CPDMA_TX_STAT(requeue) },
-	{ "Tx DMA chan: teardown_dequeue", CPDMA_TX_STAT(teardown_dequeue) },
 };
 
-#define CPSW_STATS_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
+static const struct cpsw_stats cpsw_gstrings_ch_stats[] = {
+	{ "head_enqueue", CPDMA_RX_STAT(head_enqueue) },
+	{ "tail_enqueue", CPDMA_RX_STAT(tail_enqueue) },
+	{ "pad_enqueue", CPDMA_RX_STAT(pad_enqueue) },
+	{ "misqueued", CPDMA_RX_STAT(misqueued) },
+	{ "desc_alloc_fail", CPDMA_RX_STAT(desc_alloc_fail) },
+	{ "pad_alloc_fail", CPDMA_RX_STAT(pad_alloc_fail) },
+	{ "runt_receive_buf", CPDMA_RX_STAT(runt_receive_buff) },
+	{ "runt_transmit_buf", CPDMA_RX_STAT(runt_transmit_buff) },
+	{ "empty_dequeue", CPDMA_RX_STAT(empty_dequeue) },
+	{ "busy_dequeue", CPDMA_RX_STAT(busy_dequeue) },
+	{ "good_dequeue", CPDMA_RX_STAT(good_dequeue) },
+	{ "requeue", CPDMA_RX_STAT(requeue) },
+	{ "teardown_dequeue", CPDMA_RX_STAT(teardown_dequeue) },
+};
+
+#define CPSW_STATS_COMMON_LEN	ARRAY_SIZE(cpsw_gstrings_stats)
+#define CPSW_STATS_CH_LEN	ARRAY_SIZE(cpsw_gstrings_ch_stats)
 
 #define ndev_to_cpsw(ndev) (((struct cpsw_priv *)netdev_priv(ndev))->cpsw)
 #define napi_to_cpsw(napi)	container_of(napi, struct cpsw_common, napi)
@@ -669,6 +663,7 @@ static void cpsw_intr_disable(struct cpsw_common *cpsw)
 
 static void cpsw_tx_handler(void *token, int len, int status)
 {
+	struct netdev_queue	*txq;
 	struct sk_buff		*skb = token;
 	struct net_device	*ndev = skb->dev;
 	struct cpsw_common	*cpsw = ndev_to_cpsw(ndev);
@@ -676,8 +671,10 @@ static void cpsw_tx_handler(void *token, int len, int status)
 	/* Check whether the queue is stopped due to stalled tx dma, if the
 	 * queue is stopped then start the queue as we have free desc for tx
 	 */
-	if (unlikely(netif_queue_stopped(ndev)))
-		netif_wake_queue(ndev);
+	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
+	if (unlikely(netif_tx_queue_stopped(txq)))
+		netif_tx_wake_queue(txq);
+
 	cpts_tx_timestamp(cpsw->cpts, skb);
 	ndev->stats.tx_packets++;
 	ndev->stats.tx_bytes += len;
@@ -686,6 +683,7 @@ static void cpsw_tx_handler(void *token, int len, int status)
 
 static void cpsw_rx_handler(void *token, int len, int status)
 {
+	struct cpdma_chan	*ch;
 	struct sk_buff		*skb = token;
 	struct sk_buff		*new_skb;
 	struct net_device	*ndev = skb->dev;
@@ -724,6 +722,7 @@ static void cpsw_rx_handler(void *token, int len, int status)
 
 	new_skb = netdev_alloc_skb_ip_align(ndev, cpsw->rx_packet_max);
 	if (new_skb) {
+		skb_copy_queue_mapping(new_skb, skb);
 		skb_put(skb, len);
 		cpts_rx_timestamp(cpsw->cpts, skb);
 		skb->protocol = eth_type_trans(skb, ndev);
@@ -736,7 +735,8 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	}
 
 requeue:
-	ret = cpdma_chan_submit(cpsw->rxch, new_skb, new_skb->data,
+	ch = cpsw->rxch[skb_get_queue_mapping(new_skb)];
+	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
 				skb_tailroom(new_skb), 0);
 	if (WARN_ON(ret < 0))
 		dev_kfree_skb_any(new_skb);
@@ -776,10 +776,27 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
 
 static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
 {
+	u32			ch_map;
+	int			num_tx, ch;
 	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
-	int			num_tx;
 
-	num_tx = cpdma_chan_process(cpsw->txch, budget);
+	/* process every unprocessed channel */
+	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+	for (ch = 0, num_tx = 0; num_tx < budget; ch_map >>= 1, ch++) {
+		if (!ch_map) {
+			ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
+			if (!ch_map)
+				break;
+
+			ch = 0;
+		}
+
+		if (!(ch_map & 0x01))
+			continue;
+
+		num_tx += cpdma_chan_process(cpsw->txch[ch], budget - num_tx);
+	}
+
 	if (num_tx < budget) {
 		napi_complete(napi_tx);
 		writel(0xff, &cpsw->wr_regs->tx_en);
@@ -794,10 +811,27 @@ static int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
 
 static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
 {
+	u32			ch_map;
+	int			num_rx, ch;
 	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
-	int			num_rx;
 
-	num_rx = cpdma_chan_process(cpsw->rxch, budget);
+	/* process every unprocessed channel */
+	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+	for (ch = 0, num_rx = 0; num_rx < budget; ch_map >>= 1, ch++) {
+		if (!ch_map) {
+			ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
+			if (!ch_map)
+				break;
+
+			ch = 0;
+		}
+
+		if (!(ch_map & 0x01))
+			continue;
+
+		num_rx += cpdma_chan_process(cpsw->rxch[ch], budget - num_rx);
+	}
+
 	if (num_rx < budget) {
 		napi_complete(napi_rx);
 		writel(0xff, &cpsw->wr_regs->rx_en);
@@ -896,10 +930,10 @@ static void cpsw_adjust_link(struct net_device *ndev)
 	if (link) {
 		netif_carrier_on(ndev);
 		if (netif_running(ndev))
-			netif_wake_queue(ndev);
+			netif_tx_wake_all_queues(ndev);
 	} else {
 		netif_carrier_off(ndev);
-		netif_stop_queue(ndev);
+		netif_tx_stop_all_queues(ndev);
 	}
 }
 
@@ -972,26 +1006,51 @@ update_return:
 
 static int cpsw_get_sset_count(struct net_device *ndev, int sset)
 {
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
 	switch (sset) {
 	case ETH_SS_STATS:
-		return CPSW_STATS_LEN;
+		return (CPSW_STATS_COMMON_LEN +
+		       (cpsw->rx_ch_num + cpsw->tx_ch_num) *
+		       CPSW_STATS_CH_LEN);
 	default:
 		return -EOPNOTSUPP;
 	}
 }
 
+static void cpsw_add_ch_strings(u8 **p, int ch_num, int rx_dir)
+{
+	int ch_stats_len;
+	int line;
+	int i;
+
+	ch_stats_len = CPSW_STATS_CH_LEN * ch_num;
+	for (i = 0; i < ch_stats_len; i++) {
+		line = i % CPSW_STATS_CH_LEN;
+		snprintf(*p, ETH_GSTRING_LEN,
+			 "%s DMA chan %d: %s", rx_dir ? "Rx" : "Tx",
+			 i / CPSW_STATS_CH_LEN,
+			 cpsw_gstrings_ch_stats[line].stat_string);
+		*p += ETH_GSTRING_LEN;
+	}
+}
+
 static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 {
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
 	u8 *p = data;
 	int i;
 
 	switch (stringset) {
 	case ETH_SS_STATS:
-		for (i = 0; i < CPSW_STATS_LEN; i++) {
+		for (i = 0; i < CPSW_STATS_COMMON_LEN; i++) {
 			memcpy(p, cpsw_gstrings_stats[i].stat_string,
 			       ETH_GSTRING_LEN);
 			p += ETH_GSTRING_LEN;
 		}
+
+		cpsw_add_ch_strings(&p, cpsw->rx_ch_num, 1);
+		cpsw_add_ch_strings(&p, cpsw->tx_ch_num, 0);
 		break;
 	}
 }
@@ -999,36 +1058,31 @@ static void cpsw_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
 static void cpsw_get_ethtool_stats(struct net_device *ndev,
 				    struct ethtool_stats *stats, u64 *data)
 {
-	struct cpdma_chan_stats rx_stats;
-	struct cpdma_chan_stats tx_stats;
-	u32 val;
 	u8 *p;
-	int i;
 	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+	struct cpdma_chan_stats ch_stats;
+	int i, l, ch;
 
 	/* Collect Davinci CPDMA stats for Rx and Tx Channel */
-	cpdma_chan_get_stats(cpsw->rxch, &rx_stats);
-	cpdma_chan_get_stats(cpsw->txch, &tx_stats);
-
-	for (i = 0; i < CPSW_STATS_LEN; i++) {
-		switch (cpsw_gstrings_stats[i].type) {
-		case CPSW_STATS:
-			val = readl(cpsw->hw_stats +
-				    cpsw_gstrings_stats[i].stat_offset);
-			data[i] = val;
-			break;
-
-		case CPDMA_RX_STATS:
-			p = (u8 *)&rx_stats +
-				cpsw_gstrings_stats[i].stat_offset;
-			data[i] = *(u32 *)p;
-			break;
+	for (l = 0; l < CPSW_STATS_COMMON_LEN; l++)
+		data[l] = readl(cpsw->hw_stats +
+				cpsw_gstrings_stats[l].stat_offset);
+
+	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+		cpdma_chan_get_stats(cpsw->rxch[ch], &ch_stats);
+		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+			p = (u8 *)&ch_stats +
+				cpsw_gstrings_ch_stats[i].stat_offset;
+			data[l] = *(u32 *)p;
+		}
+	}
 
-		case CPDMA_TX_STATS:
-			p = (u8 *)&tx_stats +
-				cpsw_gstrings_stats[i].stat_offset;
-			data[i] = *(u32 *)p;
-			break;
+	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+		cpdma_chan_get_stats(cpsw->txch[ch], &ch_stats);
+		for (i = 0; i < CPSW_STATS_CH_LEN; i++, l++) {
+			p = (u8 *)&ch_stats +
+				cpsw_gstrings_ch_stats[i].stat_offset;
+			data[l] = *(u32 *)p;
 		}
 	}
 }
@@ -1049,11 +1103,12 @@ static int cpsw_common_res_usage_state(struct cpsw_common *cpsw)
 }
 
 static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
-					struct sk_buff *skb)
+					struct sk_buff *skb,
+					struct cpdma_chan *txch)
 {
 	struct cpsw_common *cpsw = priv->cpsw;
 
-	return cpdma_chan_submit(cpsw->txch, skb, skb->data, skb->len,
+	return cpdma_chan_submit(txch, skb, skb->data, skb->len,
 				 priv->emac_port + cpsw->data.dual_emac);
 }
 
@@ -1217,33 +1272,38 @@ static int cpsw_fill_rx_channels(struct net_device *ndev)
 	struct cpsw_priv *priv = netdev_priv(ndev);
 	struct cpsw_common *cpsw = priv->cpsw;
 	struct sk_buff *skb;
-	int ch_buf_num;
-	int i, ret;
-
-	ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch);
-	for (i = 0; i < ch_buf_num; i++) {
-		skb = __netdev_alloc_skb_ip_align(ndev,
-						  cpsw->rx_packet_max,
-						  GFP_KERNEL);
-		if (!skb) {
-			cpsw_err(priv, ifup, "cannot allocate skb\n");
-			return -ENOMEM;
-		}
+	int ch, i, ret;
+
+	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
+		int ch_buf_num;
+
+		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]);
+		for (i = 0; i < ch_buf_num; i++) {
+			skb = __netdev_alloc_skb_ip_align(ndev,
+							  cpsw->rx_packet_max,
+							  GFP_KERNEL);
+			if (!skb) {
+				cpsw_err(priv, ifup, "cannot allocate skb\n");
+				return -ENOMEM;
+			}
 
-		ret = cpdma_chan_submit(cpsw->rxch, skb, skb->data,
-					skb_tailroom(skb), 0);
-		if (ret < 0) {
-			cpsw_err(priv, ifup,
-				 "cannot submit skb to rx channel, error %d\n",
-				 ret);
-			kfree_skb(skb);
-			return ret;
+			skb_set_queue_mapping(skb, ch);
+			ret = cpdma_chan_submit(cpsw->rxch[ch], skb, skb->data,
+						skb_tailroom(skb), 0);
+			if (ret < 0) {
+				cpsw_err(priv, ifup,
+					 "cannot submit skb to channel %d rx, error %d\n",
+					 ch, ret);
+				kfree_skb(skb);
+				return ret;
+			}
 		}
-	}
 
-	cpsw_info(priv, ifup, "submitted %d rx descriptors\n", ch_buf_num);
+		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
+			  ch, ch_buf_num);
+	}
 
-	return ch_buf_num;
+	return 0;
 }
 
 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_common *cpsw)
@@ -1279,6 +1339,19 @@ static int cpsw_ndo_open(struct net_device *ndev)
 		cpsw_intr_disable(cpsw);
 	netif_carrier_off(ndev);
 
+	/* Notify the stack of the actual queue counts. */
+	ret = netif_set_real_num_tx_queues(ndev, cpsw->tx_ch_num);
+	if (ret) {
+		dev_err(priv->dev, "cannot set real number of tx queues\n");
+		goto err_cleanup;
+	}
+
+	ret = netif_set_real_num_rx_queues(ndev, cpsw->rx_ch_num);
+	if (ret) {
+		dev_err(priv->dev, "cannot set real number of rx queues\n");
+		goto err_cleanup;
+	}
+
 	reg = cpsw->version;
 
 	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
@@ -1348,6 +1421,9 @@ static int cpsw_ndo_open(struct net_device *ndev)
 
 	if (cpsw->data.dual_emac)
 		cpsw->slaves[priv->emac_port].open_stat = true;
+
+	netif_tx_start_all_queues(ndev);
+
 	return 0;
 
 err_cleanup:
@@ -1364,7 +1440,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
 	struct cpsw_common *cpsw = priv->cpsw;
 
 	cpsw_info(priv, ifdown, "shutting down cpsw device\n");
-	netif_stop_queue(priv->ndev);
+	netif_tx_stop_all_queues(priv->ndev);
 	netif_carrier_off(priv->ndev);
 
 	if (cpsw_common_res_usage_state(cpsw) <= 1) {
@@ -1386,8 +1462,10 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 				       struct net_device *ndev)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
-	int ret;
 	struct cpsw_common *cpsw = priv->cpsw;
+	struct netdev_queue *txq;
+	struct cpdma_chan *txch;
+	int ret, q_idx;
 
 	netif_trans_update(ndev);
 
@@ -1403,7 +1481,12 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 
 	skb_tx_timestamp(skb);
 
-	ret = cpsw_tx_packet_submit(priv, skb);
+	q_idx = skb_get_queue_mapping(skb);
+	if (q_idx >= cpsw->tx_ch_num)
+		q_idx = q_idx % cpsw->tx_ch_num;
+
+	txch = cpsw->txch[q_idx];
+	ret = cpsw_tx_packet_submit(priv, skb, txch);
 	if (unlikely(ret != 0)) {
 		cpsw_err(priv, tx_err, "desc submit failed\n");
 		goto fail;
@@ -1412,13 +1495,16 @@ static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
 	/* If there is no more tx desc left free then we need to
 	 * tell the kernel to stop sending us tx frames.
 	 */
-	if (unlikely(!cpdma_check_free_tx_desc(cpsw->txch)))
-		netif_stop_queue(ndev);
+	if (unlikely(!cpdma_check_free_tx_desc(txch))) {
+		txq = netdev_get_tx_queue(ndev, q_idx);
+		netif_tx_stop_queue(txq);
+	}
 
 	return NETDEV_TX_OK;
 fail:
 	ndev->stats.tx_dropped++;
-	netif_stop_queue(ndev);
+	txq = netdev_get_tx_queue(ndev, skb_get_queue_mapping(skb));
+	netif_tx_stop_queue(txq);
 	return NETDEV_TX_BUSY;
 }
 
@@ -1600,12 +1686,16 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
 {
 	struct cpsw_priv *priv = netdev_priv(ndev);
 	struct cpsw_common *cpsw = priv->cpsw;
+	int ch;
 
 	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
 	ndev->stats.tx_errors++;
 	cpsw_intr_disable(cpsw);
-	cpdma_chan_stop(cpsw->txch);
-	cpdma_chan_start(cpsw->txch);
+	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
+		cpdma_chan_stop(cpsw->txch[ch]);
+		cpdma_chan_start(cpsw->txch[ch]);
+	}
+
 	cpsw_intr_enable(cpsw);
 }
 
@@ -2177,7 +2267,7 @@ static int cpsw_probe_dual_emac(struct cpsw_priv *priv)
 	struct cpsw_priv		*priv_sl2;
 	int ret = 0;
 
-	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
 	if (!ndev) {
 		dev_err(cpsw->dev, "cpsw: error allocating net_device\n");
 		return -ENOMEM;
@@ -2278,7 +2368,7 @@ static int cpsw_probe(struct platform_device *pdev)
 	cpsw = devm_kzalloc(&pdev->dev, sizeof(struct cpsw_common), GFP_KERNEL);
 	cpsw->dev = &pdev->dev;
 
-	ndev = alloc_etherdev(sizeof(struct cpsw_priv));
+	ndev = alloc_etherdev_mq(sizeof(struct cpsw_priv), CPSW_MAX_QUEUES);
 	if (!ndev) {
 		dev_err(&pdev->dev, "error allocating net_device\n");
 		return -ENOMEM;
@@ -2319,6 +2409,8 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_runtime_disable_ret;
 	}
 	data = &cpsw->data;
+	cpsw->rx_ch_num = 1;
+	cpsw->tx_ch_num = 1;
 
 	if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
 		memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
@@ -2443,12 +2535,12 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_runtime_disable_ret;
 	}
 
-	cpsw->txch = cpdma_chan_create(cpsw->dma, tx_chan_num(0),
-				       cpsw_tx_handler);
-	cpsw->rxch = cpdma_chan_create(cpsw->dma, rx_chan_num(0),
-				       cpsw_rx_handler);
+	cpsw->txch[0] = cpdma_chan_create(cpsw->dma, tx_chan_num(0),
+					  cpsw_tx_handler);
+	cpsw->rxch[0] = cpdma_chan_create(cpsw->dma, rx_chan_num(0),
+					  cpsw_rx_handler);
 
-	if (WARN_ON(!cpsw->txch || !cpsw->rxch)) {
+	if (WARN_ON(!cpsw->rxch[0] || !cpsw->txch[0])) {
 		dev_err(priv->dev, "error initializing dma channels\n");
 		ret = -ENOMEM;
 		goto clean_dma_ret;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index ffb32af..4b578b1 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -403,6 +403,18 @@ void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
 }
 EXPORT_SYMBOL_GPL(cpdma_ctlr_eoi);
 
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
+{
+	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
+}
+EXPORT_SYMBOL_GPL(cpdma_ctrl_rxchs_state);
+
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
+{
+	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
+}
+EXPORT_SYMBOL_GPL(cpdma_ctrl_txchs_state);
+
 /**
  * cpdma_chan_split_pool - Splits ctlr pool between all channels.
  * Has to be called under ctlr lock
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 9119b43..070f1d0 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -94,6 +94,8 @@ int cpdma_chan_process(struct cpdma_chan *chan, int quota);
 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable);
 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value);
 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable);
+u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr);
+u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr);
 bool cpdma_check_free_tx_desc(struct cpdma_chan *chan);
 
 enum cpdma_control {
-- 
1.9.1

* [PATCH v3 4/5] net: ethernet: ti: davinci_cpdma: move cpdma channel struct macros to internals
  2016-08-15 23:25 [PATCH v3 0/5] net: ethernet: ti: cpsw: add cpdma multi-queue support Ivan Khoronzhuk
                   ` (2 preceding siblings ...)
  2016-08-15 23:25 ` [PATCH v3 3/5] net: ethernet: ti: cpsw: add multi queue support Ivan Khoronzhuk
@ 2016-08-15 23:25 ` Ivan Khoronzhuk
  2016-08-17  6:23     ` Mugunthan V N
  2016-08-15 23:25 ` [PATCH v3 5/5] net: ethernet: ti: cpsw: add ethtool channels support Ivan Khoronzhuk
  4 siblings, 1 reply; 13+ messages in thread
From: Ivan Khoronzhuk @ 2016-08-15 23:25 UTC (permalink / raw)
  To: davem, netdev, mugunthanvnm, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar, Ivan Khoronzhuk

Keep the driver internals in the C file. Currently drivers don't
need to know whether a channel is rx or tx, except at channel
creation. So correct the "channel create" function accordingly, and
keep all the channel struct macros for internal use only.
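
For callers the change looks like this (a before/after sketch matching
the hunks below; dma and the handler names are illustrative):

	/* before: direction encoded by the caller via header macros */
	txch = cpdma_chan_create(dma, tx_chan_num(0), tx_handler);

	/* after: plain h/w channel index plus an rx_type flag; the
	 * tx/rx encoding of chan_num stays inside davinci_cpdma.c
	 */
	txch = cpdma_chan_create(dma, 0, tx_handler, 0);
	rxch = cpdma_chan_create(dma, 0, rx_handler, 1);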

Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
---
 drivers/net/ethernet/ti/cpsw.c          |  7 ++-----
 drivers/net/ethernet/ti/davinci_cpdma.c | 13 +++++++++++--
 drivers/net/ethernet/ti/davinci_cpdma.h |  9 +--------
 drivers/net/ethernet/ti/davinci_emac.c  |  8 ++++----
 4 files changed, 18 insertions(+), 19 deletions(-)

diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 05f36fc..6dcbd8a 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -2535,11 +2535,8 @@ static int cpsw_probe(struct platform_device *pdev)
 		goto clean_runtime_disable_ret;
 	}
 
-	cpsw->txch[0] = cpdma_chan_create(cpsw->dma, tx_chan_num(0),
-					  cpsw_tx_handler);
-	cpsw->rxch[0] = cpdma_chan_create(cpsw->dma, rx_chan_num(0),
-					  cpsw_rx_handler);
-
+	cpsw->txch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_tx_handler, 0);
+	cpsw->rxch[0] = cpdma_chan_create(cpsw->dma, 0, cpsw_rx_handler, 1);
 	if (WARN_ON(!cpsw->rxch[0] || !cpsw->txch[0])) {
 		dev_err(priv->dev, "error initializing dma channels\n");
 		ret = -ENOMEM;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 4b578b1..c3f35f1 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -124,6 +124,13 @@ struct cpdma_chan {
 	int	int_set, int_clear, td;
 };
 
+#define tx_chan_num(chan)	(chan)
+#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
+#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
+#define is_tx_chan(chan)	(!is_rx_chan(chan))
+#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
+#define chan_linear(chan)	__chan_linear((chan)->chan_num)
+
 /* The following make access to common cpdma_ctlr params more readable */
 #define dmaregs		params.dmaregs
 #define num_chan	params.num_chan
@@ -441,12 +448,14 @@ static void cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
 }
 
 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
-				     cpdma_handler_fn handler)
+				     cpdma_handler_fn handler, int rx_type)
 {
+	int offset = chan_num * 4;
 	struct cpdma_chan *chan;
-	int offset = (chan_num % CPDMA_MAX_CHANNELS) * 4;
 	unsigned long flags;
 
+	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
+
 	if (__chan_linear(chan_num) >= ctlr->num_chan)
 		return NULL;
 
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.h b/drivers/net/ethernet/ti/davinci_cpdma.h
index 070f1d0..a07b22b 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.h
+++ b/drivers/net/ethernet/ti/davinci_cpdma.h
@@ -17,13 +17,6 @@
 
 #define CPDMA_MAX_CHANNELS	BITS_PER_LONG
 
-#define tx_chan_num(chan)	(chan)
-#define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
-#define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
-#define is_tx_chan(chan)	(!is_rx_chan(chan))
-#define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
-#define chan_linear(chan)	__chan_linear((chan)->chan_num)
-
 #define CPDMA_RX_SOURCE_PORT(__status__)	((__status__ >> 16) & 0x7)
 
 #define CPDMA_EOI_RX_THRESH	0x0
@@ -79,7 +72,7 @@ int cpdma_ctlr_start(struct cpdma_ctlr *ctlr);
 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr);
 
 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
-				     cpdma_handler_fn handler);
+				     cpdma_handler_fn handler, int rx_type);
 int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan);
 int cpdma_chan_destroy(struct cpdma_chan *chan);
 int cpdma_chan_start(struct cpdma_chan *chan);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 2d6fc9a..2fd94a5 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1870,10 +1870,10 @@ static int davinci_emac_probe(struct platform_device *pdev)
 		goto no_pdata;
 	}
 
-	priv->txchan = cpdma_chan_create(priv->dma, tx_chan_num(EMAC_DEF_TX_CH),
-				       emac_tx_handler);
-	priv->rxchan = cpdma_chan_create(priv->dma, rx_chan_num(EMAC_DEF_RX_CH),
-				       emac_rx_handler);
+	priv->txchan = cpdma_chan_create(priv->dma, EMAC_DEF_TX_CH,
+					 emac_tx_handler, 0);
+	priv->rxchan = cpdma_chan_create(priv->dma, EMAC_DEF_RX_CH,
+					 emac_rx_handler, 1);
 	if (WARN_ON(!priv->txchan || !priv->rxchan)) {
 		rc = -ENOMEM;
 		goto no_cpdma_chan;
-- 
1.9.1

* [PATCH v3 5/5] net: ethernet: ti: cpsw: add ethtool channels support
  2016-08-15 23:25 [PATCH v3 0/5] net: ethernet: ti: cpsw: add cpdma multi-queue support Ivan Khoronzhuk
                   ` (3 preceding siblings ...)
  2016-08-15 23:25 ` [PATCH v3 4/5] net: ethernet: ti: davinci_cpdma: move cpdma channel struct macros to internals Ivan Khoronzhuk
@ 2016-08-15 23:25 ` Ivan Khoronzhuk
  2016-08-17  6:22     ` Mugunthan V N
  4 siblings, 1 reply; 13+ messages in thread
From: Ivan Khoronzhuk @ 2016-08-15 23:25 UTC (permalink / raw)
  To: davem, netdev, mugunthanvnm, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar, Ivan Khoronzhuk

These ops allow controlling the number of channels the driver works
with at the cpdma level. The maximum number of channels is 8 for rx
and 8 for tx. In dual_emac mode the h/w channels are shared between
the two interfaces, and changing the number on one interface changes
the number of channels on the other.

How many channels are supported and enabled:
$ ethtool -l ethX

Change the number of channels (up to 8):
$ ethtool -L ethX rx 6 tx 6

Per-channel statistics:
$ ethtool -S ethX
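
At the driver level this boils down to wiring two new ethtool_ops
callbacks (implemented in full in the diff below); in outline, with
the other cpsw callbacks omitted:

	static const struct ethtool_ops cpsw_ethtool_ops_sketch = {
		.get_channels	= cpsw_get_channels,	/* report max/current */
		.set_channels	= cpsw_set_channels,	/* re-create channels */
	};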

Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
---
 drivers/net/ethernet/ti/cpsw.c | 180 ++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 176 insertions(+), 4 deletions(-)

diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 6dcbd8a..f02e577 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -735,6 +735,11 @@ static void cpsw_rx_handler(void *token, int len, int status)
 	}
 
 requeue:
+	if (netif_dormant(ndev)) {
+		dev_kfree_skb_any(new_skb);
+		return;
+	}
+
 	ch = cpsw->rxch[skb_get_queue_mapping(new_skb)];
 	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
 				skb_tailroom(new_skb), 0);
@@ -1267,9 +1272,8 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
 	}
 }
 
-static int cpsw_fill_rx_channels(struct net_device *ndev)
+static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
 {
-	struct cpsw_priv *priv = netdev_priv(ndev);
 	struct cpsw_common *cpsw = priv->cpsw;
 	struct sk_buff *skb;
 	int ch, i, ret;
@@ -1279,7 +1283,7 @@ static int cpsw_fill_rx_channels(struct net_device *ndev)
 
 		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]);
 		for (i = 0; i < ch_buf_num; i++) {
-			skb = __netdev_alloc_skb_ip_align(ndev,
+			skb = __netdev_alloc_skb_ip_align(priv->ndev,
 							  cpsw->rx_packet_max,
 							  GFP_KERNEL);
 			if (!skb) {
@@ -1397,7 +1401,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
 			enable_irq(cpsw->irqs_table[0]);
 		}
 
-		ret = cpsw_fill_rx_channels(ndev);
+		ret = cpsw_fill_rx_channels(priv);
 		if (ret < 0)
 			goto err_cleanup;
 
@@ -2060,6 +2064,172 @@ static void cpsw_ethtool_op_complete(struct net_device *ndev)
 		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
 }
 
+static void cpsw_get_channels(struct net_device *ndev,
+			      struct ethtool_channels *ch)
+{
+	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
+
+	ch->max_combined = 0;
+	ch->max_rx = CPSW_MAX_QUEUES;
+	ch->max_tx = CPSW_MAX_QUEUES;
+	ch->max_other = 0;
+	ch->other_count = 0;
+	ch->rx_count = cpsw->rx_ch_num;
+	ch->tx_count = cpsw->tx_ch_num;
+	ch->combined_count = 0;
+}
+
+static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
+				  struct ethtool_channels *ch)
+{
+	if (ch->combined_count)
+		return -EINVAL;
+
+	/* verify we have at least one channel in each direction */
+	if (!ch->rx_count || !ch->tx_count)
+		return -EINVAL;
+
+	if (ch->rx_count > cpsw->data.channels ||
+	    ch->tx_count > cpsw->data.channels)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
+{
+	int (*poll)(struct napi_struct *, int);
+	struct cpsw_common *cpsw = priv->cpsw;
+	void (*handler)(void *, int, int);
+	struct cpdma_chan **chan;
+	int ret, *ch;
+
+	if (rx) {
+		ch = &cpsw->rx_ch_num;
+		chan = cpsw->rxch;
+		handler = cpsw_rx_handler;
+		poll = cpsw_rx_poll;
+	} else {
+		ch = &cpsw->tx_ch_num;
+		chan = cpsw->txch;
+		handler = cpsw_tx_handler;
+		poll = cpsw_tx_poll;
+	}
+
+	while (*ch < ch_num) {
+		chan[*ch] = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
+
+		if (IS_ERR(chan[*ch]))
+			return PTR_ERR(chan[*ch]);
+
+		if (!chan[*ch])
+			return -EINVAL;
+
+		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
+			  (rx ? "rx" : "tx"));
+		(*ch)++;
+	}
+
+	while (*ch > ch_num) {
+		(*ch)--;
+
+		ret = cpdma_chan_destroy(chan[*ch]);
+		if (ret)
+			return ret;
+
+		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
+			  (rx ? "rx" : "tx"));
+	}
+
+	return 0;
+}
+
+static int cpsw_update_channels(struct cpsw_priv *priv,
+				struct ethtool_channels *ch)
+{
+	int ret;
+
+	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
+	if (ret)
+		return ret;
+
+	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+			     struct ethtool_channels *chs)
+{
+	struct cpsw_priv *priv = netdev_priv(ndev);
+	struct cpsw_common *cpsw = priv->cpsw;
+	struct cpsw_slave *slave;
+	int i, ret;
+
+	ret = cpsw_check_ch_settings(cpsw, chs);
+	if (ret < 0)
+		return ret;
+
+	cpsw_intr_disable(cpsw);
+
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+		if (!(slave->ndev && netif_running(slave->ndev)))
+			continue;
+
+		netif_tx_stop_all_queues(slave->ndev);
+		netif_dormant_on(slave->ndev);
+	}
+
+	cpdma_ctlr_stop(cpsw->dma);
+	ret = cpsw_update_channels(priv, chs);
+	if (ret)
+		goto err;
+
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+		if (!(slave->ndev && netif_running(slave->ndev)))
+			continue;
+
+		/* inform stack about new count of queues */
+		ret = netif_set_real_num_tx_queues(slave->ndev,
+						   cpsw->tx_ch_num);
+		if (ret) {
+			dev_err(priv->dev, "cannot set real number of tx queues\n");
+			goto err;
+		}
+
+		ret = netif_set_real_num_rx_queues(slave->ndev,
+						   cpsw->rx_ch_num);
+		if (ret) {
+			dev_err(priv->dev, "cannot set real number of rx queues\n");
+			goto err;
+		}
+
+		netif_dormant_off(slave->ndev);
+	}
+
+	if (cpsw_common_res_usage_state(cpsw)) {
+		if (cpsw_fill_rx_channels(priv))
+			goto err;
+
+		cpdma_ctlr_start(cpsw->dma);
+		cpsw_intr_enable(cpsw);
+	}
+
+	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
+		if (!(slave->ndev && netif_running(slave->ndev)))
+			continue;
+		netif_tx_start_all_queues(slave->ndev);
+	}
+
+	return 0;
+err:
+	dev_err(priv->dev, "cannot update channels number, closing device\n");
+	dev_close(ndev);
+	return ret;
+}
+
 static const struct ethtool_ops cpsw_ethtool_ops = {
 	.get_drvinfo	= cpsw_get_drvinfo,
 	.get_msglevel	= cpsw_get_msglevel,
@@ -2081,6 +2251,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
 	.get_regs	= cpsw_get_regs,
 	.begin		= cpsw_ethtool_op_begin,
 	.complete	= cpsw_ethtool_op_complete,
+	.get_channels	= cpsw_get_channels,
+	.set_channels	= cpsw_set_channels,
 };
 
 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
-- 
1.9.1

* Re: [PATCH v3 2/5] net: ethernet: ti: davinci_cpdma: fix locking while ctrl_stop
  2016-08-15 23:25 ` [PATCH v3 2/5] net: ethernet: ti: davinci_cpdma: fix locking while ctrl_stop Ivan Khoronzhuk
@ 2016-08-17  5:42     ` Mugunthan V N
  0 siblings, 0 replies; 13+ messages in thread
From: Mugunthan V N @ 2016-08-17  5:42 UTC (permalink / raw)
  To: Ivan Khoronzhuk, davem, netdev, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar

On Tuesday 16 August 2016 04:55 AM, Ivan Khoronzhuk wrote:
> Interrupts shouldn't be disabled while receiving skbs. But during
> ctrl_stop the channels are stopped under the ctlr lock, and all
> remaining packets are handled with netif_receive_skb():
> 
> spin_lock_irqsave
>     cpdma_ctlr_stop
>         cpdma_chan_stop
>             __cpdma_chan_free
>                 cpsw_rx_handler
>                     netif_receive_skb
> 
> So, split the locking while the ctrl is stopped, so that interrupts
> are still enabled while skbs are handled. Otherwise this can cause a
> WARN_ONCE in rare cases when the ctrl is stopping while not all
> packets have been handled by the NAPIs.
> 
> Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>

Reviewed-by: Mugunthan V N <mugunthanvnm@ti.com>

Regards
Mugunthan V N

* Re: [PATCH v3 5/5] net: ethernet: ti: cpsw: add ethtool channels support
  2016-08-15 23:25 ` [PATCH v3 5/5] net: ethernet: ti: cpsw: add ethtool channels support Ivan Khoronzhuk
@ 2016-08-17  6:22     ` Mugunthan V N
  0 siblings, 0 replies; 13+ messages in thread
From: Mugunthan V N @ 2016-08-17  6:22 UTC (permalink / raw)
  To: Ivan Khoronzhuk, davem, netdev, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar

On Tuesday 16 August 2016 04:55 AM, Ivan Khoronzhuk wrote:
> These ops allow controlling the number of channels the driver works
> with at the cpdma level. The maximum number of channels is 8 for rx
> and 8 for tx. In dual_emac mode the h/w channels are shared between
> the two interfaces, and changing the number on one interface changes
> the number of channels on the other.
> 
> How many channels are supported and enabled:
> $ ethtool -l ethX
> 
> Change the number of channels (up to 8):
> $ ethtool -L ethX rx 6 tx 6
> 
> Per-channel statistics:
> $ ethtool -S ethX
> 
> Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
> ---
>  drivers/net/ethernet/ti/cpsw.c | 180 ++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 176 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
> index 6dcbd8a..f02e577 100644
> --- a/drivers/net/ethernet/ti/cpsw.c
> +++ b/drivers/net/ethernet/ti/cpsw.c
> @@ -735,6 +735,11 @@ static void cpsw_rx_handler(void *token, int len, int status)
>  	}
>  
>  requeue:
> +	if (netif_dormant(ndev)) {
> +		dev_kfree_skb_any(new_skb);
> +		return;
> +	}
> +
>  	ch = cpsw->rxch[skb_get_queue_mapping(new_skb)];
>  	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
>  				skb_tailroom(new_skb), 0);
> @@ -1267,9 +1272,8 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
>  	}
>  }
>  
> -static int cpsw_fill_rx_channels(struct net_device *ndev)
> +static int cpsw_fill_rx_channels(struct cpsw_priv *priv)

This change can be moved to patch 1/5, where the function is first
introduced.

>  {
> -	struct cpsw_priv *priv = netdev_priv(ndev);
>  	struct cpsw_common *cpsw = priv->cpsw;
>  	struct sk_buff *skb;
>  	int ch, i, ret;
> @@ -1279,7 +1283,7 @@ static int cpsw_fill_rx_channels(struct net_device *ndev)
>  
>  		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]);
>  		for (i = 0; i < ch_buf_num; i++) {
> -			skb = __netdev_alloc_skb_ip_align(ndev,
> +			skb = __netdev_alloc_skb_ip_align(priv->ndev,
>  							  cpsw->rx_packet_max,
>  							  GFP_KERNEL);
>  			if (!skb) {
> @@ -1397,7 +1401,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
>  			enable_irq(cpsw->irqs_table[0]);
>  		}
>  
> -		ret = cpsw_fill_rx_channels(ndev);
> +		ret = cpsw_fill_rx_channels(priv);
>  		if (ret < 0)
>  			goto err_cleanup;
>  
> @@ -2060,6 +2064,172 @@ static void cpsw_ethtool_op_complete(struct net_device *ndev)
>  		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
>  }
>  
> +static void cpsw_get_channels(struct net_device *ndev,
> +			      struct ethtool_channels *ch)
> +{
> +	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
> +
> +	ch->max_combined = 0;
> +	ch->max_rx = CPSW_MAX_QUEUES;
> +	ch->max_tx = CPSW_MAX_QUEUES;
> +	ch->max_other = 0;
> +	ch->other_count = 0;
> +	ch->rx_count = cpsw->rx_ch_num;
> +	ch->tx_count = cpsw->tx_ch_num;
> +	ch->combined_count = 0;
> +}
> +
> +static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
> +				  struct ethtool_channels *ch)
> +{
> +	if (ch->combined_count)
> +		return -EINVAL;
> +
> +	/* verify we have at least one channel in each direction */
> +	if (!ch->rx_count || !ch->tx_count)
> +		return -EINVAL;
> +
> +	if (ch->rx_count > cpsw->data.channels ||
> +	    ch->tx_count > cpsw->data.channels)
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
> +{
> +	int (*poll)(struct napi_struct *, int);
> +	struct cpsw_common *cpsw = priv->cpsw;
> +	void (*handler)(void *, int, int);
> +	struct cpdma_chan **chan;
> +	int ret, *ch;
> +
> +	if (rx) {
> +		ch = &cpsw->rx_ch_num;
> +		chan = cpsw->rxch;
> +		handler = cpsw_rx_handler;
> +		poll = cpsw_rx_poll;
> +	} else {
> +		ch = &cpsw->tx_ch_num;
> +		chan = cpsw->txch;
> +		handler = cpsw_tx_handler;
> +		poll = cpsw_tx_poll;
> +	}
> +
> +	while (*ch < ch_num) {
> +		chan[*ch] = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
> +
> +		if (IS_ERR(chan[*ch]))
> +			return PTR_ERR(chan[*ch]);
> +
> +		if (!chan[*ch])
> +			return -EINVAL;
> +
> +		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
> +			  (rx ? "rx" : "tx"));
> +		(*ch)++;
> +	}
> +
> +	while (*ch > ch_num) {
> +		(*ch)--;
> +
> +		ret = cpdma_chan_destroy(chan[*ch]);
> +		if (ret)
> +			return ret;
> +
> +		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
> +			  (rx ? "rx" : "tx"));
> +	}
> +
> +	return 0;
> +}
> +
> +static int cpsw_update_channels(struct cpsw_priv *priv,
> +				struct ethtool_channels *ch)
> +{
> +	int ret;
> +
> +	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
> +	if (ret)
> +		return ret;
> +
> +	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
> +	if (ret)
> +		return ret;
> +
> +	return 0;
> +}
> +
> +static int cpsw_set_channels(struct net_device *ndev,
> +			     struct ethtool_channels *chs)
> +{
> +	struct cpsw_priv *priv = netdev_priv(ndev);
> +	struct cpsw_common *cpsw = priv->cpsw;
> +	struct cpsw_slave *slave;
> +	int i, ret;
> +
> +	ret = cpsw_check_ch_settings(cpsw, chs);
> +	if (ret < 0)
> +		return ret;
> +
> +	cpsw_intr_disable(cpsw);
> +
> +	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
> +		if (!(slave->ndev && netif_running(slave->ndev)))
> +			continue;
> +
> +		netif_tx_stop_all_queues(slave->ndev);
> +		netif_dormant_on(slave->ndev);
> +	}
> +
> +	cpdma_ctlr_stop(cpsw->dma);
> +	ret = cpsw_update_channels(priv, chs);
> +	if (ret)
> +		goto err;
> +
> +	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
> +		if (!(slave->ndev && netif_running(slave->ndev)))
> +			continue;
> +
> +		/* inform stack about new count of queues */
> +		ret = netif_set_real_num_tx_queues(slave->ndev,
> +						   cpsw->tx_ch_num);
> +		if (ret) {
> +			dev_err(priv->dev, "cannot set real number of tx queues\n");
> +			goto err;
> +		}
> +
> +		ret = netif_set_real_num_rx_queues(slave->ndev,
> +						   cpsw->rx_ch_num);
> +		if (ret) {
> +			dev_err(priv->dev, "cannot set real number of rx queues\n");
> +			goto err;
> +		}
> +
> +		netif_dormant_off(slave->ndev);
> +	}
> +
> +	if (cpsw_common_res_usage_state(cpsw)) {
> +		if (cpsw_fill_rx_channels(priv))
> +			goto err;
> +
> +		cpdma_ctlr_start(cpsw->dma);
> +		cpsw_intr_enable(cpsw);
> +	}
> +
> +	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
> +		if (!(slave->ndev && netif_running(slave->ndev)))
> +			continue;
> +		netif_tx_start_all_queues(slave->ndev);
> +	}
> +
> +	return 0;
> +err:
> +	dev_err(priv->dev, "cannot update channels number, closing device\n");
> +	dev_close(ndev);
> +	return ret;
> +}
> +
>  static const struct ethtool_ops cpsw_ethtool_ops = {
>  	.get_drvinfo	= cpsw_get_drvinfo,
>  	.get_msglevel	= cpsw_get_msglevel,
> @@ -2081,6 +2251,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
>  	.get_regs	= cpsw_get_regs,
>  	.begin		= cpsw_ethtool_op_begin,
>  	.complete	= cpsw_ethtool_op_complete,
> +	.get_channels	= cpsw_get_channels,
> +	.set_channels	= cpsw_set_channels,
>  };
>  
>  static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH v3 5/5] net: ethernet: ti: cpsw: add ethtool channels support
@ 2016-08-17  6:22     ` Mugunthan V N
  0 siblings, 0 replies; 13+ messages in thread
From: Mugunthan V N @ 2016-08-17  6:22 UTC (permalink / raw)
  To: Ivan Khoronzhuk, davem, netdev, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar

On Tuesday 16 August 2016 04:55 AM, Ivan Khoronzhuk wrote:
> These ops allow to control number of channels driver is allowed to
> work with at cpdma level. The maximum number of channels is 8 for
> rx and 8 for tx. In dual_emac mode the h/w channels are shared
> between two interfaces and changing number on one interface changes
> number of channels on another.
> 
> How many channels are supported and enabled:
> $ ethtool -l ethX
> 
> Change number of channels (up to 8)
> $ ethtool -L ethX rx 6 tx 6
> 
> Per-channel statistic:
> $ ethtool -S ethX
> 
> Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
> ---
>  drivers/net/ethernet/ti/cpsw.c | 180 ++++++++++++++++++++++++++++++++++++++++-
>  1 file changed, 176 insertions(+), 4 deletions(-)
> 
> diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
> index 6dcbd8a..f02e577 100644
> --- a/drivers/net/ethernet/ti/cpsw.c
> +++ b/drivers/net/ethernet/ti/cpsw.c
> @@ -735,6 +735,11 @@ static void cpsw_rx_handler(void *token, int len, int status)
>  	}
>  
>  requeue:
> +	if (netif_dormant(ndev)) {
> +		dev_kfree_skb_any(new_skb);
> +		return;
> +	}
> +
>  	ch = cpsw->rxch[skb_get_queue_mapping(new_skb)];
>  	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
>  				skb_tailroom(new_skb), 0);
> @@ -1267,9 +1272,8 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
>  	}
>  }
>  
> -static int cpsw_fill_rx_channels(struct net_device *ndev)
> +static int cpsw_fill_rx_channels(struct cpsw_priv *priv)

This change can be moved to patch 1/5, where the function definition is
first introduced.

>  {
> -	struct cpsw_priv *priv = netdev_priv(ndev);
>  	struct cpsw_common *cpsw = priv->cpsw;
>  	struct sk_buff *skb;
>  	int ch, i, ret;
> @@ -1279,7 +1283,7 @@ static int cpsw_fill_rx_channels(struct net_device *ndev)
>  
>  		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]);
>  		for (i = 0; i < ch_buf_num; i++) {
> -			skb = __netdev_alloc_skb_ip_align(ndev,
> +			skb = __netdev_alloc_skb_ip_align(priv->ndev,
>  							  cpsw->rx_packet_max,
>  							  GFP_KERNEL);
>  			if (!skb) {
> @@ -1397,7 +1401,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
>  			enable_irq(cpsw->irqs_table[0]);
>  		}
>  
> -		ret = cpsw_fill_rx_channels(ndev);
> +		ret = cpsw_fill_rx_channels(priv);
>  		if (ret < 0)
>  			goto err_cleanup;
>  
> @@ -2060,6 +2064,172 @@ static void cpsw_ethtool_op_complete(struct net_device *ndev)
>  		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
>  }
>  
> +static void cpsw_get_channels(struct net_device *ndev,
> +			      struct ethtool_channels *ch)
> +{
> +	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
> +
> +	ch->max_combined = 0;
> +	ch->max_rx = CPSW_MAX_QUEUES;
> +	ch->max_tx = CPSW_MAX_QUEUES;
> +	ch->max_other = 0;
> +	ch->other_count = 0;
> +	ch->rx_count = cpsw->rx_ch_num;
> +	ch->tx_count = cpsw->tx_ch_num;
> +	ch->combined_count = 0;
> +}
> +
> +static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
> +				  struct ethtool_channels *ch)
> +{
> +	if (ch->combined_count)
> +		return -EINVAL;
> +
> +	/* verify we have at least one channel in each direction */
> +	if (!ch->rx_count || !ch->tx_count)
> +		return -EINVAL;
> +
> +	if (ch->rx_count > cpsw->data.channels ||
> +	    ch->tx_count > cpsw->data.channels)
> +		return -EINVAL;
> +
> +	return 0;
> +}
> +
> +static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
> +{
> +	int (*poll)(struct napi_struct *, int);
> +	struct cpsw_common *cpsw = priv->cpsw;
> +	void (*handler)(void *, int, int);
> +	struct cpdma_chan **chan;
> +	int ret, *ch;
> +
> +	if (rx) {
> +		ch = &cpsw->rx_ch_num;
> +		chan = cpsw->rxch;
> +		handler = cpsw_rx_handler;
> +		poll = cpsw_rx_poll;
> +	} else {
> +		ch = &cpsw->tx_ch_num;
> +		chan = cpsw->txch;
> +		handler = cpsw_tx_handler;
> +		poll = cpsw_tx_poll;
> +	}
> +
> +	while (*ch < ch_num) {
> +		chan[*ch] = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
> +
> +		if (IS_ERR(chan[*ch]))
> +			return PTR_ERR(chan[*ch]);
> +
> +		if (!chan[*ch])
> +			return -EINVAL;
> +
> +		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
> +			  (rx ? "rx" : "tx"));
> +		(*ch)++;
> +	}
> +
> +	while (*ch > ch_num) {
> +		(*ch)--;
> +
> +		ret = cpdma_chan_destroy(chan[*ch]);
> +		if (ret)
> +			return ret;
> +
> +		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
> +			  (rx ? "rx" : "tx"));
> +	}
> +
> +	return 0;
> +}
> +
> +static int cpsw_update_channels(struct cpsw_priv *priv,
> +				struct ethtool_channels *ch)
> +{
> +	int ret;
> +
> +	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
> +	if (ret)
> +		return ret;
> +
> +	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
> +	if (ret)
> +		return ret;
> +
> +	return 0;
> +}
> +
> +static int cpsw_set_channels(struct net_device *ndev,
> +			     struct ethtool_channels *chs)
> +{
> +	struct cpsw_priv *priv = netdev_priv(ndev);
> +	struct cpsw_common *cpsw = priv->cpsw;
> +	struct cpsw_slave *slave;
> +	int i, ret;
> +
> +	ret = cpsw_check_ch_settings(cpsw, chs);
> +	if (ret < 0)
> +		return ret;
> +
> +	cpsw_intr_disable(cpsw);
> +
> +	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
> +		if (!(slave->ndev && netif_running(slave->ndev)))
> +			continue;
> +
> +		netif_tx_stop_all_queues(slave->ndev);
> +		netif_dormant_on(slave->ndev);
> +	}
> +
> +	cpdma_ctlr_stop(cpsw->dma);
> +	ret = cpsw_update_channels(priv, chs);
> +	if (ret)
> +		goto err;
> +
> +	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
> +		if (!(slave->ndev && netif_running(slave->ndev)))
> +			continue;
> +
> +		/* inform stack about new count of queues */
> +		ret = netif_set_real_num_tx_queues(slave->ndev,
> +						   cpsw->tx_ch_num);
> +		if (ret) {
> +			dev_err(priv->dev, "cannot set real number of tx queues\n");
> +			goto err;
> +		}
> +
> +		ret = netif_set_real_num_rx_queues(slave->ndev,
> +						   cpsw->rx_ch_num);
> +		if (ret) {
> +			dev_err(priv->dev, "cannot set real number of rx queues\n");
> +			goto err;
> +		}
> +
> +		netif_dormant_off(slave->ndev);
> +	}
> +
> +	if (cpsw_common_res_usage_state(cpsw)) {
> +		if (cpsw_fill_rx_channels(priv))
> +			goto err;
> +
> +		cpdma_ctlr_start(cpsw->dma);
> +		cpsw_intr_enable(cpsw);
> +	}
> +
> +	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
> +		if (!(slave->ndev && netif_running(slave->ndev)))
> +			continue;
> +		netif_tx_start_all_queues(slave->ndev);
> +	}
> +
> +	return 0;
> +err:
> +	dev_err(priv->dev, "cannot update channels number, closing device\n");
> +	dev_close(ndev);
> +	return ret;
> +}
> +
>  static const struct ethtool_ops cpsw_ethtool_ops = {
>  	.get_drvinfo	= cpsw_get_drvinfo,
>  	.get_msglevel	= cpsw_get_msglevel,
> @@ -2081,6 +2251,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
>  	.get_regs	= cpsw_get_regs,
>  	.begin		= cpsw_ethtool_op_begin,
>  	.complete	= cpsw_ethtool_op_complete,
> +	.get_channels	= cpsw_get_channels,
> +	.set_channels	= cpsw_set_channels,
>  };
>  
>  static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
> 

^ permalink raw reply	[flat|nested] 13+ messages in thread
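
Given the cpsw_get_channels() implementation quoted above (maximums of
CPSW_MAX_QUEUES for rx and tx, no combined or other channels), a query
with the series' defaults of one rx and one tx channel would report
something like this -- illustrative output, exact layout depends on the
ethtool version:

	$ ethtool -l eth0
	Channel parameters for eth0:
	Pre-set maximums:
	RX:             8
	TX:             8
	Other:          0
	Combined:       0
	Current hardware settings:
	RX:             1
	TX:             1
	Other:          0
	Combined:       0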

* Re: [PATCH v3 4/5] net: ethernet: ti: davinci_cpdma: move cpdma channel struct macroses to internals
  2016-08-15 23:25 ` [PATCH v3 4/5] net: ethernet: ti: davinci_cpdma: move cpdma channel struct macroses to internals Ivan Khoronzhuk
@ 2016-08-17  6:23     ` Mugunthan V N
  0 siblings, 0 replies; 13+ messages in thread
From: Mugunthan V N @ 2016-08-17  6:23 UTC (permalink / raw)
  To: Ivan Khoronzhuk, davem, netdev, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar

On Tuesday 16 August 2016 04:55 AM, Ivan Khoronzhuk wrote:
> Keep the driver internals in the C file. Currently drivers don't need
> to know whether a channel is rx or tx, except in the create function.
> So correct the "channel create" function, and keep all channel struct
> macros for internal use only.
> 
> Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>

Reviewed-by: Mugunthan V N <mugunthanvnm@ti.com>

Regards
Mugunthan V N

^ permalink raw reply	[flat|nested] 13+ messages in thread
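
The encapsulation described in this 4/5 patch is the usual opaque-handle
pattern: the header keeps struct cpdma_chan opaque and passes the rx/tx
direction as a flag to the create call (matching the four-argument
cpdma_chan_create() usage quoted in the 5/5 review above), while the
direction-dependent channel numbering stays inside davinci_cpdma.c. A
rough sketch -- the chan_hw_num() macro name and its numbering are
hypothetical, for illustration only:

	/* davinci_cpdma.h: users see only an opaque handle */
	struct cpdma_chan;
	struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr,
					     int chan_num,
					     void (*handler)(void *, int, int),
					     int rx_type);

	/* davinci_cpdma.c: direction-aware numbering kept private */
	#define chan_hw_num(num, is_rx)	((is_rx) ? (num) + 8 : (num))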

* Re: [PATCH v3 5/5] net: ethernet: ti: cpsw: add ethtool channels support
  2016-08-17  6:22     ` Mugunthan V N
@ 2016-08-17  7:22     ` ivan.khoronzhuk
  -1 siblings, 0 replies; 13+ messages in thread
From: ivan.khoronzhuk @ 2016-08-17  7:22 UTC (permalink / raw)
  To: Mugunthan V N, Ivan Khoronzhuk, davem, netdev, grygorii.strashko
  Cc: linux-kernel, linux-omap, nsekhar



On 17.08.16 09:22, Mugunthan V N wrote:
> On Tuesday 16 August 2016 04:55 AM, Ivan Khoronzhuk wrote:
>> These ops allow control over the number of channels the driver works
>> with at the cpdma level. The maximum number of channels is 8 for rx
>> and 8 for tx. In dual_emac mode the h/w channels are shared between
>> the two interfaces, so changing the number on one interface also
>> changes it on the other.
>>
>> Check how many channels are supported and enabled:
>> $ ethtool -l ethX
>>
>> Change the number of channels (up to 8):
>> $ ethtool -L ethX rx 6 tx 6
>>
>> Show per-channel statistics:
>> $ ethtool -S ethX
>>
>> Signed-off-by: Ivan Khoronzhuk <ivan.khoronzhuk@linaro.org>
>> ---
>>  drivers/net/ethernet/ti/cpsw.c | 180 ++++++++++++++++++++++++++++++++++++++++-
>>  1 file changed, 176 insertions(+), 4 deletions(-)
>>
>> diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
>> index 6dcbd8a..f02e577 100644
>> --- a/drivers/net/ethernet/ti/cpsw.c
>> +++ b/drivers/net/ethernet/ti/cpsw.c
>> @@ -735,6 +735,11 @@ static void cpsw_rx_handler(void *token, int len, int status)
>>  	}
>>
>>  requeue:
>> +	if (netif_dormant(ndev)) {
>> +		dev_kfree_skb_any(new_skb);
>> +		return;
>> +	}
>> +
>>  	ch = cpsw->rxch[skb_get_queue_mapping(new_skb)];
>>  	ret = cpdma_chan_submit(ch, new_skb, new_skb->data,
>>  				skb_tailroom(new_skb), 0);
>> @@ -1267,9 +1272,8 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
>>  	}
>>  }
>>
>> -static int cpsw_fill_rx_channels(struct net_device *ndev)
>> +static int cpsw_fill_rx_channels(struct cpsw_priv *priv)
>
> This change can be moved to patch 1/5, where the function definition is
> first introduced.
Yes. Will do it in v4.

>
>>  {
>> -	struct cpsw_priv *priv = netdev_priv(ndev);
>>  	struct cpsw_common *cpsw = priv->cpsw;
>>  	struct sk_buff *skb;
>>  	int ch, i, ret;
>> @@ -1279,7 +1283,7 @@ static int cpsw_fill_rx_channels(struct net_device *ndev)
>>
>>  		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxch[ch]);
>>  		for (i = 0; i < ch_buf_num; i++) {
>> -			skb = __netdev_alloc_skb_ip_align(ndev,
>> +			skb = __netdev_alloc_skb_ip_align(priv->ndev,
>>  							  cpsw->rx_packet_max,
>>  							  GFP_KERNEL);
>>  			if (!skb) {
>> @@ -1397,7 +1401,7 @@ static int cpsw_ndo_open(struct net_device *ndev)
>>  			enable_irq(cpsw->irqs_table[0]);
>>  		}
>>
>> -		ret = cpsw_fill_rx_channels(ndev);
>> +		ret = cpsw_fill_rx_channels(priv);
>>  		if (ret < 0)
>>  			goto err_cleanup;
>>
>> @@ -2060,6 +2064,172 @@ static void cpsw_ethtool_op_complete(struct net_device *ndev)
>>  		cpsw_err(priv, drv, "ethtool complete failed %d\n", ret);
>>  }
>>
>> +static void cpsw_get_channels(struct net_device *ndev,
>> +			      struct ethtool_channels *ch)
>> +{
>> +	struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
>> +
>> +	ch->max_combined = 0;
>> +	ch->max_rx = CPSW_MAX_QUEUES;
>> +	ch->max_tx = CPSW_MAX_QUEUES;
>> +	ch->max_other = 0;
>> +	ch->other_count = 0;
>> +	ch->rx_count = cpsw->rx_ch_num;
>> +	ch->tx_count = cpsw->tx_ch_num;
>> +	ch->combined_count = 0;
>> +}
>> +
>> +static int cpsw_check_ch_settings(struct cpsw_common *cpsw,
>> +				  struct ethtool_channels *ch)
>> +{
>> +	if (ch->combined_count)
>> +		return -EINVAL;
>> +
>> +	/* verify we have at least one channel in each direction */
>> +	if (!ch->rx_count || !ch->tx_count)
>> +		return -EINVAL;
>> +
>> +	if (ch->rx_count > cpsw->data.channels ||
>> +	    ch->tx_count > cpsw->data.channels)
>> +		return -EINVAL;
>> +
>> +	return 0;
>> +}
>> +
>> +static int cpsw_update_channels_res(struct cpsw_priv *priv, int ch_num, int rx)
>> +{
>> +	int (*poll)(struct napi_struct *, int);
>> +	struct cpsw_common *cpsw = priv->cpsw;
>> +	void (*handler)(void *, int, int);
>> +	struct cpdma_chan **chan;
>> +	int ret, *ch;
>> +
>> +	if (rx) {
>> +		ch = &cpsw->rx_ch_num;
>> +		chan = cpsw->rxch;
>> +		handler = cpsw_rx_handler;
>> +		poll = cpsw_rx_poll;
>> +	} else {
>> +		ch = &cpsw->tx_ch_num;
>> +		chan = cpsw->txch;
>> +		handler = cpsw_tx_handler;
>> +		poll = cpsw_tx_poll;
>> +	}
>> +
>> +	while (*ch < ch_num) {
>> +		chan[*ch] = cpdma_chan_create(cpsw->dma, *ch, handler, rx);
>> +
>> +		if (IS_ERR(chan[*ch]))
>> +			return PTR_ERR(chan[*ch]);
>> +
>> +		if (!chan[*ch])
>> +			return -EINVAL;
>> +
>> +		cpsw_info(priv, ifup, "created new %d %s channel\n", *ch,
>> +			  (rx ? "rx" : "tx"));
>> +		(*ch)++;
>> +	}
>> +
>> +	while (*ch > ch_num) {
>> +		(*ch)--;
>> +
>> +		ret = cpdma_chan_destroy(chan[*ch]);
>> +		if (ret)
>> +			return ret;
>> +
>> +		cpsw_info(priv, ifup, "destroyed %d %s channel\n", *ch,
>> +			  (rx ? "rx" : "tx"));
>> +	}
>> +
>> +	return 0;
>> +}
>> +
>> +static int cpsw_update_channels(struct cpsw_priv *priv,
>> +				struct ethtool_channels *ch)
>> +{
>> +	int ret;
>> +
>> +	ret = cpsw_update_channels_res(priv, ch->rx_count, 1);
>> +	if (ret)
>> +		return ret;
>> +
>> +	ret = cpsw_update_channels_res(priv, ch->tx_count, 0);
>> +	if (ret)
>> +		return ret;
>> +
>> +	return 0;
>> +}
>> +
>> +static int cpsw_set_channels(struct net_device *ndev,
>> +			     struct ethtool_channels *chs)
>> +{
>> +	struct cpsw_priv *priv = netdev_priv(ndev);
>> +	struct cpsw_common *cpsw = priv->cpsw;
>> +	struct cpsw_slave *slave;
>> +	int i, ret;
>> +
>> +	ret = cpsw_check_ch_settings(cpsw, chs);
>> +	if (ret < 0)
>> +		return ret;
>> +
>> +	cpsw_intr_disable(cpsw);
>> +
>> +	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
>> +		if (!(slave->ndev && netif_running(slave->ndev)))
>> +			continue;
>> +
>> +		netif_tx_stop_all_queues(slave->ndev);
>> +		netif_dormant_on(slave->ndev);
>> +	}
>> +
>> +	cpdma_ctlr_stop(cpsw->dma);
>> +	ret = cpsw_update_channels(priv, chs);
>> +	if (ret)
>> +		goto err;
>> +
>> +	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
>> +		if (!(slave->ndev && netif_running(slave->ndev)))
>> +			continue;
>> +
>> +		/* inform stack about new count of queues */
>> +		ret = netif_set_real_num_tx_queues(slave->ndev,
>> +						   cpsw->tx_ch_num);
>> +		if (ret) {
>> +			dev_err(priv->dev, "cannot set real number of tx queues\n");
>> +			goto err;
>> +		}
>> +
>> +		ret = netif_set_real_num_rx_queues(slave->ndev,
>> +						   cpsw->rx_ch_num);
>> +		if (ret) {
>> +			dev_err(priv->dev, "cannot set real number of rx queues\n");
>> +			goto err;
>> +		}
>> +
>> +		netif_dormant_off(slave->ndev);
>> +	}
>> +
>> +	if (cpsw_common_res_usage_state(cpsw)) {
>> +		if (cpsw_fill_rx_channels(priv))
>> +			goto err;
>> +
>> +		cpdma_ctlr_start(cpsw->dma);
>> +		cpsw_intr_enable(cpsw);
>> +	}
>> +
>> +	for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
>> +		if (!(slave->ndev && netif_running(slave->ndev)))
>> +			continue;
>> +		netif_tx_start_all_queues(slave->ndev);
>> +	}
>> +
>> +	return 0;
>> +err:
>> +	dev_err(priv->dev, "cannot update channels number, closing device\n");
>> +	dev_close(ndev);
>> +	return ret;
>> +}
>> +
>>  static const struct ethtool_ops cpsw_ethtool_ops = {
>>  	.get_drvinfo	= cpsw_get_drvinfo,
>>  	.get_msglevel	= cpsw_get_msglevel,
>> @@ -2081,6 +2251,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
>>  	.get_regs	= cpsw_get_regs,
>>  	.begin		= cpsw_ethtool_op_begin,
>>  	.complete	= cpsw_ethtool_op_complete,
>> +	.get_channels	= cpsw_get_channels,
>> +	.set_channels	= cpsw_set_channels,
>>  };
>>
>>  static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_common *cpsw,
>>
>

^ permalink raw reply	[flat|nested] 13+ messages in thread
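
As a usage note, requests that fail the cpsw_check_ch_settings() checks
quoted above are rejected before any reconfiguration is attempted, for
example (illustrative session; the exact error string printed by ethtool
may vary):

	# combined channels are not supported by this driver
	$ ethtool -L eth0 combined 2
	Cannot set device channel parameters: Invalid argument

	# at least one rx and one tx channel must remain
	$ ethtool -L eth0 rx 0 tx 4
	Cannot set device channel parameters: Invalid argument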

end of thread, other threads:[~2016-08-17  7:23 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-08-15 23:25 [PATCH v3 0/5] net: ethernet: ti: cpsw: add cpdma multi-queue support Ivan Khoronzhuk
2016-08-15 23:25 ` [PATCH v3 1/5] net: ethernet: ti: davinci_cpdma: split descs num between all channels Ivan Khoronzhuk
2016-08-15 23:25 ` [PATCH v3 2/5] net: ethernet: ti: davinci_cpdma: fix locking while ctrl_stop Ivan Khoronzhuk
2016-08-17  5:42   ` Mugunthan V N
2016-08-15 23:25 ` [PATCH v3 3/5] net: ethernet: ti: cpsw: add multi queue support Ivan Khoronzhuk
2016-08-15 23:25 ` [PATCH v3 4/5] net: ethernet: ti: davinci_cpdma: move cpdma channel struct macroses to internals Ivan Khoronzhuk
2016-08-17  6:23   ` Mugunthan V N
2016-08-15 23:25 ` [PATCH v3 5/5] net: ethernet: ti: cpsw: add ethtool channels support Ivan Khoronzhuk
2016-08-17  6:22   ` Mugunthan V N
2016-08-17  7:22     ` ivan.khoronzhuk
