* [net-next 00/13] DPAA Ethernet changes
@ 2019-10-31 11:36 Madalin Bucur
  2019-10-31 11:36 ` [net-next 01/13] dpaa_eth: use only one buffer pool per interface Madalin Bucur
                   ` (12 more replies)
  0 siblings, 13 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Here are some more changes for the DPAA 1.x area.
In summary, these changes use pages for the receive buffers and
for the scatter-gather table fed to the HW on the Tx path, perform
a bit of cleanup in some convoluted parts of the code, add some
minor fixes related to DMA (un)mapping sequencing for a less
common scenario, and add a device link that unbinds the interfaces
when the QMan portal they use is removed.


Madalin Bucur (13):
  dpaa_eth: use only one buffer pool per interface
  dpaa_eth: use page backed rx buffers
  dpaa_eth: perform DMA unmapping before read
  dpaa_eth: avoid timestamp read on error paths
  dpaa_eth: simplify variables used in dpaa_cleanup_tx_fd()
  dpaa_eth: use fd information in dpaa_cleanup_tx_fd()
  dpaa_eth: cleanup skb_to_contig_fd()
  dpaa_eth: use a page to store the SGT
  dpaa_eth: add dropped frames to percpu ethtool stats
  dpaa_eth: remove netdev_err() for user errors
  dpaa_eth: extend delays in ndo_stop
  soc: fsl: qbman: allow registering a device link for the portal user
  dpaa_eth: register a device link for the qman portal used

 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c     | 274 ++++++++++-----------
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.h     |   4 +-
 .../net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c   |   6 +-
 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c |  56 ++---
 drivers/soc/fsl/qbman/qman.c                       |   7 +
 include/soc/fsl/qman.h                             |  11 +
 6 files changed, 173 insertions(+), 185 deletions(-)

-- 
2.1.0



* [net-next 01/13] dpaa_eth: use only one buffer pool per interface
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 02/13] dpaa_eth: use page backed rx buffers Madalin Bucur
                   ` (11 subsequent siblings)
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Currently the DPAA Ethernet driver uses three buffer pools for
each interface, with three different sizes for the buffers
provided to the FMan reception path. This patch reduces the
number of buffer pools to one per interface. This change is in
preparation for a follow-up patch that switches from netdev_frags
to page backed buffers on the receive path.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c     | 95 ++++++++--------------
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.h     |  4 +-
 .../net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c   |  6 +-
 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 44 +++++-----
 4 files changed, 57 insertions(+), 92 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index d3214541c7c5..60d63c1be9c6 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -178,23 +178,7 @@ struct fm_port_fqs {
 /* All the dpa bps in use at any moment */
 static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
-/* The raw buffer size must be cacheline aligned */
 #define DPAA_BP_RAW_SIZE 4096
-/* When using more than one buffer pool, the raw sizes are as follows:
- * 1 bp: 4KB
- * 2 bp: 2KB, 4KB
- * 3 bp: 1KB, 2KB, 4KB
- * 4 bp: 1KB, 2KB, 4KB, 8KB
- */
-static inline size_t bpool_buffer_raw_size(u8 index, u8 cnt)
-{
-	size_t res = DPAA_BP_RAW_SIZE / 4;
-	u8 i;
-
-	for (i = (cnt < 3) ? cnt : 3; i < 3 + index; i++)
-		res *= 2;
-	return res;
-}
 
 /* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
  * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
@@ -596,10 +580,7 @@ static void dpaa_bp_free(struct dpaa_bp *dpaa_bp)
 
 static void dpaa_bps_free(struct dpaa_priv *priv)
 {
-	int i;
-
-	for (i = 0; i < DPAA_BPS_NUM; i++)
-		dpaa_bp_free(priv->dpaa_bps[i]);
+	dpaa_bp_free(priv->dpaa_bp);
 }
 
 /* Use multiple WQs for FQ assignment:
@@ -1197,15 +1178,15 @@ static int dpaa_eth_init_tx_port(struct fman_port *port, struct dpaa_fq *errq,
 	return err;
 }
 
-static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
-				 size_t count, struct dpaa_fq *errq,
+static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp *bp,
+				 struct dpaa_fq *errq,
 				 struct dpaa_fq *defq, struct dpaa_fq *pcdq,
 				 struct dpaa_buffer_layout *buf_layout)
 {
 	struct fman_buffer_prefix_content buf_prefix_content;
 	struct fman_port_rx_params *rx_p;
 	struct fman_port_params params;
-	int i, err;
+	int err;
 
 	memset(&params, 0, sizeof(params));
 	memset(&buf_prefix_content, 0, sizeof(buf_prefix_content));
@@ -1224,12 +1205,9 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
 		rx_p->pcd_fqs_count = DPAA_ETH_PCD_RXQ_NUM;
 	}
 
-	count = min(ARRAY_SIZE(rx_p->ext_buf_pools.ext_buf_pool), count);
-	rx_p->ext_buf_pools.num_of_pools_used = (u8)count;
-	for (i = 0; i < count; i++) {
-		rx_p->ext_buf_pools.ext_buf_pool[i].id =  bps[i]->bpid;
-		rx_p->ext_buf_pools.ext_buf_pool[i].size = (u16)bps[i]->size;
-	}
+	rx_p->ext_buf_pools.num_of_pools_used = 1;
+	rx_p->ext_buf_pools.ext_buf_pool[0].id =  bp->bpid;
+	rx_p->ext_buf_pools.ext_buf_pool[0].size = (u16)bp->size;
 
 	err = fman_port_config(port, &params);
 	if (err) {
@@ -1252,7 +1230,7 @@ static int dpaa_eth_init_rx_port(struct fman_port *port, struct dpaa_bp **bps,
 }
 
 static int dpaa_eth_init_ports(struct mac_device *mac_dev,
-			       struct dpaa_bp **bps, size_t count,
+			       struct dpaa_bp *bp,
 			       struct fm_port_fqs *port_fqs,
 			       struct dpaa_buffer_layout *buf_layout,
 			       struct device *dev)
@@ -1266,7 +1244,7 @@ static int dpaa_eth_init_ports(struct mac_device *mac_dev,
 	if (err)
 		return err;
 
-	err = dpaa_eth_init_rx_port(rxport, bps, count, port_fqs->rx_errq,
+	err = dpaa_eth_init_rx_port(rxport, bp, port_fqs->rx_errq,
 				    port_fqs->rx_defq, port_fqs->rx_pcdq,
 				    &buf_layout[RX]);
 
@@ -1583,17 +1561,16 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
 {
 	struct dpaa_bp *dpaa_bp;
 	int *countptr;
-	int res, i;
+	int res;
+
+	dpaa_bp = priv->dpaa_bp;
+	if (!dpaa_bp)
+		return -EINVAL;
+	countptr = this_cpu_ptr(dpaa_bp->percpu_count);
+	res  = dpaa_eth_refill_bpool(dpaa_bp, countptr);
+	if (res)
+		return res;
 
-	for (i = 0; i < DPAA_BPS_NUM; i++) {
-		dpaa_bp = priv->dpaa_bps[i];
-		if (!dpaa_bp)
-			return -EINVAL;
-		countptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		res  = dpaa_eth_refill_bpool(dpaa_bp, countptr);
-		if (res)
-			return res;
-	}
 	return 0;
 }
 
@@ -2761,13 +2738,13 @@ static inline u16 dpaa_get_headroom(struct dpaa_buffer_layout *bl)
 
 static int dpaa_eth_probe(struct platform_device *pdev)
 {
-	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM] = {NULL};
+	struct dpaa_bp *dpaa_bp = NULL;
 	struct net_device *net_dev = NULL;
 	struct dpaa_fq *dpaa_fq, *tmp;
 	struct dpaa_priv *priv = NULL;
 	struct fm_port_fqs port_fqs;
 	struct mac_device *mac_dev;
-	int err = 0, i, channel;
+	int err = 0, channel;
 	struct device *dev;
 
 	dev = &pdev->dev;
@@ -2856,23 +2833,21 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	priv->buf_layout[TX].priv_data_size = DPAA_TX_PRIV_DATA_SIZE; /* Tx */
 
 	/* bp init */
-	for (i = 0; i < DPAA_BPS_NUM; i++) {
-		dpaa_bps[i] = dpaa_bp_alloc(dev);
-		if (IS_ERR(dpaa_bps[i])) {
-			err = PTR_ERR(dpaa_bps[i]);
-			goto free_dpaa_bps;
-		}
-		/* the raw size of the buffers used for reception */
-		dpaa_bps[i]->raw_size = bpool_buffer_raw_size(i, DPAA_BPS_NUM);
-		/* avoid runtime computations by keeping the usable size here */
-		dpaa_bps[i]->size = dpaa_bp_size(dpaa_bps[i]->raw_size);
-		dpaa_bps[i]->priv = priv;
-
-		err = dpaa_bp_alloc_pool(dpaa_bps[i]);
-		if (err < 0)
-			goto free_dpaa_bps;
-		priv->dpaa_bps[i] = dpaa_bps[i];
+	dpaa_bp = dpaa_bp_alloc(dev);
+	if (IS_ERR(dpaa_bp)) {
+		err = PTR_ERR(dpaa_bp);
+		goto free_dpaa_bps;
 	}
+	/* the raw size of the buffers used for reception */
+	dpaa_bp->raw_size = DPAA_BP_RAW_SIZE;
+	/* avoid runtime computations by keeping the usable size here */
+	dpaa_bp->size = dpaa_bp_size(dpaa_bp->raw_size);
+	dpaa_bp->priv = priv;
+
+	err = dpaa_bp_alloc_pool(dpaa_bp);
+	if (err < 0)
+		goto free_dpaa_bps;
+	priv->dpaa_bp = dpaa_bp;
 
 	INIT_LIST_HEAD(&priv->dpaa_fq_list);
 
@@ -2930,7 +2905,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	priv->rx_headroom = dpaa_get_headroom(&priv->buf_layout[RX]);
 
 	/* All real interfaces need their ports initialized */
-	err = dpaa_eth_init_ports(mac_dev, dpaa_bps, DPAA_BPS_NUM, &port_fqs,
+	err = dpaa_eth_init_ports(mac_dev, dpaa_bp, &port_fqs,
 				  &priv->buf_layout[0], dev);
 	if (err)
 		goto free_dpaa_fqs;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
index 1bdfead1d334..fc2cc4c48e06 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.h
@@ -47,8 +47,6 @@
 /* Total number of Tx queues */
 #define DPAA_ETH_TXQ_NUM	(DPAA_TC_NUM * DPAA_TC_TXQ_NUM)
 
-#define DPAA_BPS_NUM 3 /* number of bpools per interface */
-
 /* More detailed FQ types - used for fine-grained WQ assignments */
 enum dpaa_fq_type {
 	FQ_TYPE_RX_DEFAULT = 1, /* Rx Default FQs */
@@ -148,7 +146,7 @@ struct dpaa_buffer_layout {
 
 struct dpaa_priv {
 	struct dpaa_percpu_priv __percpu *percpu_priv;
-	struct dpaa_bp *dpaa_bps[DPAA_BPS_NUM];
+	struct dpaa_bp *dpaa_bp;
 	/* Store here the needed Tx headroom for convenience and speed
 	 * (even though it can be computed based on the fields of buf_layout)
 	 */
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
index 0d9b185e317f..ee62d25cac81 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c
@@ -131,11 +131,9 @@ static ssize_t dpaa_eth_show_bpids(struct device *dev,
 {
 	struct dpaa_priv *priv = netdev_priv(to_net_dev(dev));
 	ssize_t bytes = 0;
-	int i = 0;
 
-	for (i = 0; i < DPAA_BPS_NUM; i++)
-		bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
-				  priv->dpaa_bps[i]->bpid);
+	bytes += snprintf(buf + bytes, PAGE_SIZE - bytes, "%u\n",
+				  priv->dpaa_bp->bpid);
 
 	return bytes;
 }
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 7ce2e99b594d..bc6ed1df53ca 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -223,7 +223,7 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
 	unsigned int total_stats, num_stats;
 
 	num_stats   = num_online_cpus() + 1;
-	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM) +
+	total_stats = num_stats * (DPAA_STATS_PERCPU_LEN + 1) +
 			DPAA_STATS_GLOBAL_LEN;
 
 	switch (type) {
@@ -235,10 +235,10 @@ static int dpaa_get_sset_count(struct net_device *net_dev, int type)
 }
 
 static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
-		       int crr_cpu, u64 *bp_count, u64 *data)
+		       int crr_cpu, u64 bp_count, u64 *data)
 {
 	int num_values = num_cpus + 1;
-	int crr = 0, j;
+	int crr = 0;
 
 	/* update current CPU's stats and also add them to the total values */
 	data[crr * num_values + crr_cpu] = percpu_priv->in_interrupt;
@@ -262,23 +262,21 @@ static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
 	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
 	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
 
-	for (j = 0; j < DPAA_BPS_NUM; j++) {
-		data[crr * num_values + crr_cpu] = bp_count[j];
-		data[crr++ * num_values + num_cpus] += bp_count[j];
-	}
+	data[crr * num_values + crr_cpu] = bp_count;
+	data[crr++ * num_values + num_cpus] += bp_count;
 }
 
 static void dpaa_get_ethtool_stats(struct net_device *net_dev,
 				   struct ethtool_stats *stats, u64 *data)
 {
-	u64 bp_count[DPAA_BPS_NUM], cg_time, cg_num;
 	struct dpaa_percpu_priv *percpu_priv;
 	struct dpaa_rx_errors rx_errors;
 	unsigned int num_cpus, offset;
+	u64 bp_count, cg_time, cg_num;
 	struct dpaa_ern_cnt ern_cnt;
 	struct dpaa_bp *dpaa_bp;
 	struct dpaa_priv *priv;
-	int total_stats, i, j;
+	int total_stats, i;
 	bool cg_status;
 
 	total_stats = dpaa_get_sset_count(net_dev, ETH_SS_STATS);
@@ -292,12 +290,10 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
 
 	for_each_online_cpu(i) {
 		percpu_priv = per_cpu_ptr(priv->percpu_priv, i);
-		for (j = 0; j < DPAA_BPS_NUM; j++) {
-			dpaa_bp = priv->dpaa_bps[j];
-			if (!dpaa_bp->percpu_count)
-				continue;
-			bp_count[j] = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
-		}
+		dpaa_bp = priv->dpaa_bp;
+		if (!dpaa_bp->percpu_count)
+			continue;
+		bp_count = *(per_cpu_ptr(dpaa_bp->percpu_count, i));
 		rx_errors.dme += percpu_priv->rx_errors.dme;
 		rx_errors.fpe += percpu_priv->rx_errors.fpe;
 		rx_errors.fse += percpu_priv->rx_errors.fse;
@@ -315,7 +311,7 @@ static void dpaa_get_ethtool_stats(struct net_device *net_dev,
 		copy_stats(percpu_priv, num_cpus, i, bp_count, data);
 	}
 
-	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + DPAA_BPS_NUM);
+	offset = (num_cpus + 1) * (DPAA_STATS_PERCPU_LEN + 1);
 	memcpy(data + offset, &rx_errors, sizeof(struct dpaa_rx_errors));
 
 	offset += sizeof(struct dpaa_rx_errors) / sizeof(u64);
@@ -363,18 +359,16 @@ static void dpaa_get_strings(struct net_device *net_dev, u32 stringset,
 		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
 		strings += ETH_GSTRING_LEN;
 	}
-	for (i = 0; i < DPAA_BPS_NUM; i++) {
-		for (j = 0; j < num_cpus; j++) {
-			snprintf(string_cpu, ETH_GSTRING_LEN,
-				 "bpool %c [CPU %d]", 'a' + i, j);
-			memcpy(strings, string_cpu, ETH_GSTRING_LEN);
-			strings += ETH_GSTRING_LEN;
-		}
-		snprintf(string_cpu, ETH_GSTRING_LEN, "bpool %c [TOTAL]",
-			 'a' + i);
+	for (j = 0; j < num_cpus; j++) {
+		snprintf(string_cpu, ETH_GSTRING_LEN,
+			 "bpool [CPU %d]", j);
 		memcpy(strings, string_cpu, ETH_GSTRING_LEN);
 		strings += ETH_GSTRING_LEN;
 	}
+	snprintf(string_cpu, ETH_GSTRING_LEN, "bpool [TOTAL]");
+	memcpy(strings, string_cpu, ETH_GSTRING_LEN);
+	strings += ETH_GSTRING_LEN;
+
 	memcpy(strings, dpaa_stats_global, size);
 }
 
-- 
2.1.0



* [net-next 02/13] dpaa_eth: use page backed rx buffers
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
  2019-10-31 11:36 ` [net-next 01/13] dpaa_eth: use only one buffer pool per interface Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 03/13] dpaa_eth: perform DMA unmapping before read Madalin Bucur
                   ` (10 subsequent siblings)
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Change the buffers used for reception from netdev_frags to pages.
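
As a rough sketch of the new scheme (the actual driver changes are in
the diff below; the helper name is illustrative), each Rx buffer is
now a single order-0 page, DMA mapped in full:

	/* sketch: allocate and map one page backed Rx buffer */
	static int seed_one_rx_buffer(struct device *rx_dma_dev,
				      dma_addr_t *addr)
	{
		struct page *p = dev_alloc_pages(0);

		if (unlikely(!p))
			return -ENOMEM;

		*addr = dma_map_page(rx_dma_dev, p, 0, DPAA_BP_RAW_SIZE,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(rx_dma_dev, *addr)) {
			__free_pages(p, 0);
			return -ENOMEM;
		}

		return 0;
	}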

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 51 +++++++++++---------------
 1 file changed, 22 insertions(+), 29 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 60d63c1be9c6..388d3ccb5fdb 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -180,13 +180,7 @@ static struct dpaa_bp *dpaa_bp_array[BM_MAX_NUM_OF_POOLS];
 
 #define DPAA_BP_RAW_SIZE 4096
 
-/* FMan-DMA requires 16-byte alignment for Rx buffers, but SKB_DATA_ALIGN is
- * even stronger (SMP_CACHE_BYTES-aligned), so we just get away with that,
- * via SKB_WITH_OVERHEAD(). We can't rely on netdev_alloc_frag() giving us
- * half-page-aligned buffers, so we reserve some more space for start-of-buffer
- * alignment.
- */
-#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD((raw_size) - SMP_CACHE_BYTES)
+#define dpaa_bp_size(raw_size) SKB_WITH_OVERHEAD(raw_size)
 
 static int dpaa_max_frm;
 
@@ -1313,13 +1307,14 @@ static void dpaa_fd_release(const struct net_device *net_dev,
 		vaddr = phys_to_virt(qm_fd_addr(fd));
 		sgt = vaddr + qm_fd_get_offset(fd);
 
-		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
-				 dpaa_bp->size, DMA_FROM_DEVICE);
+		dma_unmap_page(dpaa_bp->priv->rx_dma_dev, qm_fd_addr(fd),
+			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 
 		dpaa_release_sgt_members(sgt);
 
-		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, vaddr,
-				      dpaa_bp->size, DMA_FROM_DEVICE);
+		addr = dma_map_page(dpaa_bp->priv->rx_dma_dev,
+				    virt_to_page(vaddr), 0, DPAA_BP_RAW_SIZE,
+				    DMA_FROM_DEVICE);
 		if (dma_mapping_error(dpaa_bp->priv->rx_dma_dev, addr)) {
 			netdev_err(net_dev, "DMA mapping failed\n");
 			return;
@@ -1469,21 +1464,18 @@ static int dpaa_bp_add_8_bufs(const struct dpaa_bp *dpaa_bp)
 	struct net_device *net_dev = dpaa_bp->priv->net_dev;
 	struct bm_buffer bmb[8];
 	dma_addr_t addr;
-	void *new_buf;
+	struct page *p;
 	u8 i;
 
 	for (i = 0; i < 8; i++) {
-		new_buf = netdev_alloc_frag(dpaa_bp->raw_size);
-		if (unlikely(!new_buf)) {
-			netdev_err(net_dev,
-				   "netdev_alloc_frag() failed, size %zu\n",
-				   dpaa_bp->raw_size);
+		p = dev_alloc_pages(0);
+		if (unlikely(!p)) {
+			netdev_err(net_dev, "dev_alloc_pages() failed\n");
 			goto release_previous_buffs;
 		}
-		new_buf = PTR_ALIGN(new_buf, SMP_CACHE_BYTES);
 
-		addr = dma_map_single(dpaa_bp->priv->rx_dma_dev, new_buf,
-				      dpaa_bp->size, DMA_FROM_DEVICE);
+		addr = dma_map_page(dpaa_bp->priv->rx_dma_dev, p, 0,
+				    DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 		if (unlikely(dma_mapping_error(dpaa_bp->priv->rx_dma_dev,
 					       addr))) {
 			netdev_err(net_dev, "DMA map failed\n");
@@ -1694,7 +1686,7 @@ static struct sk_buff *contig_fd_to_skb(const struct dpaa_priv *priv,
 	return skb;
 
 free_buffer:
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 	return NULL;
 }
 
@@ -1741,8 +1733,8 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 			goto free_buffers;
 
 		count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
-		dma_unmap_single(dpaa_bp->priv->rx_dma_dev, sg_addr,
-				 dpaa_bp->size, DMA_FROM_DEVICE);
+		dma_unmap_page(priv->rx_dma_dev, sg_addr,
+			       DPAA_BP_RAW_SIZE, DMA_FROM_DEVICE);
 		if (!skb) {
 			sz = dpaa_bp->size +
 				SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
@@ -1794,7 +1786,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	WARN_ONCE(i == DPAA_SGT_MAX_ENTRIES, "No final bit on SGT\n");
 
 	/* free the SG table buffer */
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 
 	return skb;
 
@@ -1811,7 +1803,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 	for (i = 0; i < DPAA_SGT_MAX_ENTRIES ; i++) {
 		sg_addr = qm_sg_addr(&sgt[i]);
 		sg_vaddr = phys_to_virt(sg_addr);
-		skb_free_frag(sg_vaddr);
+		free_pages((unsigned long)sg_vaddr, 0);
 		dpaa_bp = dpaa_bpid2pool(sgt[i].bpid);
 		if (dpaa_bp) {
 			count_ptr = this_cpu_ptr(dpaa_bp->percpu_count);
@@ -1822,7 +1814,7 @@ static struct sk_buff *sg_fd_to_skb(const struct dpaa_priv *priv,
 			break;
 	}
 	/* free the SGT fragment */
-	skb_free_frag(vaddr);
+	free_pages((unsigned long)vaddr, 0);
 
 	return NULL;
 }
@@ -2281,8 +2273,8 @@ static enum qman_cb_dqrr_result rx_default_dqrr(struct qman_portal *portal,
 		return qman_cb_dqrr_consume;
 	}
 
-	dma_unmap_single(dpaa_bp->priv->rx_dma_dev, addr, dpaa_bp->size,
-			 DMA_FROM_DEVICE);
+	dma_unmap_page(dpaa_bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+		       DMA_FROM_DEVICE);
 
 	/* prefetch the first 64 bytes of the frame or the SGT start */
 	vaddr = phys_to_virt(addr);
@@ -2637,7 +2629,8 @@ static inline void dpaa_bp_free_pf(const struct dpaa_bp *bp,
 {
 	dma_addr_t addr = bm_buf_addr(bmb);
 
-	dma_unmap_single(bp->priv->rx_dma_dev, addr, bp->size, DMA_FROM_DEVICE);
+	dma_unmap_page(bp->priv->rx_dma_dev, addr, DPAA_BP_RAW_SIZE,
+		       DMA_FROM_DEVICE);
 
 	skb_free_frag(phys_to_virt(addr));
 }
-- 
2.1.0



* [net-next 03/13] dpaa_eth: perform DMA unmapping before read
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
  2019-10-31 11:36 ` [net-next 01/13] dpaa_eth: use only one buffer pool per interface Madalin Bucur
  2019-10-31 11:36 ` [net-next 02/13] dpaa_eth: use page backed rx buffers Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 04/13] dpaa_eth: avoid timestamp read on error paths Madalin Bucur
                   ` (9 subsequent siblings)
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

DMA unmapping is required before accessing the HW provided timestamping
information: while the buffer is still DMA mapped, the timestamp the
hardware wrote into it is not guaranteed to be visible to the CPU.
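
For context, a minimal, driver-agnostic sketch of the ordering rule
this fix follows (dev, buf, len and ts_offset are illustrative names,
not the driver's own):

	dma_addr_t addr;
	u64 ts;

	/* While a streaming mapping is live, the buffer belongs to the
	 * device; unmap it (or dma_sync_single_for_cpu()) before the
	 * CPU reads anything the hardware wrote into it.
	 */
	addr = dma_map_single(dev, buf, len, DMA_BIDIRECTIONAL);
	/* ... hardware processes the frame, writes back a timestamp ... */
	dma_unmap_single(dev, addr, len, DMA_BIDIRECTIONAL);
	ts = *(u64 *)(buf + ts_offset);	/* safe only after the unmap */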

Fixes: 4664856e9ca2 ("dpaa_eth: add support for hardware timestamping")
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 32 ++++++++++++++------------
 1 file changed, 17 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 388d3ccb5fdb..efb4753f0723 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1591,18 +1591,6 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 	skbh = (struct sk_buff **)phys_to_virt(addr);
 	skb = *skbh;
 
-	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-
-		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
-					  &ns)) {
-			shhwtstamps.hwtstamp = ns_to_ktime(ns);
-			skb_tstamp_tx(skb, &shhwtstamps);
-		} else {
-			dev_warn(dev, "fman_port_get_tstamp failed!\n");
-		}
-	}
-
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
 		dma_unmap_single(priv->tx_dma_dev, addr,
@@ -1625,14 +1613,28 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 			dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
 				       qm_sg_entry_get_len(&sgt[i]), dma_dir);
 		}
-
-		/* Free the page frag that we allocated on Tx */
-		skb_free_frag(phys_to_virt(addr));
 	} else {
 		dma_unmap_single(priv->tx_dma_dev, addr,
 				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
 	}
 
+	/* DMA unmapping is required before accessing the HW provided info */
+	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
+
+		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+					  &ns)) {
+			shhwtstamps.hwtstamp = ns_to_ktime(ns);
+			skb_tstamp_tx(skb, &shhwtstamps);
+		} else {
+			dev_warn(dev, "fman_port_get_tstamp failed!\n");
+		}
+	}
+
+	if (qm_fd_get_format(fd) == qm_fd_sg)
+		/* Free the page frag that we allocated on Tx */
+		skb_free_frag(phys_to_virt(addr));
+
 	return skb;
 }
 
-- 
2.1.0



* [net-next 04/13] dpaa_eth: avoid timestamp read on error paths
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (2 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 03/13] dpaa_eth: perform DMA unmapping before read Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 05/13] dpaa_eth: simplify variables used in dpaa_cleanup_tx_fd() Madalin Bucur
                   ` (8 subsequent siblings)
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

The dpaa_cleanup_tx_fd() function is called by the frame transmit
confirmation callback but also on several error paths. This function
reads the transmit timestamp value. Avoid reading an invalid
timestamp value on the error paths.

Fixes: 4664856e9ca2 ("dpaa_eth: add support for hardware timestamping")
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index efb4753f0723..75ade6a5599a 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1571,13 +1571,15 @@ static int dpaa_eth_refill_bpools(struct dpaa_priv *priv)
  * Skb freeing is not handled here.
  *
  * This function may be called on error paths in the Tx function, so guard
- * against cases when not all fd relevant fields were filled in.
+ * against cases when not all fd relevant fields were filled in. To avoid
+ * reading the invalid transmission timestamp for the error paths set ts to
+ * false.
  *
  * Return the skb backpointer, since for S/G frames the buffer containing it
  * gets freed here.
  */
 static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
-					  const struct qm_fd *fd)
+					  const struct qm_fd *fd, bool ts)
 {
 	const enum dma_data_direction dma_dir = DMA_TO_DEVICE;
 	struct device *dev = priv->net_dev->dev.parent;
@@ -1619,7 +1621,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 	}
 
 	/* DMA unmapping is required before accessing the HW provided info */
-	if (priv->tx_tstamp && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
+	if (ts && priv->tx_tstamp &&
+	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 
 		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
@@ -2085,7 +2088,7 @@ dpaa_start_xmit(struct sk_buff *skb, struct net_device *net_dev)
 	if (likely(dpaa_xmit(priv, percpu_stats, queue_mapping, &fd) == 0))
 		return NETDEV_TX_OK;
 
-	dpaa_cleanup_tx_fd(priv, &fd);
+	dpaa_cleanup_tx_fd(priv, &fd, false);
 skb_to_fd_failed:
 enomem:
 	percpu_stats->tx_errors++;
@@ -2131,7 +2134,7 @@ static void dpaa_tx_error(struct net_device *net_dev,
 
 	percpu_priv->stats.tx_errors++;
 
-	skb = dpaa_cleanup_tx_fd(priv, fd);
+	skb = dpaa_cleanup_tx_fd(priv, fd, false);
 	dev_kfree_skb(skb);
 }
 
@@ -2171,7 +2174,7 @@ static void dpaa_tx_conf(struct net_device *net_dev,
 
 	percpu_priv->tx_confirm++;
 
-	skb = dpaa_cleanup_tx_fd(priv, fd);
+	skb = dpaa_cleanup_tx_fd(priv, fd, true);
 
 	consume_skb(skb);
 }
@@ -2398,7 +2401,7 @@ static void egress_ern(struct qman_portal *portal,
 	percpu_priv->stats.tx_fifo_errors++;
 	count_ern(percpu_priv, msg);
 
-	skb = dpaa_cleanup_tx_fd(priv, fd);
+	skb = dpaa_cleanup_tx_fd(priv, fd, false);
 	dev_kfree_skb_any(skb);
 }
 
-- 
2.1.0



* [net-next 05/13] dpaa_eth: simplify variables used in dpaa_cleanup_tx_fd()
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (3 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 04/13] dpaa_eth: avoid timestamp read on error paths Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 06/13] dpaa_eth: use fd information " Madalin Bucur
                   ` (7 subsequent siblings)
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Avoid casts and repeated conversions.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 75ade6a5599a..bde125a97f51 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1585,13 +1585,13 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 	struct device *dev = priv->net_dev->dev.parent;
 	struct skb_shared_hwtstamps shhwtstamps;
 	dma_addr_t addr = qm_fd_addr(fd);
+	void *vaddr = phys_to_virt(addr);
 	const struct qm_sg_entry *sgt;
-	struct sk_buff **skbh, *skb;
+	struct sk_buff *skb;
 	int nr_frags, i;
 	u64 ns;
 
-	skbh = (struct sk_buff **)phys_to_virt(addr);
-	skb = *skbh;
+	skb = *(struct sk_buff **)vaddr;
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
 		nr_frags = skb_shinfo(skb)->nr_frags;
@@ -1602,7 +1602,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 		/* The sgt buffer has been allocated with netdev_alloc_frag(),
 		 * it's from lowmem.
 		 */
-		sgt = phys_to_virt(addr + qm_fd_get_offset(fd));
+		sgt = vaddr + qm_fd_get_offset(fd);
 
 		/* sgt[0] is from lowmem, was dma_map_single()-ed */
 		dma_unmap_single(priv->tx_dma_dev, qm_sg_addr(&sgt[0]),
@@ -1617,7 +1617,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 		}
 	} else {
 		dma_unmap_single(priv->tx_dma_dev, addr,
-				 skb_tail_pointer(skb) - (u8 *)skbh, dma_dir);
+				 skb_tail_pointer(skb) - (u8 *)vaddr, dma_dir);
 	}
 
 	/* DMA unmapping is required before accessing the HW provided info */
@@ -1625,7 +1625,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
 		memset(&shhwtstamps, 0, sizeof(shhwtstamps));
 
-		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], (void *)skbh,
+		if (!fman_port_get_tstamp(priv->mac_dev->port[TX], vaddr,
 					  &ns)) {
 			shhwtstamps.hwtstamp = ns_to_ktime(ns);
 			skb_tstamp_tx(skb, &shhwtstamps);
@@ -1636,7 +1636,7 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 
 	if (qm_fd_get_format(fd) == qm_fd_sg)
 		/* Free the page frag that we allocated on Tx */
-		skb_free_frag(phys_to_virt(addr));
+		skb_free_frag(vaddr);
 
 	return skb;
 }
-- 
2.1.0



* [net-next 06/13] dpaa_eth: use fd information in dpaa_cleanup_tx_fd()
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (4 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 05/13] dpaa_eth: simplify variables used in dpaa_cleanup_tx_fd() Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 07/13] dpaa_eth: cleanup skb_to_contig_fd() Madalin Bucur
                   ` (6 subsequent siblings)
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Instead of reading skb fields, use information from the DPAA frame
descriptor. This way nothing needs to be read from the skb (stored
in the buffer) before the buffer is DMA unmapped.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 13 +++++++------
 1 file changed, 7 insertions(+), 6 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index bde125a97f51..8e36a9a789dd 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1588,13 +1588,10 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 	void *vaddr = phys_to_virt(addr);
 	const struct qm_sg_entry *sgt;
 	struct sk_buff *skb;
-	int nr_frags, i;
 	u64 ns;
-
-	skb = *(struct sk_buff **)vaddr;
+	int i;
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
-		nr_frags = skb_shinfo(skb)->nr_frags;
 		dma_unmap_single(priv->tx_dma_dev, addr,
 				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
 				 dma_dir);
@@ -1609,7 +1606,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 				 qm_sg_entry_get_len(&sgt[0]), dma_dir);
 
 		/* remaining pages were mapped with skb_frag_dma_map() */
-		for (i = 1; i <= nr_frags; i++) {
+		for (i = 1; (i < DPAA_SGT_MAX_ENTRIES) &&
+		     !qm_sg_entry_is_final(&sgt[i - 1]); i++) {
 			WARN_ON(qm_sg_entry_is_ext(&sgt[i]));
 
 			dma_unmap_page(priv->tx_dma_dev, qm_sg_addr(&sgt[i]),
@@ -1617,9 +1615,12 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 		}
 	} else {
 		dma_unmap_single(priv->tx_dma_dev, addr,
-				 skb_tail_pointer(skb) - (u8 *)vaddr, dma_dir);
+				 priv->tx_headroom + qm_fd_get_length(fd),
+				 dma_dir);
 	}
 
+	skb = *(struct sk_buff **)vaddr;
+
 	/* DMA unmapping is required before accessing the HW provided info */
 	if (ts && priv->tx_tstamp &&
 	    skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
-- 
2.1.0



* [net-next 07/13] dpaa_eth: cleanup skb_to_contig_fd()
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (5 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 06/13] dpaa_eth: use fd information " Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 08/13] dpaa_eth: use a page to store the SGT Madalin Bucur
                   ` (5 subsequent siblings)
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Remove cast, align variable name, simplify DMA map size computation.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 8e36a9a789dd..1084bc1b1d34 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1369,7 +1369,7 @@ static void count_ern(struct dpaa_percpu_priv *percpu_priv,
 static int dpaa_enable_tx_csum(struct dpaa_priv *priv,
 			       struct sk_buff *skb,
 			       struct qm_fd *fd,
-			       char *parse_results)
+			       void *parse_results)
 {
 	struct fman_prs_result *parse_result;
 	u16 ethertype = ntohs(skb->protocol);
@@ -1831,7 +1831,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 {
 	struct net_device *net_dev = priv->net_dev;
 	enum dma_data_direction dma_dir;
-	unsigned char *buffer_start;
+	unsigned char *buff_start;
 	struct sk_buff **skbh;
 	dma_addr_t addr;
 	int err;
@@ -1840,10 +1840,10 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 	 * available, so just use that for offset.
 	 */
 	fd->bpid = FSL_DPAA_BPID_INV;
-	buffer_start = skb->data - priv->tx_headroom;
+	buff_start = skb->data - priv->tx_headroom;
 	dma_dir = DMA_TO_DEVICE;
 
-	skbh = (struct sk_buff **)buffer_start;
+	skbh = (struct sk_buff **)buff_start;
 	*skbh = skb;
 
 	/* Enable L3/L4 hardware checksum computation.
@@ -1852,7 +1852,7 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 	 * need to write into the skb.
 	 */
 	err = dpaa_enable_tx_csum(priv, skb, fd,
-				  ((char *)skbh) + DPAA_TX_PRIV_DATA_SIZE);
+				  buff_start + DPAA_TX_PRIV_DATA_SIZE);
 	if (unlikely(err < 0)) {
 		if (net_ratelimit())
 			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1865,8 +1865,8 @@ static int skb_to_contig_fd(struct dpaa_priv *priv,
 	fd->cmd |= cpu_to_be32(FM_FD_CMD_FCO);
 
 	/* Map the entire buffer size that may be seen by FMan, but no more */
-	addr = dma_map_single(priv->tx_dma_dev, skbh,
-			      skb_tail_pointer(skb) - buffer_start, dma_dir);
+	addr = dma_map_single(priv->tx_dma_dev, buff_start,
+			      priv->tx_headroom + skb->len, dma_dir);
 	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
 		if (net_ratelimit())
 			netif_err(priv, tx_err, net_dev, "dma_map_single() failed\n");
-- 
2.1.0



* [net-next 08/13] dpaa_eth: use a page to store the SGT
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (6 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 07/13] dpaa_eth: cleanup skb_to_contig_fd() Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 09/13] dpaa_eth: add dropped frames to percpu ethtool stats Madalin Bucur
                   ` (4 subsequent siblings)
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Use a page to store the scatter gather table on the transmit path.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 43 +++++++++++++-------------
 1 file changed, 21 insertions(+), 22 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 1084bc1b1d34..ee22ed3207b4 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -1592,9 +1592,9 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 	int i;
 
 	if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
-		dma_unmap_single(priv->tx_dma_dev, addr,
-				 qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
-				 dma_dir);
+		dma_unmap_page(priv->tx_dma_dev, addr,
+			       qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
+			       dma_dir);
 
 		/* The sgt buffer has been allocated with netdev_alloc_frag(),
 		 * it's from lowmem.
@@ -1636,8 +1636,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
 	}
 
 	if (qm_fd_get_format(fd) == qm_fd_sg)
-		/* Free the page frag that we allocated on Tx */
-		skb_free_frag(vaddr);
+		/* Free the page that we allocated on Tx for the SGT */
+		free_pages((unsigned long)vaddr, 0);
 
 	return skb;
 }
@@ -1885,21 +1885,20 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	struct net_device *net_dev = priv->net_dev;
 	struct qm_sg_entry *sgt;
 	struct sk_buff **skbh;
-	int i, j, err, sz;
-	void *buffer_start;
+	void *buff_start;
 	skb_frag_t *frag;
 	dma_addr_t addr;
 	size_t frag_len;
-	void *sgt_buf;
-
-	/* get a page frag to store the SGTable */
-	sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
-	sgt_buf = netdev_alloc_frag(sz);
-	if (unlikely(!sgt_buf)) {
-		netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
-			   sz);
+	struct page *p;
+	int i, j, err;
+
+	/* get a page to store the SGTable */
+	p = dev_alloc_pages(0);
+	if (unlikely(!p)) {
+		netdev_err(net_dev, "dev_alloc_pages() failed\n");
 		return -ENOMEM;
 	}
+	buff_start = page_address(p);
 
 	/* Enable L3/L4 hardware checksum computation.
 	 *
@@ -1907,7 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	 * need to write into the skb.
 	 */
 	err = dpaa_enable_tx_csum(priv, skb, fd,
-				  sgt_buf + DPAA_TX_PRIV_DATA_SIZE);
+				  buff_start + DPAA_TX_PRIV_DATA_SIZE);
 	if (unlikely(err < 0)) {
 		if (net_ratelimit())
 			netif_err(priv, tx_err, net_dev, "HW csum error: %d\n",
@@ -1916,7 +1915,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	}
 
 	/* SGT[0] is used by the linear part */
-	sgt = (struct qm_sg_entry *)(sgt_buf + priv->tx_headroom);
+	sgt = (struct qm_sg_entry *)(buff_start + priv->tx_headroom);
 	frag_len = skb_headlen(skb);
 	qm_sg_entry_set_len(&sgt[0], frag_len);
 	sgt[0].bpid = FSL_DPAA_BPID_INV;
@@ -1954,15 +1953,15 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 	/* Set the final bit in the last used entry of the SGT */
 	qm_sg_entry_set_f(&sgt[nr_frags], frag_len);
 
+	/* set fd offset to priv->tx_headroom */
 	qm_fd_set_sg(fd, priv->tx_headroom, skb->len);
 
 	/* DMA map the SGT page */
-	buffer_start = (void *)sgt - priv->tx_headroom;
-	skbh = (struct sk_buff **)buffer_start;
+	skbh = (struct sk_buff **)buff_start;
 	*skbh = skb;
 
-	addr = dma_map_single(priv->tx_dma_dev, buffer_start,
-			      priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
+	addr = dma_map_page(priv->tx_dma_dev, p, 0,
+			    priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
 	if (unlikely(dma_mapping_error(priv->tx_dma_dev, addr))) {
 		netdev_err(priv->net_dev, "DMA mapping failed\n");
 		err = -EINVAL;
@@ -1982,7 +1981,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
 			       qm_sg_entry_get_len(&sgt[j]), dma_dir);
 sg0_map_failed:
 csum_failed:
-	skb_free_frag(sgt_buf);
+	free_pages((unsigned long)buff_start, 0);
 
 	return err;
 }
-- 
2.1.0



* [net-next 09/13] dpaa_eth: add dropped frames to percpu ethtool stats
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (7 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 08/13] dpaa_eth: use a page to store the SGT Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 10/13] dpaa_eth: remove netdev_err() for user errors Madalin Bucur
                   ` (3 subsequent siblings)
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Prior to this change, the frames dropped on receive or transmit
were not displayed in the ethtool statistics, leaving the dropped
frames unaccounted for.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index bc6ed1df53ca..1c689e11c61f 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -47,6 +47,8 @@ static const char dpaa_stats_percpu[][ETH_GSTRING_LEN] = {
 	"tx S/G",
 	"tx error",
 	"rx error",
+	"rx dropped",
+	"tx dropped",
 };
 
 static char dpaa_stats_global[][ETH_GSTRING_LEN] = {
@@ -262,6 +264,12 @@ static void copy_stats(struct dpaa_percpu_priv *percpu_priv, int num_cpus,
 	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_errors;
 	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_errors;
 
+	data[crr * num_values + crr_cpu] = percpu_priv->stats.rx_dropped;
+	data[crr++ * num_values + num_cpus] += percpu_priv->stats.rx_dropped;
+
+	data[crr * num_values + crr_cpu] = percpu_priv->stats.tx_dropped;
+	data[crr++ * num_values + num_cpus] += percpu_priv->stats.tx_dropped;
+
 	data[crr * num_values + crr_cpu] = bp_count;
 	data[crr++ * num_values + num_cpus] += bp_count;
 }
-- 
2.1.0



* [net-next 10/13] dpaa_eth: remove netdev_err() for user errors
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (8 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 09/13] dpaa_eth: add dropped frames to percpu ethtool stats Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:47   ` Joe Perches
  2019-10-31 11:36 ` [net-next 11/13] dpaa_eth: extend delays in ndo_stop Madalin Bucur
                   ` (2 subsequent siblings)
  12 siblings, 1 reply; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

A user reports that an application making an (incorrect) call to
restart AN on a fixed link DPAA interface triggers an error message
in the kernel log, while the returned EINVAL should be enough.

Reported-by: Joakim Tjernlund <Joakim.Tjernlund@infinera.com>
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
index 1c689e11c61f..126c0f1d8442 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
@@ -81,7 +81,6 @@ static int dpaa_get_link_ksettings(struct net_device *net_dev,
 				   struct ethtool_link_ksettings *cmd)
 {
 	if (!net_dev->phydev) {
-		netdev_dbg(net_dev, "phy device not initialized\n");
 		return 0;
 	}
 
@@ -96,7 +95,6 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
 	int err;
 
 	if (!net_dev->phydev) {
-		netdev_err(net_dev, "phy device not initialized\n");
 		return -ENODEV;
 	}
 
@@ -143,7 +141,6 @@ static int dpaa_nway_reset(struct net_device *net_dev)
 	int err;
 
 	if (!net_dev->phydev) {
-		netdev_err(net_dev, "phy device not initialized\n");
 		return -ENODEV;
 	}
 
@@ -168,7 +165,6 @@ static void dpaa_get_pauseparam(struct net_device *net_dev,
 	mac_dev = priv->mac_dev;
 
 	if (!net_dev->phydev) {
-		netdev_err(net_dev, "phy device not initialized\n");
 		return;
 	}
 
-- 
2.1.0



* [net-next 11/13] dpaa_eth: extend delays in ndo_stop
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (9 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 10/13] dpaa_eth: remove netdev_err() for user errors Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 12/13] soc: fsl: qbman: allow registering a device link for the portal user Madalin Bucur
  2019-10-31 11:36 ` [net-next 13/13] dpaa_eth: register a device link for the qman portal used Madalin Bucur
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Make sure all the frames that are in flight have time to be processed
before the interface is completely brought down. Add a missing delay
for the Rx path.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index ee22ed3207b4..9e6080aaf77a 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -266,7 +266,7 @@ static int dpaa_stop(struct net_device *net_dev)
 	/* Allow the Fman (Tx) port to process in-flight frames before we
 	 * try switching it off.
 	 */
-	usleep_range(5000, 10000);
+	msleep(200);
 
 	err = mac_dev->stop(mac_dev);
 	if (err < 0)
@@ -283,6 +283,8 @@ static int dpaa_stop(struct net_device *net_dev)
 		phy_disconnect(net_dev->phydev);
 	net_dev->phydev = NULL;
 
+	msleep(200);
+
 	return err;
 }
 
-- 
2.1.0



* [net-next 12/13] soc: fsl: qbman: allow registering a device link for the portal user
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (10 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 11/13] dpaa_eth: extend delays in ndo_stop Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  2019-10-31 11:36 ` [net-next 13/13] dpaa_eth: register a device link for the qman portal used Madalin Bucur
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Introduce the API required to make sure that the devices that use
the QMan portal are unbound when the portal is unbound.
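
A short sketch of how a portal consumer is expected to use the new
API (this mirrors what the dpaa_eth patch later in this series does;
the surrounding probe() context and error handling are illustrative):

	/* Tie the consumer's lifetime to the portal device, so that
	 * unbinding the portal also unbinds the consumer.
	 */
	struct qman_portal *portal = qman_get_affine_portal(cpu);
	int err = qman_start_using_portal(portal, &pdev->dev);

	if (err)
		return err;	/* device link could not be created */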

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/soc/fsl/qbman/qman.c | 13 +++++++++++++
 include/soc/fsl/qman.h       | 18 ++++++++++++++++++
 2 files changed, 31 insertions(+)

diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index bf68d86d80ee..bc75a5882b9e 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1749,6 +1749,19 @@ struct qman_portal *qman_get_affine_portal(int cpu)
 }
 EXPORT_SYMBOL(qman_get_affine_portal);
 
+int qman_start_using_portal(struct qman_portal *p, struct device *dev)
+{
+	return (!device_link_add(dev, p->config->dev,
+				 DL_FLAG_AUTOREMOVE_CONSUMER)) ? -EINVAL : 0;
+}
+EXPORT_SYMBOL(qman_start_using_portal);
+
+void qman_stop_using_portal(struct qman_portal *p, struct device *dev)
+{
+	device_link_remove(dev, p->config->dev);
+}
+EXPORT_SYMBOL(qman_stop_using_portal);
+
 int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
 {
 	return __poll_portal_fast(p, limit);
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index aa31c05a103a..c499c5cfa7c9 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -32,6 +32,7 @@
 #define __FSL_QMAN_H
 
 #include <linux/bitops.h>
+#include <linux/device.h>
 
 /* Hardware constants */
 #define QM_CHANNEL_SWPORTAL0 0
@@ -915,6 +916,23 @@ u16 qman_affine_channel(int cpu);
 struct qman_portal *qman_get_affine_portal(int cpu);
 
 /**
+ * qman_start_using_portal - register a device link for the portal user
+ * @p: the portal that will be in use
+ * @dev: the device that will use the portal
+ *
+ * Makes sure that the devices that use the portal are unbound when the
+ * portal is unbound
+ */
+int qman_start_using_portal(struct qman_portal *p, struct device *dev);
+
+/**
+ * qman_stop_using_portal - deregister a device link for the portal user
+ * @p: the portal that will no longer be in use
+ * @dev: the device that uses the portal
+ */
+void qman_stop_using_portal(struct qman_portal *p, struct device *dev);
+
+/**
  * qman_p_poll_dqrr - process DQRR (fast-path) entries
  * @limit: the maximum number of DQRR entries to process
  *
-- 
2.1.0



* [net-next 13/13] dpaa_eth: register a device link for the qman portal used
  2019-10-31 11:36 [net-next 00/13] DPAA Ethernet changes Madalin Bucur
                   ` (11 preceding siblings ...)
  2019-10-31 11:36 ` [net-next 12/13] soc: fsl: qbman: allow registering a device link for the portal user Madalin Bucur
@ 2019-10-31 11:36 ` Madalin Bucur
  12 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 11:36 UTC (permalink / raw)
  To: davem, netdev; +Cc: roy.pledge, jakub.kicinski, Madalin Bucur

Before this change, unbinding a QMan portal did not trigger a
corresponding unbinding of the dpaa_eth interfaces making use of it;
the first QMan portal related operation issued afterwards crashed the
kernel. The device link ensures that the dpaa_eth dependency upon the
QMan portal it uses is honoured when the portal is removed.

Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
---
 drivers/net/ethernet/freescale/dpaa/dpaa_eth.c | 5 +++--
 drivers/soc/fsl/qbman/qman.c                   | 6 ------
 include/soc/fsl/qman.h                         | 7 -------
 3 files changed, 3 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 9e6080aaf77a..18d3ec0f96d9 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -750,7 +750,7 @@ static void dpaa_release_channel(void)
 	qman_release_pool(rx_pool_channel);
 }
 
-static void dpaa_eth_add_channel(u16 channel)
+static void dpaa_eth_add_channel(u16 channel, struct device *dev)
 {
 	u32 pool = QM_SDQCR_CHANNELS_POOL_CONV(channel);
 	const cpumask_t *cpus = qman_affine_cpus();
@@ -760,6 +760,7 @@ static void dpaa_eth_add_channel(u16 channel)
 	for_each_cpu_and(cpu, cpus, cpu_online_mask) {
 		portal = qman_get_affine_portal(cpu);
 		qman_p_static_dequeue_add(portal, pool);
+		qman_start_using_portal(portal, dev);
 	}
 }
 
@@ -2873,7 +2874,7 @@ static int dpaa_eth_probe(struct platform_device *pdev)
 	/* Walk the CPUs with affine portals
 	 * and add this pool channel to each's dequeue mask.
 	 */
-	dpaa_eth_add_channel(priv->channel);
+	dpaa_eth_add_channel(priv->channel, &pdev->dev);
 
 	dpaa_fq_setup(priv, &dpaa_fq_cbs, priv->mac_dev->port[TX]);
 
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index bc75a5882b9e..1e164e03410a 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1756,12 +1756,6 @@ int qman_start_using_portal(struct qman_portal *p, struct device *dev)
 }
 EXPORT_SYMBOL(qman_start_using_portal);
 
-void qman_stop_using_portal(struct qman_portal *p, struct device *dev)
-{
-	device_link_remove(dev, p->config->dev);
-}
-EXPORT_SYMBOL(qman_stop_using_portal);
-
 int qman_p_poll_dqrr(struct qman_portal *p, unsigned int limit)
 {
 	return __poll_portal_fast(p, limit);
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index c499c5cfa7c9..cfe00e08e85b 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -926,13 +926,6 @@ struct qman_portal *qman_get_affine_portal(int cpu);
 int qman_start_using_portal(struct qman_portal *p, struct device *dev);
 
 /**
- * qman_stop_using_portal - deregister a device link for the portal user
- * @p: the portal that will no longer be in use
- * @dev: the device that uses the portal
- */
-void qman_stop_using_portal(struct qman_portal *p, struct device *dev);
-
-/**
  * qman_p_poll_dqrr - process DQRR (fast-path) entries
  * @limit: the maximum number of DQRR entries to process
  *
-- 
2.1.0



* Re: [net-next 10/13] dpaa_eth: remove netdev_err() for user errors
  2019-10-31 11:36 ` [net-next 10/13] dpaa_eth: remove netdev_err() for user errors Madalin Bucur
@ 2019-10-31 11:47   ` Joe Perches
  2019-10-31 13:58     ` Madalin Bucur
  0 siblings, 1 reply; 16+ messages in thread
From: Joe Perches @ 2019-10-31 11:47 UTC (permalink / raw)
  To: madalin.bucur, davem, netdev; +Cc: roy.pledge, jakub.kicinski

On Thu, 2019-10-31 at 13:36 +0200, Madalin Bucur wrote:
> User reports that an application making an (incorrect) call to
> restart AN on a fixed link DPAA interface triggers an error in
> the kernel log while the returned EINVAL should be enough.
[]
> diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
[]
> @@ -81,7 +81,6 @@ static int dpaa_get_link_ksettings(struct net_device *net_dev,
>  				   struct ethtool_link_ksettings *cmd)
>  {
>  	if (!net_dev->phydev) {
> -		netdev_dbg(net_dev, "phy device not initialized\n");
>  		return 0;
>  	}

ideally the now excess braces would be removed too.
 
> @@ -96,7 +95,6 @@ static int dpaa_set_link_ksettings(struct net_device *net_dev,
>  	int err;
>  
>  	if (!net_dev->phydev) {
> -		netdev_err(net_dev, "phy device not initialized\n");
>  		return -ENODEV;
>  	}

etc...




* RE: [net-next 10/13] dpaa_eth: remove netdev_err() for user errors
  2019-10-31 11:47   ` Joe Perches
@ 2019-10-31 13:58     ` Madalin Bucur
  0 siblings, 0 replies; 16+ messages in thread
From: Madalin Bucur @ 2019-10-31 13:58 UTC (permalink / raw)
  To: Joe Perches, davem, netdev; +Cc: Roy Pledge, jakub.kicinski

> -----Original Message-----
> From: Joe Perches <joe@perches.com>
> Sent: Thursday, October 31, 2019 1:48 PM
> To: Madalin Bucur <madalin.bucur@nxp.com>; davem@davemloft.net;
> netdev@vger.kernel.org
> Cc: Roy Pledge <roy.pledge@nxp.com>; jakub.kicinski@netronome.com
> Subject: Re: [net-next 10/13] dpaa_eth: remove netdev_err() for user
> errors
> 
> On Thu, 2019-10-31 at 13:36 +0200, Madalin Bucur wrote:
> > User reports that an application making an (incorrect) call to
> > restart AN on a fixed link DPAA interface triggers an error in
> > the kernel log while the returned EINVAL should be enough.
> []
> > diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
> b/drivers/net/ethernet/freescale/dpaa/dpaa_ethtool.c
> []
> > @@ -81,7 +81,6 @@ static int dpaa_get_link_ksettings(struct net_device
> *net_dev,
> >  				   struct ethtool_link_ksettings *cmd)
> >  {
> >  	if (!net_dev->phydev) {
> > -		netdev_dbg(net_dev, "phy device not initialized\n");
> >  		return 0;
> >  	}
> 
> ideally the now excess braces would be removed too.

You're right, I'll send a v2
 
> > @@ -96,7 +95,6 @@ static int dpaa_set_link_ksettings(struct net_device
> *net_dev,
> >  	int err;
> >
> >  	if (!net_dev->phydev) {
> > -		netdev_err(net_dev, "phy device not initialized\n");
> >  		return -ENODEV;
> >  	}
> 
> etc...
> 


