linux-kernel.vger.kernel.org archive mirror
* [PATCH 0/4] net: thunderx: Miscellaneous fixes
@ 2017-03-03 10:47 sunil.kovvuri
  2017-03-03 10:47 ` [PATCH 1/4] net: thunderx: Fix IOMMU translation faults sunil.kovvuri
                   ` (3 more replies)
  0 siblings, 4 replies; 11+ messages in thread
From: sunil.kovvuri @ 2017-03-03 10:47 UTC (permalink / raw)
  To: netdev; +Cc: linux-kernel, linux-arm-kernel, Sunil Goutham

From: Sunil Goutham <sgoutham@cavium.com>

This patch set fixes multiple issues: IOMMU translation faults seen
when the kernel is booted with the IOMMU enabled on the host,
incorrect MAC addresses being read from ACPI tables, and IPv6 UDP
packets being dropped due to checksum validation failures.

Sunil Goutham (3):
  net: thunderx: Fix IOMMU translation faults
  net: thunderx: Fix LMAC mode debug prints for QSGMII mode
  net: thunderx: Fix invalid mac addresses for node1 interfaces

Thanneeru Srinivasulu (1):
  net: thunderx: Allow IPv6 frames with zero UDP checksum

 drivers/net/ethernet/cavium/thunder/nic.h          |   1 +
 drivers/net/ethernet/cavium/thunder/nicvf_main.c   |  12 +-
 drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 193 +++++++++++++++++----
 drivers/net/ethernet/cavium/thunder/nicvf_queues.h |   2 +
 drivers/net/ethernet/cavium/thunder/thunder_bgx.c  |  64 +++++--
 drivers/net/ethernet/cavium/thunder/thunder_bgx.h  |   1 -
 6 files changed, 223 insertions(+), 50 deletions(-)

-- 
2.7.4

^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH 1/4] net: thunderx: Fix IOMMU translation faults
  2017-03-03 10:47 [PATCH 0/4] net: thunderx: Miscellaneous fixes sunil.kovvuri
@ 2017-03-03 10:47 ` sunil.kovvuri
  2017-03-03 17:56   ` David Miller
  2017-03-03 10:47 ` [PATCH 2/4] net: thunderx: Fix LMAC mode debug prints for QSGMII mode sunil.kovvuri
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 11+ messages in thread
From: sunil.kovvuri @ 2017-03-03 10:47 UTC (permalink / raw)
  To: netdev; +Cc: linux-kernel, linux-arm-kernel, Sunil Goutham

From: Sunil Goutham <sgoutham@cavium.com>

ACPI support has been added to the ARM IOMMU driver in the 4.10
kernel, and as a result VNIC interfaces throw translation faults
when the kernel is booted with ACPI, because this driver was not
using the DMA API.

On T88, hardware takes care of data coherency when performing DMA
operations, so in the non-IOMMU case using the DMA API only wastes
CPU cycles. This patch fixes the translation faults by doing a
buffer dma_map/dma_unmap only when the corresponding PCI device is
attached to an IOMMU, i.e. when an iommu_domain is set.

Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
---
 drivers/net/ethernet/cavium/thunder/nic.h          |   1 +
 drivers/net/ethernet/cavium/thunder/nicvf_main.c   |  12 +-
 drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 187 ++++++++++++++++++---
 drivers/net/ethernet/cavium/thunder/nicvf_queues.h |   2 +
 4 files changed, 174 insertions(+), 28 deletions(-)

diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index e739c71..2269ff5 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -269,6 +269,7 @@ struct nicvf {
 #define	MAX_QUEUES_PER_QSET			8
 	struct queue_set	*qs;
 	struct nicvf_cq_poll	*napi[8];
+	void			*iommu_domain;
 	u8			vf_id;
 	u8			sqs_id;
 	bool                    sqs_mode;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 6feaa24..8d60c3b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -16,6 +16,7 @@
 #include <linux/log2.h>
 #include <linux/prefetch.h>
 #include <linux/irq.h>
+#include <linux/iommu.h>
 
 #include "nic_reg.h"
 #include "nic.h"
@@ -525,7 +526,12 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
 			/* Get actual TSO descriptors and free them */
 			tso_sqe =
 			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+						 tso_sqe->subdesc_cnt);
 			nicvf_put_sq_desc(sq, tso_sqe->subdesc_cnt + 1);
+		} else {
+			nicvf_unmap_sndq_buffers(nic, sq, cqe_tx->sqe_ptr,
+						 hdr->subdesc_cnt);
 		}
 		nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
 		prefetch(skb);
@@ -576,6 +582,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 {
 	struct sk_buff *skb;
 	struct nicvf *nic = netdev_priv(netdev);
+	struct nicvf *snic = nic;
 	int err = 0;
 	int rq_idx;
 
@@ -592,7 +599,7 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
 	if (err && !cqe_rx->rb_cnt)
 		return;
 
-	skb = nicvf_get_rcv_skb(nic, cqe_rx);
+	skb = nicvf_get_rcv_skb(snic, cqe_rx);
 	if (!skb) {
 		netdev_dbg(nic->netdev, "Packet not received\n");
 		return;
@@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (!pass1_silicon(nic->pdev))
 		nic->hw_tso = true;
 
+	/* Check if we are attached to IOMMU */
+	nic->iommu_domain = iommu_get_domain_for_dev(dev);
+
 	pci_read_config_word(nic->pdev, PCI_SUBSYSTEM_ID, &sdevid);
 	if (sdevid == 0xA134)
 		nic->t88 = true;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ac0390b..ee3aa16 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -10,6 +10,7 @@
 #include <linux/netdevice.h>
 #include <linux/ip.h>
 #include <linux/etherdevice.h>
+#include <linux/iommu.h>
 #include <net/ip.h>
 #include <net/tso.h>
 
@@ -18,6 +19,49 @@
 #include "q_struct.h"
 #include "nicvf_queues.h"
 
+#define NICVF_PAGE_ORDER ((PAGE_SIZE <= 4096) ?  PAGE_ALLOC_COSTLY_ORDER : 0)
+
+static inline u64 nicvf_iova_to_phys(struct nicvf *nic, dma_addr_t dma_addr)
+{
+	/* Translation is installed only when IOMMU is present */
+	if (nic->iommu_domain)
+		return iommu_iova_to_phys(nic->iommu_domain, dma_addr);
+	return dma_addr;
+}
+
+static inline u64 nicvf_dma_map(struct nicvf *nic, struct page *page,
+				int offset, int len, int dir)
+{
+	/* Since HW ensures data coherency, calling DMA apis when there
+	 * is no IOMMU would only result in wasting CPU cycles.
+	 */
+	if (!nic->iommu_domain)
+		return virt_to_phys(page_address(page) + offset);
+
+	/* CPU sync not required */
+	return (u64)dma_map_page_attrs(&nic->pdev->dev, page,
+				       offset, len, dir,
+				       DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline void nicvf_dma_unmap(struct nicvf *nic, dma_addr_t dma_addr,
+				   int len, int dir)
+{
+	if (!nic->iommu_domain)
+		return;
+
+	/* HW will ensure data coherency, CPU sync not required */
+	dma_unmap_page_attrs(&nic->pdev->dev, dma_addr, len,
+			     dir, DMA_ATTR_SKIP_CPU_SYNC);
+}
+
+static inline int nicvf_dma_map_error(struct nicvf *nic, dma_addr_t dma_addr)
+{
+	if (!nic->iommu_domain)
+		return 0;
+	return dma_mapping_error(&nic->pdev->dev, dma_addr);
+}
+
 static void nicvf_get_page(struct nicvf *nic)
 {
 	if (!nic->rb_pageref || !nic->rb_page)
@@ -87,7 +131,7 @@ static void nicvf_free_q_desc_mem(struct nicvf *nic, struct q_desc_mem *dmem)
 static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 					 u32 buf_len, u64 **rbuf)
 {
-	int order = (PAGE_SIZE <= 4096) ?  PAGE_ALLOC_COSTLY_ORDER : 0;
+	int order = NICVF_PAGE_ORDER;
 
 	/* Check if request can be accommodated in previously allocated page */
 	if (nic->rb_page &&
@@ -112,7 +156,14 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
 	}
 
 ret:
-	*rbuf = (u64 *)((u64)page_address(nic->rb_page) + nic->rb_page_offset);
+	*rbuf = (u64 *)nicvf_dma_map(nic, nic->rb_page, nic->rb_page_offset,
+				     buf_len, DMA_FROM_DEVICE);
+	if (nicvf_dma_map_error(nic, (dma_addr_t)*rbuf)) {
+		if (!nic->rb_page_offset)
+			__free_pages(nic->rb_page, order);
+		nic->rb_page = NULL;
+		return -ENOMEM;
+	}
 	nic->rb_page_offset += buf_len;
 
 	return 0;
@@ -158,16 +209,21 @@ static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
 	rbdr->dma_size = buf_size;
 	rbdr->enable = true;
 	rbdr->thresh = RBDR_THRESH;
+	rbdr->head = 0;
+	rbdr->tail = 0;
 
 	nic->rb_page = NULL;
 	for (idx = 0; idx < ring_len; idx++) {
 		err = nicvf_alloc_rcv_buffer(nic, GFP_KERNEL, RCV_FRAG_LEN,
 					     &rbuf);
-		if (err)
+		if (err) {
+			/* To free already allocated and mapped ones */
+			rbdr->tail = idx - 1;
 			return err;
+		}
 
 		desc = GET_RBDR_DESC(rbdr, idx);
-		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
 	}
 
 	nicvf_get_page(nic);
@@ -179,7 +235,7 @@ static int  nicvf_init_rbdr(struct nicvf *nic, struct rbdr *rbdr,
 static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
 {
 	int head, tail;
-	u64 buf_addr;
+	u64 buf_addr, phys_addr;
 	struct rbdr_entry_t *desc;
 
 	if (!rbdr)
@@ -192,18 +248,26 @@ static void nicvf_free_rbdr(struct nicvf *nic, struct rbdr *rbdr)
 	head = rbdr->head;
 	tail = rbdr->tail;
 
-	/* Free SKBs */
+	/* Release page references */
 	while (head != tail) {
 		desc = GET_RBDR_DESC(rbdr, head);
-		buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
-		put_page(virt_to_page(phys_to_virt(buf_addr)));
+		buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+		phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+		nicvf_dma_unmap(nic, buf_addr,
+				RCV_FRAG_LEN, DMA_FROM_DEVICE);
+		if (phys_addr)
+			put_page(virt_to_page(phys_to_virt(phys_addr)));
 		head++;
 		head &= (rbdr->dmem.q_len - 1);
 	}
-	/* Free SKB of tail desc */
+	/* Release buffer of tail desc */
 	desc = GET_RBDR_DESC(rbdr, tail);
-	buf_addr = desc->buf_addr << NICVF_RCV_BUF_ALIGN;
-	put_page(virt_to_page(phys_to_virt(buf_addr)));
+	buf_addr = ((u64)desc->buf_addr) << NICVF_RCV_BUF_ALIGN;
+	phys_addr = nicvf_iova_to_phys(nic, buf_addr);
+	nicvf_dma_unmap(nic, buf_addr,
+			RCV_FRAG_LEN, DMA_FROM_DEVICE);
+	if (phys_addr)
+		put_page(virt_to_page(phys_to_virt(phys_addr)));
 
 	/* Free RBDR ring */
 	nicvf_free_q_desc_mem(nic, &rbdr->dmem);
@@ -250,7 +314,7 @@ static void nicvf_refill_rbdr(struct nicvf *nic, gfp_t gfp)
 			break;
 
 		desc = GET_RBDR_DESC(rbdr, tail);
-		desc->buf_addr = virt_to_phys(rbuf) >> NICVF_RCV_BUF_ALIGN;
+		desc->buf_addr = (u64)rbuf >> NICVF_RCV_BUF_ALIGN;
 		refill_rb_cnt--;
 		new_rb++;
 	}
@@ -361,9 +425,29 @@ static int nicvf_init_snd_queue(struct nicvf *nic,
 	return 0;
 }
 
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+			      int hdr_sqe, u8 subdesc_cnt)
+{
+	u8 idx;
+	struct sq_gather_subdesc *gather;
+
+	if (!nic->iommu_domain)
+		return;
+
+	/* Unmap DMA mapped skb data buffers */
+	for (idx = 0; idx < subdesc_cnt; idx++) {
+		hdr_sqe++;
+		hdr_sqe &= (sq->dmem.q_len - 1);
+		gather = (struct sq_gather_subdesc *)GET_SQ_DESC(sq, hdr_sqe);
+		nicvf_dma_unmap(nic, gather->addr, gather->size, DMA_TO_DEVICE);
+	}
+}
+
 static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 {
 	struct sk_buff *skb;
+	struct sq_hdr_subdesc *hdr;
+	struct sq_hdr_subdesc *tso_sqe;
 
 	if (!sq)
 		return;
@@ -379,8 +463,22 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
 	smp_rmb();
 	while (sq->head != sq->tail) {
 		skb = (struct sk_buff *)sq->skbuff[sq->head];
-		if (skb)
-			dev_kfree_skb_any(skb);
+		if (!skb)
+			goto next;
+		hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, sq->head);
+		/* Check for dummy descriptor used for HW TSO offload on 88xx */
+		if (hdr->dont_send) {
+			/* Get actual TSO descriptors and unmap them */
+			tso_sqe =
+			 (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, hdr->rsvd2);
+			nicvf_unmap_sndq_buffers(nic, sq, hdr->rsvd2,
+						 tso_sqe->subdesc_cnt);
+		} else {
+			nicvf_unmap_sndq_buffers(nic, sq, sq->head,
+						 hdr->subdesc_cnt);
+		}
+		dev_kfree_skb_any(skb);
+next:
 		sq->head++;
 		sq->head &= (sq->dmem.q_len - 1);
 	}
@@ -882,6 +980,14 @@ static inline int nicvf_get_sq_desc(struct snd_queue *sq, int desc_cnt)
 	return qentry;
 }
 
+/* Rollback to previous tail pointer when descriptors not used */
+static inline void nicvf_rollback_sq_desc(struct snd_queue *sq,
+					  int qentry, int desc_cnt)
+{
+	sq->tail = qentry;
+	atomic_add(desc_cnt, &sq->free_cnt);
+}
+
 /* Free descriptor back to SQ for future use */
 void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt)
 {
@@ -1207,8 +1313,9 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
 			struct sk_buff *skb, u8 sq_num)
 {
 	int i, size;
-	int subdesc_cnt, tso_sqe = 0;
+	int subdesc_cnt, hdr_sqe = 0;
 	int qentry;
+	u64 dma_addr;
 
 	subdesc_cnt = nicvf_sq_subdesc_required(nic, skb);
 	if (subdesc_cnt > atomic_read(&sq->free_cnt))
@@ -1223,12 +1330,20 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
 	/* Add SQ header subdesc */
 	nicvf_sq_add_hdr_subdesc(nic, sq, qentry, subdesc_cnt - 1,
 				 skb, skb->len);
-	tso_sqe = qentry;
+	hdr_sqe = qentry;
 
 	/* Add SQ gather subdescs */
 	qentry = nicvf_get_nxt_sqentry(sq, qentry);
 	size = skb_is_nonlinear(skb) ? skb_headlen(skb) : skb->len;
-	nicvf_sq_add_gather_subdesc(sq, qentry, size, virt_to_phys(skb->data));
+	dma_addr = nicvf_dma_map(nic, virt_to_page(skb->data),
+				 offset_in_page(skb->data),
+				 size, DMA_TO_DEVICE);
+	if (nicvf_dma_map_error(nic, dma_addr)) {
+		nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+		return 0;
+	}
+
+	nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
 
 	/* Check for scattered buffer */
 	if (!skb_is_nonlinear(skb))
@@ -1241,15 +1356,24 @@ int nicvf_sq_append_skb(struct nicvf *nic, struct snd_queue *sq,
 
 		qentry = nicvf_get_nxt_sqentry(sq, qentry);
 		size = skb_frag_size(frag);
-		nicvf_sq_add_gather_subdesc(sq, qentry, size,
-					    virt_to_phys(
-					    skb_frag_address(frag)));
+		dma_addr = nicvf_dma_map(nic, skb_frag_page(frag),
+					 frag->page_offset,
+					 size, DMA_TO_DEVICE);
+		if (nicvf_dma_map_error(nic, dma_addr)) {
+			/* Free entire chain of mapped buffers
+			 * here 'i' = frags mapped + above mapped skb->data
+			 */
+			nicvf_unmap_sndq_buffers(nic, sq, hdr_sqe, i);
+			nicvf_rollback_sq_desc(sq, qentry, subdesc_cnt);
+			return 0;
+		}
+		nicvf_sq_add_gather_subdesc(sq, qentry, size, dma_addr);
 	}
 
 doorbell:
 	if (nic->t88 && skb_shinfo(skb)->gso_size) {
 		qentry = nicvf_get_nxt_sqentry(sq, qentry);
-		nicvf_sq_add_cqe_subdesc(sq, qentry, tso_sqe, skb);
+		nicvf_sq_add_cqe_subdesc(sq, qentry, hdr_sqe, skb);
 	}
 
 	nicvf_sq_doorbell(nic, skb, sq_num, subdesc_cnt);
@@ -1282,6 +1406,7 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 	int offset;
 	u16 *rb_lens = NULL;
 	u64 *rb_ptrs = NULL;
+	u64 phys_addr;
 
 	rb_lens = (void *)cqe_rx + (3 * sizeof(u64));
 	/* Except 88xx pass1 on all other chips CQE_RX2_S is added to
@@ -1296,15 +1421,21 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 	else
 		rb_ptrs = (void *)cqe_rx + (7 * sizeof(u64));
 
-	netdev_dbg(nic->netdev, "%s rb_cnt %d rb0_ptr %llx rb0_sz %d\n",
-		   __func__, cqe_rx->rb_cnt, cqe_rx->rb0_ptr, cqe_rx->rb0_sz);
-
 	for (frag = 0; frag < cqe_rx->rb_cnt; frag++) {
 		payload_len = rb_lens[frag_num(frag)];
+		phys_addr = nicvf_iova_to_phys(nic, *rb_ptrs);
+		if (!phys_addr) {
+			if (skb)
+				dev_kfree_skb_any(skb);
+			return NULL;
+		}
+
 		if (!frag) {
 			/* First fragment */
+			nicvf_dma_unmap(nic, *rb_ptrs - cqe_rx->align_pad,
+					RCV_FRAG_LEN, DMA_FROM_DEVICE);
 			skb = nicvf_rb_ptr_to_skb(nic,
-						  *rb_ptrs - cqe_rx->align_pad,
+						  phys_addr - cqe_rx->align_pad,
 						  payload_len);
 			if (!skb)
 				return NULL;
@@ -1312,8 +1443,10 @@ struct sk_buff *nicvf_get_rcv_skb(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
 			skb_put(skb, payload_len);
 		} else {
 			/* Add fragments */
-			page = virt_to_page(phys_to_virt(*rb_ptrs));
-			offset = phys_to_virt(*rb_ptrs) - page_address(page);
+			nicvf_dma_unmap(nic, *rb_ptrs,
+					RCV_FRAG_LEN, DMA_FROM_DEVICE);
+			page = virt_to_page(phys_to_virt(phys_addr));
+			offset = phys_to_virt(phys_addr) - page_address(page);
 			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
 					offset, payload_len, RCV_FRAG_LEN);
 		}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 5cb84da..6ab6bfd 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -301,6 +301,8 @@ struct queue_set {
 
 #define	CQ_ERR_MASK	(CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)
 
+void nicvf_unmap_sndq_buffers(struct nicvf *nic, struct snd_queue *sq,
+			      int hdr_sqe, u8 subdesc_cnt);
 void nicvf_config_vlan_stripping(struct nicvf *nic,
 				 netdev_features_t features);
 int nicvf_set_qset_resources(struct nicvf *nic);
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 2/4] net: thunderx: Fix LMAC mode debug prints for QSGMII mode
  2017-03-03 10:47 [PATCH 0/4] net: thunderx: Miscellaneous fixes sunil.kovvuri
  2017-03-03 10:47 ` [PATCH 1/4] net: thunderx: Fix IOMMU translation faults sunil.kovvuri
@ 2017-03-03 10:47 ` sunil.kovvuri
  2017-03-03 10:47 ` [PATCH 3/4] net: thunderx: Fix invalid mac addresses for node1 interfaces sunil.kovvuri
  2017-03-03 10:47 ` [PATCH 4/4] net: thunderx: Allow IPv6 frames with zero UDP checksum sunil.kovvuri
  3 siblings, 0 replies; 11+ messages in thread
From: sunil.kovvuri @ 2017-03-03 10:47 UTC (permalink / raw)
  To: netdev; +Cc: linux-kernel, linux-arm-kernel, Sunil Goutham

From: Sunil Goutham <sgoutham@cavium.com>

When BGX/LMACs are in QSGMII mode, mode info is not printed for
some LMACs. This patch fixes that: with the kernel driver no longer
doing any serdes 2-lane mapping config calculation, this logic can
simply be removed.

Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
---
 drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 4c8e8cf..9b8a53e 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -1011,12 +1011,6 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
 			dev_info(dev, "%s: 40G_KR4\n", (char *)str);
 		break;
 	case BGX_MODE_QSGMII:
-		if ((lmacid == 0) &&
-		    (bgx_get_lane2sds_cfg(bgx, lmac) != lmacid))
-			return;
-		if ((lmacid == 2) &&
-		    (bgx_get_lane2sds_cfg(bgx, lmac) == lmacid))
-			return;
 		dev_info(dev, "%s: QSGMII\n", (char *)str);
 		break;
 	case BGX_MODE_RGMII:
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 3/4] net: thunderx: Fix invalid mac addresses for node1 interfaces
  2017-03-03 10:47 [PATCH 0/4] net: thunderx: Miscellaneous fixes sunil.kovvuri
  2017-03-03 10:47 ` [PATCH 1/4] net: thunderx: Fix IOMMU translation faults sunil.kovvuri
  2017-03-03 10:47 ` [PATCH 2/4] net: thunderx: Fix LMAC mode debug prints for QSGMII mode sunil.kovvuri
@ 2017-03-03 10:47 ` sunil.kovvuri
  2017-03-03 10:47 ` [PATCH 4/4] net: thunderx: Allow IPv6 frames with zero UDP checksum sunil.kovvuri
  3 siblings, 0 replies; 11+ messages in thread
From: sunil.kovvuri @ 2017-03-03 10:47 UTC (permalink / raw)
  To: netdev; +Cc: linux-kernel, linux-arm-kernel, Sunil Goutham

From: Sunil Goutham <sgoutham@cavium.com>

When booted with ACPI, random MAC addresses are assigned to node1
interfaces because the bgx_id computed by the BGX driver does not
match the ACPI tables.

This patch fixes the issue by setting the maximum number of BGX
devices per node based on the platform/SoC instead of a compile-time
macro, so that bgx_id is set appropriately.

Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
---
 drivers/net/ethernet/cavium/thunder/thunder_bgx.c | 58 ++++++++++++++++++-----
 drivers/net/ethernet/cavium/thunder/thunder_bgx.h |  1 -
 2 files changed, 45 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9b8a53e..64a1095 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -123,14 +123,44 @@ static int bgx_poll_reg(struct bgx *bgx, u8 lmac, u64 reg, u64 mask, bool zero)
 	return 1;
 }
 
+static int max_bgx_per_node;
+static void set_max_bgx_per_node(struct pci_dev *pdev)
+{
+	u16 sdevid;
+
+	if (max_bgx_per_node)
+		return;
+
+	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sdevid);
+	switch (sdevid) {
+	case PCI_SUBSYS_DEVID_81XX_BGX:
+		max_bgx_per_node = MAX_BGX_PER_CN81XX;
+		break;
+	case PCI_SUBSYS_DEVID_83XX_BGX:
+		max_bgx_per_node = MAX_BGX_PER_CN83XX;
+		break;
+	case PCI_SUBSYS_DEVID_88XX_BGX:
+	default:
+		max_bgx_per_node = MAX_BGX_PER_CN88XX;
+		break;
+	}
+}
+
+static struct bgx *get_bgx(int node, int bgx_idx)
+{
+	int idx = (node * max_bgx_per_node) + bgx_idx;
+
+	return bgx_vnic[idx];
+}
+
 /* Return number of BGX present in HW */
 unsigned bgx_get_map(int node)
 {
 	int i;
 	unsigned map = 0;
 
-	for (i = 0; i < MAX_BGX_PER_NODE; i++) {
-		if (bgx_vnic[(node * MAX_BGX_PER_NODE) + i])
+	for (i = 0; i < max_bgx_per_node; i++) {
+		if (bgx_vnic[(node * max_bgx_per_node) + i])
 			map |= (1 << i);
 	}
 
@@ -143,7 +173,7 @@ int bgx_get_lmac_count(int node, int bgx_idx)
 {
 	struct bgx *bgx;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (bgx)
 		return bgx->lmac_count;
 
@@ -158,7 +188,7 @@ void bgx_get_lmac_link_state(int node, int bgx_idx, int lmacid, void *status)
 	struct bgx *bgx;
 	struct lmac *lmac;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (!bgx)
 		return;
 
@@ -172,7 +202,7 @@ EXPORT_SYMBOL(bgx_get_lmac_link_state);
 
 const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid)
 {
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 
 	if (bgx)
 		return bgx->lmac[lmacid].mac;
@@ -183,7 +213,7 @@ EXPORT_SYMBOL(bgx_get_lmac_mac);
 
 void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac)
 {
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 
 	if (!bgx)
 		return;
@@ -194,7 +224,7 @@ EXPORT_SYMBOL(bgx_set_lmac_mac);
 
 void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
 {
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 	struct lmac *lmac;
 	u64 cfg;
 
@@ -217,7 +247,7 @@ EXPORT_SYMBOL(bgx_lmac_rx_tx_enable);
 void bgx_lmac_get_pfc(int node, int bgx_idx, int lmacid, void *pause)
 {
 	struct pfc *pfc = (struct pfc *)pause;
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 	struct lmac *lmac;
 	u64 cfg;
 
@@ -237,7 +267,7 @@ EXPORT_SYMBOL(bgx_lmac_get_pfc);
 void bgx_lmac_set_pfc(int node, int bgx_idx, int lmacid, void *pause)
 {
 	struct pfc *pfc = (struct pfc *)pause;
-	struct bgx *bgx = bgx_vnic[(node * MAX_BGX_PER_CN88XX) + bgx_idx];
+	struct bgx *bgx = get_bgx(node, bgx_idx);
 	struct lmac *lmac;
 	u64 cfg;
 
@@ -369,7 +399,7 @@ u64 bgx_get_rx_stats(int node, int bgx_idx, int lmac, int idx)
 {
 	struct bgx *bgx;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (!bgx)
 		return 0;
 
@@ -383,7 +413,7 @@ u64 bgx_get_tx_stats(int node, int bgx_idx, int lmac, int idx)
 {
 	struct bgx *bgx;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (!bgx)
 		return 0;
 
@@ -411,7 +441,7 @@ void bgx_lmac_internal_loopback(int node, int bgx_idx,
 	struct lmac *lmac;
 	u64    cfg;
 
-	bgx = bgx_vnic[(node * MAX_BGX_PER_NODE) + bgx_idx];
+	bgx = get_bgx(node, bgx_idx);
 	if (!bgx)
 		return;
 
@@ -1328,11 +1358,13 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_release_regions;
 	}
 
+	set_max_bgx_per_node(pdev);
+
 	pci_read_config_word(pdev, PCI_DEVICE_ID, &sdevid);
 	if (sdevid != PCI_DEVICE_ID_THUNDER_RGX) {
 		bgx->bgx_id = (pci_resource_start(pdev,
 			PCI_CFG_REG_BAR_NUM) >> 24) & BGX_ID_MASK;
-		bgx->bgx_id += nic_get_node_id(pdev) * MAX_BGX_PER_NODE;
+		bgx->bgx_id += nic_get_node_id(pdev) * max_bgx_per_node;
 		bgx->max_lmac = MAX_LMAC_PER_BGX;
 		bgx_vnic[bgx->bgx_id] = bgx;
 	} else {
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index a60f189..c5080f2c 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -22,7 +22,6 @@
 #define    MAX_BGX_PER_CN88XX			2
 #define    MAX_BGX_PER_CN81XX			3 /* 2 BGXs + 1 RGX */
 #define    MAX_BGX_PER_CN83XX			4
-#define    MAX_BGX_PER_NODE			4
 #define    MAX_LMAC_PER_BGX			4
 #define    MAX_BGX_CHANS_PER_LMAC		16
 #define    MAX_DMAC_PER_LMAC			8
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* [PATCH 4/4] net: thunderx: Allow IPv6 frames with zero UDP checksum
  2017-03-03 10:47 [PATCH 0/4] net: thunderx: Miscellaneous fixes sunil.kovvuri
                   ` (2 preceding siblings ...)
  2017-03-03 10:47 ` [PATCH 3/4] net: thunderx: Fix invalid mac addresses for node1 interfaces sunil.kovvuri
@ 2017-03-03 10:47 ` sunil.kovvuri
  3 siblings, 0 replies; 11+ messages in thread
From: sunil.kovvuri @ 2017-03-03 10:47 UTC (permalink / raw)
  To: netdev
  Cc: linux-kernel, linux-arm-kernel, Thanneeru Srinivasulu, Sunil Goutham

From: Thanneeru Srinivasulu <tsrinivasulu@cavium.com>

IPv6 frames with a zero UDP checksum are currently treated as
having a bad checksum and are dropped. Configure the hardware to
accept such frames instead.

Signed-off-by: Thanneeru Srinivasulu <tsrinivasulu@cavium.com>
Signed-off-by: Sunil Goutham <sgoutham@cavium.com>
---
 drivers/net/ethernet/cavium/thunder/nicvf_queues.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index ee3aa16..77a86a6 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -657,9 +657,11 @@ static void nicvf_rcv_queue_config(struct nicvf *nic, struct queue_set *qs,
 	nicvf_send_msg_to_pf(nic, &mbx);
 
 	if (!nic->sqs_mode && (qidx == 0)) {
-		/* Enable checking L3/L4 length and TCP/UDP checksums */
+		/* Enable checking L3/L4 length and TCP/UDP checksums
+		 * Also allow IPv6 pkts with zero UDP checksum.
+		 */
 		nicvf_queue_reg_write(nic, NIC_QSET_RQ_GEN_CFG, 0,
-				      (BIT(24) | BIT(23) | BIT(21)));
+				      (BIT(24) | BIT(23) | BIT(21) | BIT(20)));
 		nicvf_config_vlan_stripping(nic, nic->netdev->features);
 	}
 
-- 
2.7.4

^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] net: thunderx: Fix IOMMU translation faults
  2017-03-03 10:47 ` [PATCH 1/4] net: thunderx: Fix IOMMU translation faults sunil.kovvuri
@ 2017-03-03 17:56   ` David Miller
  2017-03-04  5:54     ` Sunil Kovvuri
  0 siblings, 1 reply; 11+ messages in thread
From: David Miller @ 2017-03-03 17:56 UTC (permalink / raw)
  To: sunil.kovvuri; +Cc: netdev, linux-kernel, linux-arm-kernel, sgoutham

From: sunil.kovvuri@gmail.com
Date: Fri,  3 Mar 2017 16:17:47 +0530

> @@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
>  	if (!pass1_silicon(nic->pdev))
>  		nic->hw_tso = true;
>  
> +	/* Check if we are attached to IOMMU */
> +	nic->iommu_domain = iommu_get_domain_for_dev(dev);

This function is not universally available.

This looks very hackish to me anyways, how all of this stuff is supposed
to work is that you simply use the DMA interfaces unconditionally and
whatever is behind the operations takes care of everything.

Doing it conditionally in the driver with all of this special IOMMU
domain et al. knowledge makes no sense to me at all.

I don't see other drivers doing stuff like this at all, so if you're
going to handle this in a unique way like this you better write
several paragraphs in your commit message explaining why this weird
crap is necessary.

There is no way I can apply this series as it is currently written.

Thanks.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] net: thunderx: Fix IOMMU translation faults
  2017-03-03 17:56   ` David Miller
@ 2017-03-04  5:54     ` Sunil Kovvuri
  2017-03-06 12:46       ` Robin Murphy
  0 siblings, 1 reply; 11+ messages in thread
From: Sunil Kovvuri @ 2017-03-04  5:54 UTC (permalink / raw)
  To: David Miller; +Cc: Linux Netdev List, LKML, LAKML, Sunil Goutham

On Fri, Mar 3, 2017 at 11:26 PM, David Miller <davem@davemloft.net> wrote:
> From: sunil.kovvuri@gmail.com
> Date: Fri,  3 Mar 2017 16:17:47 +0530
>
>> @@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
>>       if (!pass1_silicon(nic->pdev))
>>               nic->hw_tso = true;
>>
>> +     /* Check if we are attached to IOMMU */
>> +     nic->iommu_domain = iommu_get_domain_for_dev(dev);
>
> This function is not universally available.

Even if CONFIG_IOMMU_API is not enabled, it will return NULL and will be okay.
http://lxr.free-electrons.com/source/include/linux/iommu.h#L400
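
(For reference, the !CONFIG_IOMMU_API case in include/linux/iommu.h
provides a static inline stub roughly along these lines; paraphrased
here, not copied verbatim from that header:)

static inline struct iommu_domain *iommu_get_domain_for_dev(struct device *dev)
{
	/* No IOMMU API compiled in: report "no domain attached" */
	return NULL;
}

So nic->iommu_domain simply ends up NULL and the driver keeps using
the plain virt_to_phys() path.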

>
> This looks very hackish to me anyways, how all of this stuff is supposed
> to work is that you simply use the DMA interfaces unconditionally and
> whatever is behind the operations takes care of everything.
>
> Doing it conditionally in the driver with all of this special IOMMU
> domain et al. knowledge makes no sense to me at all.
>
> I don't see other drivers doing stuff like this at all, so if you're
> going to handle this in a unique way like this you better write
> several paragraphs in your commit message explaining why this weird
> crap is necessary.

I already tried to explain in the commit message that HW takes care
of data coherency anyway, so calling the DMA interfaces when there is
no IOMMU only results in a performance drop.

We are seeing a 0.75Mpps drop in the IP forwarding rate due to that.
Hence I have restricted calling the DMA interfaces to only when an
IOMMU is enabled.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] net: thunderx: Fix IOMMU translation faults
  2017-03-04  5:54     ` Sunil Kovvuri
@ 2017-03-06 12:46       ` Robin Murphy
  2017-03-06 12:57         ` Sunil Kovvuri
  0 siblings, 1 reply; 11+ messages in thread
From: Robin Murphy @ 2017-03-06 12:46 UTC (permalink / raw)
  To: Sunil Kovvuri, David Miller; +Cc: Linux Netdev List, Sunil Goutham, LKML, LAKML

On 04/03/17 05:54, Sunil Kovvuri wrote:
> On Fri, Mar 3, 2017 at 11:26 PM, David Miller <davem@davemloft.net> wrote:
>> From: sunil.kovvuri@gmail.com
>> Date: Fri,  3 Mar 2017 16:17:47 +0530
>>
>>> @@ -1643,6 +1650,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
>>>       if (!pass1_silicon(nic->pdev))
>>>               nic->hw_tso = true;
>>>
>>> +     /* Check if we are attached to IOMMU */
>>> +     nic->iommu_domain = iommu_get_domain_for_dev(dev);
>>
>> This function is not universally available.
> 
> Even if CONFIG_IOMMU_API is not enabled, it will return NULL and will be okay.
> http://lxr.free-electrons.com/source/include/linux/iommu.h#L400
> 
>>
>> This looks very hackish to me anyways, how all of this stuff is supposed
>> to work is that you simply use the DMA interfaces unconditionally and
>> whatever is behind the operations takes care of everything.
>>
>> Doing it conditionally in the driver with all of this special IOMMU
>> domain et al. knowledge makes no sense to me at all.
>>
>> I don't see other drivers doing stuff like this at all, so if you're
>> going to handle this in a unique way like this you better write
>> several paragraphs in your commit message explaining why this weird
>> crap is necessary.
> 
> I already tried to explain in the commit message that HW anyway takes care
> of data coherency, so calling DMA interfaces when there is no IOMMU will
> only result in performance drop.
> 
> We are seeing a 0.75Mpps drop with IP forwarding rate due to that.
> Hence I have restricted calling DMA interfaces to only when IOMMU is enabled.

What's 0.75Mpps as a percentage of baseline? On a correctly configured
coherent arm64 system, in the absence of an IOMMU, dma_map_*() is
essentially just virt_to_phys() behind a function call or two, so I'd be
interested to know where any non-trivial overhead might be coming from.

Robin.

> 
> _______________________________________________
> linux-arm-kernel mailing list
> linux-arm-kernel@lists.infradead.org
> http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] net: thunderx: Fix IOMMU translation faults
  2017-03-06 12:46       ` Robin Murphy
@ 2017-03-06 12:57         ` Sunil Kovvuri
  2017-03-06 16:32           ` Robin Murphy
  0 siblings, 1 reply; 11+ messages in thread
From: Sunil Kovvuri @ 2017-03-06 12:57 UTC (permalink / raw)
  To: Robin Murphy; +Cc: David Miller, Linux Netdev List, Sunil Goutham, LKML, LAKML

>>
>> We are seeing a 0.75Mpps drop with IP forwarding rate due to that.
>> Hence I have restricted calling DMA interfaces to only when IOMMU is enabled.
>
> What's 0.75Mpps as a percentage of baseline? On a correctly configured
> coherent arm64 system, in the absence of an IOMMU, dma_map_*() is
> essentially just virt_to_phys() behind a function call or two, so I'd be
> interested to know where any non-trivial overhead might be coming from.

It's a 5% drop and yes device is configured as coherent.
And the drop is due to additional function calls.

Thanks,
Sunil.

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] net: thunderx: Fix IOMMU translation faults
  2017-03-06 12:57         ` Sunil Kovvuri
@ 2017-03-06 16:32           ` Robin Murphy
  2017-03-07 12:44             ` Sunil Kovvuri
  0 siblings, 1 reply; 11+ messages in thread
From: Robin Murphy @ 2017-03-06 16:32 UTC (permalink / raw)
  To: Sunil Kovvuri; +Cc: David Miller, Linux Netdev List, Sunil Goutham, LKML, LAKML

On 06/03/17 12:57, Sunil Kovvuri wrote:
>>>
>>> We are seeing a 0.75Mpps drop with IP forwarding rate due to that.
>>> Hence I have restricted calling DMA interfaces to only when IOMMU is enabled.
>>
>> What's 0.75Mpps as a percentage of baseline? On a correctly configured
>> coherent arm64 system, in the absence of an IOMMU, dma_map_*() is
>> essentially just virt_to_phys() behind a function call or two, so I'd be
>> interested to know where any non-trivial overhead might be coming from.
> 
> It's a 5% drop and yes device is configured as coherent.
> And the drop is due to additional function calls.

OK, interesting - sounds like there's potential for some optimisation
there as well. AFAICS the callchain goes:

dma_map_single_attrs (inline)
- ops->map_page (__swiotlb_map_page)
  - swiotlb_map_page
    - phys_to_dma (inline)
    - dma_capable (inline)

Do you happen to have a breakdown of where the time goes? If it's mostly
just in the indirect branch our options are limited (I'm guessing
ThunderX doesn't have a particularly fancy branch predictor, if it's not
even got a data prefetcher), but if it's in the SWIOTLB code then
there's certainly room for improvement (which will hopefully tie in with
some DMA ops work I'm planning to do soon anyway).
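
For illustration, the generic wrapper boils down to something like
this (a simplified sketch of the 4.10-era code, debug hooks and
sanity checks omitted):

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	/* the indirect call: __swiotlb_map_page -> swiotlb_map_page on arm64 */
	return ops->map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
			     size, dir, attrs);
}

and in the coherent, no-IOMMU, no-bounce case swiotlb_map_page()
effectively just returns phys_to_dma(dev, virt_to_phys(ptr)) once the
dma_capable() check passes, so the indirect branch and those checks
are the only real overhead.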

Thanks,
Robin.

> 
> Thanks,
> Sunil.
> 

^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH 1/4] net: thunderx: Fix IOMMU translation faults
  2017-03-06 16:32           ` Robin Murphy
@ 2017-03-07 12:44             ` Sunil Kovvuri
  0 siblings, 0 replies; 11+ messages in thread
From: Sunil Kovvuri @ 2017-03-07 12:44 UTC (permalink / raw)
  To: Robin Murphy; +Cc: David Miller, Linux Netdev List, Sunil Goutham, LKML, LAKML

On Mon, Mar 6, 2017 at 10:02 PM, Robin Murphy <robin.murphy@arm.com> wrote:
> On 06/03/17 12:57, Sunil Kovvuri wrote:
>>>>
>>>> We are seeing a 0.75Mpps drop with IP forwarding rate due to that.
>>>> Hence I have restricted calling DMA interfaces to only when IOMMU is enabled.
>>>
>>> What's 0.75Mpps as a percentage of baseline? On a correctly configured
>>> coherent arm64 system, in the absence of an IOMMU, dma_map_*() is
>>> essentially just virt_to_phys() behind a function call or two, so I'd be
>>> interested to know where any non-trivial overhead might be coming from.
>>
>> It's a 5% drop and yes device is configured as coherent.
>> And the drop is due to additional function calls.
>
> OK, interesting - sounds like there's potential for some optimisation
> there as well. AFAICS the callchain goes:
>
> dma_map_single_attrs (inline)
> - ops->map_page (__swiotlb_map_page)
>   - swiotlb_map_page
>     - phys_to_dma (inline)
>     - dma_capable (inline)
>
> Do you happen to have a breakdown of where the time goes? If it's mostly
> just in the indirect branch our options are limited (I'm guessing
> ThunderX doesn't have a particularly fancy branch predictor, if it's not
> even got a data prefetcher), but if it's in the SWIOTLB code then
> there's certainly room for improvement (which will hopefully tie in with
> some DMA ops work I'm planning to do soon anyway).

It's the branching that is costing the performance; as you said,
nothing much can be done in the common code for this. Anyway, I have
submitted a new patch without the conditional calling of the DMA APIs,
and will look into reducing the performance impact (if possible by
implementing buffer recycling) a bit later.

Thanks,
Sunil.

^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2017-03-07 12:45 UTC | newest]

Thread overview: 11+ messages
2017-03-03 10:47 [PATCH 0/4] net: thunderx: Miscellaneous fixes sunil.kovvuri
2017-03-03 10:47 ` [PATCH 1/4] net: thunderx: Fix IOMMU translation faults sunil.kovvuri
2017-03-03 17:56   ` David Miller
2017-03-04  5:54     ` Sunil Kovvuri
2017-03-06 12:46       ` Robin Murphy
2017-03-06 12:57         ` Sunil Kovvuri
2017-03-06 16:32           ` Robin Murphy
2017-03-07 12:44             ` Sunil Kovvuri
2017-03-03 10:47 ` [PATCH 2/4] net: thunderx: Fix LMAC mode debug prints for QSGMII mode sunil.kovvuri
2017-03-03 10:47 ` [PATCH 3/4] net: thunderx: Fix invalid mac addresses for node1 interfaces sunil.kovvuri
2017-03-03 10:47 ` [PATCH 4/4] net: thunderx: Allow IPv6 frames with zero UDP checksum sunil.kovvuri
