* [PATCH 1/4] net/ixgbevf: unregister irq handler when other interrupts not allowed.
@ 2017-10-26 13:44 xiangxia.m.yue
  2017-10-26 13:44 ` [PATCH 2/4] net/ixgbevf: set the inter-interrupt interval for EITR xiangxia.m.yue
From: xiangxia.m.yue @ 2017-10-26 13:44 UTC (permalink / raw)
  To: dev; +Cc: Tonghao Zhang

From: Tonghao Zhang <zhangtonghao@didichuxing.com>

When we bind an ixgbe VF (e.g. an 82599 card) to igb_uio and enable Rx interrupts,
there is more than one epoll_wait on intr_handle.fd: one in the "eal-intr-thread"
and the others in the threads that call rte_epoll_wait. The problem is that
sometimes the "eal-intr-thread" consumes the Rx interrupt event, so rte_epoll_wait
never sees it and packets may be lost.

We should unregister the status interrupt handler from the "eal-intr-thread" when
other interrupts are not allowed; the ixgbe PF driver already handles this case
the same way.
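
For reference, here is a minimal sketch (not part of this patch) of the
application-side pattern that exposes the race; it loosely follows the
l3fwd-power style loop, and port_id/queue_id and the helper name are
placeholders:

#include <stdint.h>
#include <rte_ethdev.h>
#include <rte_interrupts.h>

/* Block the calling thread until an Rx interrupt fires on the queue. */
static void
wait_for_rx(uint16_t port_id, uint16_t queue_id)
{
	struct rte_epoll_event event;
	uint32_t data = port_id << 16 | queue_id;

	/* Register the queue interrupt fd with this thread's epoll instance. */
	rte_eth_dev_rx_intr_ctl_q(port_id, queue_id, RTE_EPOLL_PER_THREAD,
				  RTE_INTR_EVENT_ADD, (void *)(uintptr_t)data);

	/* Unmask the queue interrupt, then sleep in rte_epoll_wait().
	 * With igb_uio there is a single fd, so the eal-intr-thread may
	 * consume the very event this thread is waiting for.
	 */
	rte_eth_dev_rx_intr_enable(port_id, queue_id);
	rte_epoll_wait(RTE_EPOLL_PER_THREAD, &event, 1, -1);
	rte_eth_dev_rx_intr_disable(port_id, queue_id);
}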

Signed-off-by: Tonghao Zhang <zhangtonghao@didichuxing.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 4339347..5994e55 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5070,6 +5070,15 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	}
 	ixgbevf_configure_msix(dev);
 
+	if (!rte_intr_allow_others(intr_handle)) {
+		rte_intr_callback_unregister(intr_handle,
+					     ixgbevf_dev_interrupt_handler,
+					     dev);
+		if (dev->data->dev_conf.intr_conf.lsc != 0)
+			PMD_INIT_LOG(INFO, "lsc won't enable because of"
+				     " no intr multiplex");
+	}
+
 	/* When a VF port is bound to VFIO-PCI, only miscellaneous interrupt
 	 * is mapped to VFIO vector 0 in eth_ixgbevf_dev_init( ).
 	 * If previous VFIO interrupt mapping setting in eth_ixgbevf_dev_init( )
@@ -5111,6 +5120,12 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	dev->data->scattered_rx = 0;
 
 	ixgbe_dev_clear_queues(dev);
+
+	if (!rte_intr_allow_others(intr_handle))
+		/* resume to the default handler */
+		rte_intr_callback_register(intr_handle,
+					   ixgbevf_dev_interrupt_handler,
+					   (void *)dev);
 
 	/* Clean datapath event and queue/vec mapping */
 	rte_intr_efd_disable(intr_handle);
-- 
1.8.3.1

* [PATCH 2/4] net/ixgbevf: set the inter-interrupt interval for EITR.
  2017-10-26 13:44 [PATCH 1/4] net/ixgbevf: unregister irq handler when other interrupts not allowed xiangxia.m.yue
@ 2017-10-26 13:44 ` xiangxia.m.yue
  2017-10-26 13:44 ` [PATCH 3/4] net/ixgbevf: save IXGBE_VTEIMS to intr->mask for performance xiangxia.m.yue
  2017-10-26 13:44 ` [PATCH 4/4] net/ixgbevf: add check for rte_intr_enable xiangxia.m.yue
From: xiangxia.m.yue @ 2017-10-26 13:44 UTC (permalink / raw)
  To: dev; +Cc: Tonghao Zhang, Tonghao Zhang

From: Tonghao Zhang <zhangtonghao@didichuxing.com>

Set the EITR inter-interrupt interval to the default value. This can improve
performance when Rx interrupts are enabled to process packets, since the point
of Rx interrupt mode is to reduce CPU usage and interrupt moderation keeps the
interrupt rate bounded.
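
As a rough illustration only (not part of the patch): on 82599-class hardware
the ITR interval field of EITR/VTEITR sits in bits 11:3 and counts 2 microsecond
units, so a helper like the hypothetical one below could turn a target interrupt
rate into a register value. The field layout here is an assumption taken from
the 82599 datasheet.

#include <stdint.h>

/* Hypothetical helper: convert a desired maximum interrupt rate
 * (interrupts per second) into an EITR/VTEITR interval value.
 * Assumes the 82599 layout: ITR interval in bits 11:3, 2 us units.
 */
static uint32_t
itr_from_rate(uint32_t intr_per_sec)
{
	uint32_t interval_us;

	if (intr_per_sec == 0)
		return 0;			/* no moderation */

	interval_us = 1000000 / intr_per_sec;	/* min gap between irqs */

	return ((interval_us / 2) << 3) & 0xFFF;	/* 12-bit field, as in the patch */
}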

Signed-off-by: Tonghao Zhang <zhangtonghao@didichuxing.com>
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 5994e55..6eb90e4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5801,6 +5801,9 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 		if (vector_idx < base + intr_handle->nb_efd - 1)
 			vector_idx++;
 	}
+
+	IXGBE_WRITE_REG(hw, IXGBE_VTEITR(IXGBE_MISC_VEC_ID),
+			IXGBE_MIN_INTER_INTERRUPT_INTERVAL_DEFAULT & 0xFFF);
 }
 
 /**
-- 
1.8.3.1

* [PATCH 3/4] net/ixgbevf: save IXGBE_VTEIMS to intr->mask for performance.
  2017-10-26 13:44 [PATCH 1/4] net/ixgbevf: unregister irq handler when other interrupts not allowed xiangxia.m.yue
  2017-10-26 13:44 ` [PATCH 2/4] net/ixgbevf: set the inter-interrupt interval for EITR xiangxia.m.yue
@ 2017-10-26 13:44 ` xiangxia.m.yue
  2017-10-26 13:44 ` [PATCH 4/4] net/ixgbevf: add check for rte_intr_enable xiangxia.m.yue
From: xiangxia.m.yue @ 2017-10-26 13:44 UTC (permalink / raw)
  To: dev; +Cc: Tonghao Zhang, Tonghao Zhang

From: Tonghao Zhang <zhangtonghao@didichuxing.com>

If a DPDK application calls rte_eth_dev_rx_intr_enable or rte_eth_dev_rx_intr_disable
frequently, the ixgbe VF driver reads the IXGBE_VTEIMS register every time. We can
optimize this path: the patch caches the IXGBE_VTEIMS value in intr->mask so the
register no longer has to be read on each call.
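
In other words, the driver keeps a software shadow of the mask register so the
hot enable/disable paths become write-only. The sketch below only restates the
pattern the diff implements, with a made-up function name and the driver-internal
helpers from ixgbe_ethdev.h assumed to be in scope:

#include "ixgbe_ethdev.h"

/* Sketch: intr->mask mirrors IXGBE_VTEIMS, so enabling a queue
 * interrupt costs one MMIO write and no MMIO read.
 */
static int
vf_queue_intr_enable(struct rte_eth_dev *dev, uint32_t vec)
{
	struct ixgbe_interrupt *intr =
		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
	struct ixgbe_hw *hw =
		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);

	intr->mask |= (1 << vec);			/* update the cached copy */
	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);	/* single write */

	return 0;
}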

Signed-off-by: Tonghao Zhang <zhangtonghao@didichuxing.com>
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 55 +++++++++++++++++++++++++---------------
 1 file changed, 35 insertions(+), 20 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index 6eb90e4..f534f20 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -268,8 +268,8 @@ static int ixgbevf_dev_link_update(struct rte_eth_dev *dev,
 static void ixgbevf_dev_stop(struct rte_eth_dev *dev);
 static void ixgbevf_dev_close(struct rte_eth_dev *dev);
 static int  ixgbevf_dev_reset(struct rte_eth_dev *dev);
-static void ixgbevf_intr_disable(struct ixgbe_hw *hw);
-static void ixgbevf_intr_enable(struct ixgbe_hw *hw);
+static void ixgbevf_intr_disable(struct rte_eth_dev *dev);
+static void ixgbevf_intr_enable(struct rte_eth_dev *dev);
 static int ixgbevf_dev_stats_get(struct rte_eth_dev *dev,
 		struct rte_eth_stats *stats);
 static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev);
@@ -1659,7 +1659,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 	ixgbevf_dev_stats_reset(eth_dev);
 
 	/* Disable the interrupts for VF */
-	ixgbevf_intr_disable(hw);
+	ixgbevf_intr_disable(eth_dev);
 
 	hw->mac.num_rar_entries = 128; /* The MAX of the underlying PF */
 	diag = hw->mac.ops.reset_hw(hw);
@@ -1728,7 +1728,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 	rte_intr_callback_register(intr_handle,
 				   ixgbevf_dev_interrupt_handler, eth_dev);
 	rte_intr_enable(intr_handle);
-	ixgbevf_intr_enable(hw);
+	ixgbevf_intr_enable(eth_dev);
 
 	PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s",
 		     eth_dev->data->port_id, pci_dev->id.vendor_id,
@@ -1761,7 +1761,7 @@ static int ixgbe_l2_tn_filter_init(struct rte_eth_dev *eth_dev)
 	eth_dev->tx_pkt_burst = NULL;
 
 	/* Disable the interrupts for VF */
-	ixgbevf_intr_disable(hw);
+	ixgbevf_intr_disable(eth_dev);
 
 	rte_free(eth_dev->data->mac_addrs);
 	eth_dev->data->mac_addrs = NULL;
@@ -4945,19 +4945,32 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
  * Virtual Function operations
  */
 static void
-ixgbevf_intr_disable(struct ixgbe_hw *hw)
+ixgbevf_intr_disable(struct rte_eth_dev *dev)
 {
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	PMD_INIT_FUNC_TRACE();
 
 	/* Clear interrupt mask to stop from interrupts being generated */
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMC, IXGBE_VF_IRQ_CLEAR_MASK);
 
 	IXGBE_WRITE_FLUSH(hw);
+
+	/* Clear mask value. */
+	intr->mask = 0;
 }
 
 static void
-ixgbevf_intr_enable(struct ixgbe_hw *hw)
+ixgbevf_intr_enable(struct rte_eth_dev *dev)
 {
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
+	struct ixgbe_hw *hw =
+		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
+
 	PMD_INIT_FUNC_TRACE();
 
 	/* VF enable interrupt autoclean */
@@ -4966,6 +4979,9 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, IXGBE_VF_IRQ_ENABLE_MASK);
 
 	IXGBE_WRITE_FLUSH(hw);
+
+	/* Save IXGBE_VTEIMS value to mask. */
+	intr->mask = IXGBE_VF_IRQ_ENABLE_MASK;
 }
 
 static int
@@ -5091,7 +5107,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 	rte_intr_enable(intr_handle);
 
 	/* Re-enable interrupt for VF */
-	ixgbevf_intr_enable(hw);
+	ixgbevf_intr_enable(dev);
 
 	return 0;
 }
@@ -5105,7 +5121,7 @@ static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev,
 
 	PMD_INIT_FUNC_TRACE();
 
-	ixgbevf_intr_disable(hw);
+	ixgbevf_intr_disable(dev);
 
 	hw->adapter_stopped = 1;
 	ixgbe_stop_adapter(hw);
@@ -5603,17 +5619,17 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 {
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
-	uint32_t mask;
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	uint32_t vec = IXGBE_MISC_VEC_ID;
 
-	mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
 	if (rte_intr_allow_others(intr_handle))
 		vec = IXGBE_RX_VEC_START;
-	mask |= (1 << vec);
+	intr->mask |= (1 << vec);
 	RTE_SET_USED(queue_id);
-	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
 
 	rte_intr_enable(intr_handle);
 
@@ -5623,19 +5639,19 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 static int
 ixgbevf_dev_rx_queue_intr_disable(struct rte_eth_dev *dev, uint16_t queue_id)
 {
-	uint32_t mask;
+	struct ixgbe_interrupt *intr =
+		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 	struct ixgbe_hw *hw =
 		IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct rte_pci_device *pci_dev = RTE_ETH_DEV_TO_PCI(dev);
 	struct rte_intr_handle *intr_handle = &pci_dev->intr_handle;
 	uint32_t vec = IXGBE_MISC_VEC_ID;
 
-	mask = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
 	if (rte_intr_allow_others(intr_handle))
 		vec = IXGBE_RX_VEC_START;
-	mask &= ~(1 << vec);
+	intr->mask &= ~(1 << vec);
 	RTE_SET_USED(queue_id);
-	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, mask);
+	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
 
 	return 0;
 }
@@ -8191,7 +8207,7 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
 	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_interrupt *intr =
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
-	ixgbevf_intr_disable(hw);
+	ixgbevf_intr_disable(dev);
 
 	/* read-on-clear nic registers here */
 	eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR);
@@ -8208,7 +8224,6 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
 static int
 ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev)
 {
-	struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private);
 	struct ixgbe_interrupt *intr =
 		IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private);
 
@@ -8217,7 +8232,7 @@ static void ixgbevf_mbx_process(struct rte_eth_dev *dev)
 		intr->flags &= ~IXGBE_FLAG_MAILBOX;
 	}
 
-	ixgbevf_intr_enable(hw);
+	ixgbevf_intr_enable(dev);
 
 	return 0;
 }
-- 
1.8.3.1

* [PATCH 4/4] net/ixgbevf: add check for rte_intr_enable.
  2017-10-26 13:44 [PATCH 1/4] net/ixgbevf: unregister irq handler when other interrupts not allowed xiangxia.m.yue
  2017-10-26 13:44 ` [PATCH 2/4] net/ixgbevf: set the inter-interrupt interval for EITR xiangxia.m.yue
  2017-10-26 13:44 ` [PATCH 3/4] net/ixgbevf: save IXGBE_VTEIMS to intr->mask for performance xiangxia.m.yue
@ 2017-10-26 13:44 ` xiangxia.m.yue
From: xiangxia.m.yue @ 2017-10-26 13:44 UTC (permalink / raw)
  To: dev; +Cc: Tonghao Zhang, Tonghao Zhang

From: Tonghao Zhang <zhangtonghao@didichuxing.com>

When we bind the ixgbevf to vfio and call rte_eth_dev_rx_intr_enable and
rte_eth_dev_rx_intr_disable frequently, the interrupt setup path (msi_set_mask_bit)
consumes noticeable CPU, as shown below, because rte_intr_enable issues the ioctl
that maps the fds to interrupts on every call.

perf top:
5.45%  [kernel]   [k] msi_set_mask_bit

It is unnecessary to call rte_intr_enable in ixgbe_dev_rx_queue_intr_enable,
because the fds have already been mapped to interrupts and are not unmapped in
ixgbe_dev_rx_queue_intr_disable.

This patch adds a check on the interrupt handle type, so rte_intr_enable is only
called for UIO handles and is skipped when VFIO is used. With the patch,
msi_set_mask_bit no longer shows up in perf. Any suggestions are welcome.
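
For clarity, the condition the diff adds amounts to the small predicate sketched
below (the helper name is mine, not part of the patch); the idea is that with
VFIO the eventfds stay mapped after setup, so only UIO-style handles still need
rte_intr_enable() to re-enable the device interrupt:

#include <rte_interrupts.h>

/* Hypothetical predicate: does this handle need rte_intr_enable() on
 * every queue-interrupt enable?  For VFIO the irqfds remain mapped,
 * so repeating the enable ioctl (which ends up in msi_set_mask_bit)
 * can be avoided.
 */
static inline int
need_rte_intr_enable(const struct rte_intr_handle *intr_handle)
{
	return intr_handle->type == RTE_INTR_HANDLE_UIO ||
	       intr_handle->type == RTE_INTR_HANDLE_UIO_INTX;
}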

Signed-off-by: Tonghao Zhang <zhangtonghao@didichuxing.com>
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
---
 drivers/net/ixgbe/ixgbe_ethdev.c | 9 +++++++--
 1 file changed, 7 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c
index f534f20..b958734 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.c
+++ b/drivers/net/ixgbe/ixgbe_ethdev.c
@@ -5631,7 +5631,9 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 	RTE_SET_USED(queue_id);
 	IXGBE_WRITE_REG(hw, IXGBE_VTEIMS, intr->mask);
 
-	rte_intr_enable(intr_handle);
+	if (intr_handle->type == RTE_INTR_HANDLE_UIO ||
+	    intr_handle->type == RTE_INTR_HANDLE_UIO_INTX)
+		rte_intr_enable(intr_handle);
 
 	return 0;
 }
@@ -5680,7 +5682,10 @@ static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on)
 		mask &= (1 << (queue_id - 32));
 		IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
 	}
-	rte_intr_enable(intr_handle);
+
+	if (intr_handle->type == RTE_INTR_HANDLE_UIO ||
+	    intr_handle->type == RTE_INTR_HANDLE_UIO_INTX)
+		rte_intr_enable(intr_handle);
 
 	return 0;
 }
-- 
1.8.3.1
