From: Spike Du <spiked@nvidia.com>
To: <matan@nvidia.com>, <viacheslavo@nvidia.com>, <orika@nvidia.com>,
	<thomas@monjalon.net>, Xiaoyun Li <xiaoyun.li@intel.com>,
	Aman Singh <aman.deep.singh@intel.com>,
	Yuying Zhang <yuying.zhang@intel.com>,
	"Ferruh Yigit" <ferruh.yigit@xilinx.com>,
	Andrew Rybchenko <andrew.rybchenko@oktetlabs.ru>,
	Ray Kinsella <mdr@ashroe.eu>
Cc: <stephen@networkplumber.org>, <mb@smartsharesystems.com>,
	<dev@dpdk.org>,  <rasland@nvidia.com>
Subject: [PATCH v5 3/7] ethdev: introduce Rx queue based available descriptor threshold
Date: Tue, 7 Jun 2022 15:59:38 +0300
Message-ID: <20220607125942.241379-4-spiked@nvidia.com>
In-Reply-To: <20220607125942.241379-1-spiked@nvidia.com>

The available descriptor threshold describes the availability of an Rx
queue for hardware.
If the availability is below the threshold, the device triggers the
RTE_ETH_EVENT_RX_AVAIL_THRESH event.
The threshold is defined as a percentage of the Rx queue size, with a
valid range of [0, 99].
Setting the threshold to 0 disables it; this is the default.
Add driver callbacks in eth_dev_ops to configure and query the
available descriptor threshold.
Add a testpmd command to configure avail_thresh per Rx queue.
- Command syntax:
  set port <port_id> rxq <rxq_id> avail_thresh <avail_thresh_num>

- Example commands:
To configure avail_thresh as 30% of rxq size on port 1 rxq 0:
testpmd> set port 1 rxq 0 avail_thresh 30

To disable avail_thresh on port 1 rxq 0:
testpmd> set port 1 rxq 0 avail_thresh 0
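
For illustration, below is a minimal sketch of how an application could
consume the new API. It is not part of this patch; the example_* helper
names and the 30% value are assumptions made for the sketch only.

#include <stdio.h>
#include <rte_common.h>
#include <rte_ethdev.h>

/* Drain all pending avail_thresh events. The query API wraps queue_id,
 * so a plain increment between calls visits every Rx queue of the port.
 */
static int
example_avail_thresh_event(uint16_t port_id, enum rte_eth_event_type type,
			   void *cb_arg, void *ret_param)
{
	uint16_t queue_id = 0;
	uint8_t thresh;

	RTE_SET_USED(type);
	RTE_SET_USED(cb_arg);
	RTE_SET_USED(ret_param);
	while (rte_eth_rx_avail_thresh_query(port_id, &queue_id, &thresh) > 0) {
		printf("port %u rxq %u dropped below %u%% available descriptors\n",
		       port_id, queue_id, thresh);
		queue_id++;
	}
	return 0;
}

/* Register the event callback and arm a 30% threshold on each Rx queue. */
static int
example_enable_avail_thresh(uint16_t port_id, uint16_t nb_rxq)
{
	uint16_t q;
	int ret;

	ret = rte_eth_dev_callback_register(port_id,
			RTE_ETH_EVENT_RX_AVAIL_THRESH,
			example_avail_thresh_event, NULL);
	if (ret != 0)
		return ret;
	for (q = 0; q < nb_rxq; q++) {
		ret = rte_eth_rx_avail_thresh_set(port_id, q, 30);
		if (ret != 0)
			return ret;
	}
	return 0;
}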

Signed-off-by: Spike Du <spiked@nvidia.com>
---
 app/test-pmd/cmdline.c     | 68 ++++++++++++++++++++++++++++++++++++++++++
 app/test-pmd/config.c      | 20 +++++++++++++
 app/test-pmd/testpmd.c     | 14 +++++++++
 app/test-pmd/testpmd.h     |  2 ++
 lib/ethdev/ethdev_driver.h | 22 ++++++++++++++
 lib/ethdev/rte_ethdev.c    | 44 ++++++++++++++++++++++++++++
 lib/ethdev/rte_ethdev.h    | 73 ++++++++++++++++++++++++++++++++++++++++++++++
 lib/ethdev/version.map     |  2 ++
 8 files changed, 245 insertions(+)

diff --git a/app/test-pmd/cmdline.c b/app/test-pmd/cmdline.c
index 0410bad..bbf5835 100644
--- a/app/test-pmd/cmdline.c
+++ b/app/test-pmd/cmdline.c
@@ -17823,6 +17823,73 @@ struct cmd_show_port_flow_transfer_proxy_result {
 	}
 };
 
+/* *** SET AVAIL THRESHOLD FOR A RXQ OF A PORT *** */
+struct cmd_rxq_avail_thresh_result {
+	cmdline_fixed_string_t set;
+	cmdline_fixed_string_t port;
+	uint16_t port_num;
+	cmdline_fixed_string_t rxq;
+	uint16_t rxq_num;
+	cmdline_fixed_string_t avail_thresh;
+	uint8_t avail_thresh_num;
+};
+
+static void cmd_rxq_avail_thresh_parsed(void *parsed_result,
+		__rte_unused struct cmdline *cl,
+		__rte_unused void *data)
+{
+	struct cmd_rxq_avail_thresh_result *res = parsed_result;
+	int ret = 0;
+
+	if ((strcmp(res->set, "set") == 0) && (strcmp(res->port, "port") == 0)
+	    && (strcmp(res->rxq, "rxq") == 0)
+	    && (strcmp(res->avail_thresh, "avail_thresh") == 0))
+		ret = set_rxq_avail_thresh(res->port_num, res->rxq_num,
+				  res->avail_thresh_num);
+	if (ret < 0)
+		printf("rxq_avail_thresh_cmd error: (%s)\n", strerror(-ret));
+
+}
+
+static cmdline_parse_token_string_t cmd_rxq_avail_thresh_set =
+	TOKEN_STRING_INITIALIZER(struct cmd_rxq_avail_thresh_result,
+				set, "set");
+static cmdline_parse_token_string_t cmd_rxq_avail_thresh_port =
+	TOKEN_STRING_INITIALIZER(struct cmd_rxq_avail_thresh_result,
+				port, "port");
+static cmdline_parse_token_num_t cmd_rxq_avail_thresh_portnum =
+	TOKEN_NUM_INITIALIZER(struct cmd_rxq_avail_thresh_result,
+				port_num, RTE_UINT16);
+static cmdline_parse_token_string_t cmd_rxq_avail_thresh_rxq =
+	TOKEN_STRING_INITIALIZER(struct cmd_rxq_avail_thresh_result,
+				rxq, "rxq");
+static cmdline_parse_token_num_t cmd_rxq_avail_thresh_rxqnum =
+	TOKEN_NUM_INITIALIZER(struct cmd_rxq_avail_thresh_result,
+				rxq_num, RTE_UINT16);
+static cmdline_parse_token_string_t cmd_rxq_avail_thresh_avail_thresh =
+	TOKEN_STRING_INITIALIZER(struct cmd_rxq_avail_thresh_result,
+				avail_thresh, "avail_thresh");
+static cmdline_parse_token_num_t cmd_rxq_avail_thresh_avail_threshnum =
+	TOKEN_NUM_INITIALIZER(struct cmd_rxq_avail_thresh_result,
+				avail_thresh_num, RTE_UINT8);
+
+static cmdline_parse_inst_t cmd_rxq_avail_thresh = {
+	.f = cmd_rxq_avail_thresh_parsed,
+	.data = (void *)0,
+	.help_str = "set port <port_id> rxq <rxq_id> avail_thresh <avail_thresh_num>"
+		": Set avail_thresh for rxq on port_id",
+	.tokens = {
+		(void *)&cmd_rxq_avail_thresh_set,
+		(void *)&cmd_rxq_avail_thresh_port,
+		(void *)&cmd_rxq_avail_thresh_portnum,
+		(void *)&cmd_rxq_avail_thresh_rxq,
+		(void *)&cmd_rxq_avail_thresh_rxqnum,
+		(void *)&cmd_rxq_avail_thresh_avail_thresh,
+		(void *)&cmd_rxq_avail_thresh_avail_threshnum,
+		NULL,
+	},
+};
+
 /* ******************************************************************************** */
 
 /* list of instructions */
@@ -18110,6 +18177,7 @@ struct cmd_show_port_flow_transfer_proxy_result {
 	(cmdline_parse_inst_t *)&cmd_show_capability,
 	(cmdline_parse_inst_t *)&cmd_set_flex_is_pattern,
 	(cmdline_parse_inst_t *)&cmd_set_flex_spec_pattern,
+	(cmdline_parse_inst_t *)&cmd_rxq_avail_thresh,
 	NULL,
 };
 
diff --git a/app/test-pmd/config.c b/app/test-pmd/config.c
index 1b1e738..b754091 100644
--- a/app/test-pmd/config.c
+++ b/app/test-pmd/config.c
@@ -6342,3 +6342,23 @@ struct igb_ring_desc_16_bytes {
 		printf("  %s\n", buf);
 	}
 }
+
+int
+set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id, uint8_t avail_thresh)
+{
+	struct rte_eth_link link;
+	int ret;
+
+	if (port_id_is_invalid(port_id, ENABLED_WARN))
+		return -EINVAL;
+	ret = eth_link_get_nowait_print_err(port_id, &link);
+	if (ret < 0)
+		return -EINVAL;
+	if (avail_thresh > 99)
+		return -EINVAL;
+	ret = rte_eth_rx_avail_thresh_set(port_id, queue_id, avail_thresh);
+	if (ret != 0)
+		return ret;
+	return 0;
+}
+
diff --git a/app/test-pmd/testpmd.c b/app/test-pmd/testpmd.c
index 767765d..33d9b85 100644
--- a/app/test-pmd/testpmd.c
+++ b/app/test-pmd/testpmd.c
@@ -420,6 +420,7 @@ struct fwd_engine * fwd_engines[] = {
 	[RTE_ETH_EVENT_NEW] = "device probed",
 	[RTE_ETH_EVENT_DESTROY] = "device released",
 	[RTE_ETH_EVENT_FLOW_AGED] = "flow aged",
+	[RTE_ETH_EVENT_RX_AVAIL_THRESH] = "rxq available threshold reached",
 	[RTE_ETH_EVENT_MAX] = NULL,
 };
 
@@ -3616,6 +3617,9 @@ struct pmd_test_command {
 eth_event_callback(portid_t port_id, enum rte_eth_event_type type, void *param,
 		  void *ret_param)
 {
+	uint16_t rxq_id;
+	uint8_t avail_thresh;
+	int ret;
 	RTE_SET_USED(param);
 	RTE_SET_USED(ret_param);
 
@@ -3647,6 +3651,16 @@ struct pmd_test_command {
 		ports[port_id].port_status = RTE_PORT_CLOSED;
 		printf("Port %u is closed\n", port_id);
 		break;
+	case RTE_ETH_EVENT_RX_AVAIL_THRESH:
+		/* avail_thresh query API rewinds rxq_id, no need to check max rxq num. */
+		for (rxq_id = 0; ; rxq_id++) {
+			ret = rte_eth_rx_avail_thresh_query(port_id, &rxq_id, &avail_thresh);
+			if (ret <= 0)
+				break;
+			printf("Received avail_thresh event, port:%d rxq_id:%d\n",
+			       port_id, rxq_id);
+		}
+		break;
 	default:
 		break;
 	}
diff --git a/app/test-pmd/testpmd.h b/app/test-pmd/testpmd.h
index 78a5f4e..5b268f4 100644
--- a/app/test-pmd/testpmd.h
+++ b/app/test-pmd/testpmd.h
@@ -1173,6 +1173,8 @@ uint16_t tx_pkt_set_dynf(uint16_t port_id, __rte_unused uint16_t queue,
 void flex_item_create(portid_t port_id, uint16_t flex_id, const char *filename);
 void flex_item_destroy(portid_t port_id, uint16_t flex_id);
 void port_flex_item_flush(portid_t port_id);
+int set_rxq_avail_thresh(portid_t port_id, uint16_t queue_id,
+			 uint8_t avail_thresh);
 
 extern int flow_parse(const char *src, void *result, unsigned int size,
 		      struct rte_flow_attr **attr,
diff --git a/lib/ethdev/ethdev_driver.h b/lib/ethdev/ethdev_driver.h
index 69d9dc2..847f86a 100644
--- a/lib/ethdev/ethdev_driver.h
+++ b/lib/ethdev/ethdev_driver.h
@@ -1074,6 +1074,23 @@ typedef int (*eth_ip_reassembly_conf_set_t)(struct rte_eth_dev *dev,
 typedef int (*eth_dev_priv_dump_t)(struct rte_eth_dev *dev, FILE *file);
 
 /**
+ * @internal Set Rx queue available descriptor threshold.
+ * @see rte_eth_rx_avail_thresh_set()
+ */
+typedef int (*eth_rx_queue_avail_thresh_set_t)(struct rte_eth_dev *dev,
+				      uint16_t rx_queue_id,
+				      uint8_t avail_thresh);
+
+/**
+ * @internal Query queue available descriptor threshold event.
+ * @see rte_eth_rx_avail_thresh_query()
+ */
+
+typedef int (*eth_rx_queue_avail_thresh_query_t)(struct rte_eth_dev *dev,
+					uint16_t *rx_queue_id,
+					uint8_t *avail_thresh);
+
+/**
  * @internal A structure containing the functions exported by an Ethernet driver.
  */
 struct eth_dev_ops {
@@ -1283,6 +1300,11 @@ struct eth_dev_ops {
 
 	/** Dump private info from device */
 	eth_dev_priv_dump_t eth_dev_priv_dump;
+
+	/** Set Rx queue available descriptor threshold. */
+	eth_rx_queue_avail_thresh_set_t rx_queue_avail_thresh_set;
+	/** Query Rx queue available descriptor threshold event. */
+	eth_rx_queue_avail_thresh_query_t rx_queue_avail_thresh_query;
 };
 
 /**
diff --git a/lib/ethdev/rte_ethdev.c b/lib/ethdev/rte_ethdev.c
index a175867..92fb282 100644
--- a/lib/ethdev/rte_ethdev.c
+++ b/lib/ethdev/rte_ethdev.c
@@ -4424,6 +4424,50 @@ int rte_eth_set_queue_rate_limit(uint16_t port_id, uint16_t queue_idx,
 							queue_idx, tx_rate));
 }
 
+int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
+			       uint8_t avail_thresh)
+{
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	dev = &rte_eth_devices[port_id];
+
+	if (queue_id >= dev->data->nb_rx_queues) {
+		RTE_ETHDEV_LOG(ERR,
+			"Set queue avail thresh: port %u: invalid queue ID=%u.\n",
+			port_id, queue_id);
+		return -EINVAL;
+	}
+
+	if (avail_thresh > 99) {
+		RTE_ETHDEV_LOG(ERR,
+			"Set queue avail thresh: port %u: threshold should be <= 99.\n",
+			port_id);
+		return -EINVAL;
+	}
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_avail_thresh_set, -ENOTSUP);
+	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_set)(dev,
+							     queue_id, avail_thresh));
+}
+
+int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
+				 uint8_t *avail_thresh)
+{
+	struct rte_eth_dev *dev;
+
+	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
+	dev = &rte_eth_devices[port_id];
+
+	if (queue_id == NULL)
+		return -EINVAL;
+	if (*queue_id >= dev->data->nb_rx_queues)
+		*queue_id = 0;
+
+	RTE_FUNC_PTR_OR_ERR_RET(*dev->dev_ops->rx_queue_avail_thresh_query, -ENOTSUP);
+	return eth_err(port_id, (*dev->dev_ops->rx_queue_avail_thresh_query)(dev,
+							     queue_id, avail_thresh));
+}
+
 RTE_INIT(eth_dev_init_fp_ops)
 {
 	uint32_t i;
diff --git a/lib/ethdev/rte_ethdev.h b/lib/ethdev/rte_ethdev.h
index 04225bb..d01bfe4 100644
--- a/lib/ethdev/rte_ethdev.h
+++ b/lib/ethdev/rte_ethdev.h
@@ -1931,6 +1931,14 @@ struct rte_eth_rxq_info {
 	uint8_t queue_state;        /**< one of RTE_ETH_QUEUE_STATE_*. */
 	uint16_t nb_desc;           /**< configured number of RXDs. */
 	uint16_t rx_buf_size;       /**< hardware receive buffer size. */
+	/**
+	 * Per-queue Rx available descriptor threshold, defined as a percentage
+	 * of the Rx queue size. If Rx queue availability is lower than this
+	 * percentage, the event RTE_ETH_EVENT_RX_AVAIL_THRESH is triggered.
+	 * Value 0 means threshold monitoring is disabled, no event is
+	 * triggered.
+	 */
+	uint8_t avail_thresh;
 } __rte_cache_min_aligned;
 
 /**
@@ -3672,6 +3680,66 @@ int rte_eth_dev_set_vlan_ether_type(uint16_t port_id,
  */
 int rte_eth_dev_set_vlan_pvid(uint16_t port_id, uint16_t pvid, int on);
 
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Set Rx queue based available descriptor threshold.
+ *
+ * @param port_id
+ *  The port identifier of the Ethernet device.
+ * @param queue_id
+ *  The index of the receive queue.
+ * @param avail_thresh
+ *  The available descriptor threshold is a percentage of the Rx queue
+ *  size which describes the availability of the Rx queue for hardware.
+ *  If the Rx queue availability is below it, the device triggers the
+ *  event RTE_ETH_EVENT_RX_AVAIL_THRESH.
+ *  [1-99] to set a new available descriptor threshold.
+ *  0 to disable threshold monitoring.
+ *
+ * @return
+ *   - 0 if successful.
+ *   - negative if failed.
+ */
+__rte_experimental
+int rte_eth_rx_avail_thresh_set(uint16_t port_id, uint16_t queue_id,
+			       uint8_t avail_thresh);
+
+/**
+ * @warning
+ * @b EXPERIMENTAL: this API may change without prior notice.
+ *
+ * Query Rx queue based available descriptor threshold event.
+ * The function scans the port's Rx queues circularly, starting from *queue_id,
+ * and stops at the first queue with a pending avail_thresh event, if any.
+ *
+ * @param port_id
+ *  The port identifier of the Ethernet device.
+ * @param queue_id
+ *  The API caller sets the starting Rx queue id in the pointer.
+ *  If the queue_id is bigger than the maximum queue id of the port,
+ *  it is rewound to 0 so that the application can keep calling
+ *  this function to handle all pending avail_thresh events in the queues
+ *  with a simple increment between calls.
+ *  If an Rx queue has a pending avail_thresh event, the pointer is updated
+ *  with this Rx queue id; otherwise the pointer's content is
+ *  unchanged.
+ * @param avail_thresh
+ *  The pointer to the available descriptor threshold percentage of the Rx
+ *  queue. If an Rx queue with a pending avail_thresh event is found, the
+ *  queue's avail_thresh percentage is stored in this pointer; otherwise
+ *  the pointer's content is unchanged.
+ *
+ * @return
+ *   - 1 if an Rx queue with a pending avail_thresh event is found.
+ *   - 0 if no Rx queue with a pending avail_thresh event is found.
+ *   - -EINVAL if queue_id is NULL.
+ */
+__rte_experimental
+int rte_eth_rx_avail_thresh_query(uint16_t port_id, uint16_t *queue_id,
+				 uint8_t *avail_thresh);
+
 typedef void (*buffer_tx_error_fn)(struct rte_mbuf **unsent, uint16_t count,
 		void *userdata);
 
@@ -3877,6 +3945,11 @@ enum rte_eth_event_type {
 	RTE_ETH_EVENT_DESTROY,  /**< port is released */
 	RTE_ETH_EVENT_IPSEC,    /**< IPsec offload related event */
 	RTE_ETH_EVENT_FLOW_AGED,/**< New aged-out flows is detected */
+	/**
+	 *  Rx queue availability dropped below the configured threshold.
+	 *  @see rte_eth_rx_avail_thresh_set()
+	 */
+	RTE_ETH_EVENT_RX_AVAIL_THRESH,
 	RTE_ETH_EVENT_MAX       /**< max value of this enum */
 };
 
diff --git a/lib/ethdev/version.map b/lib/ethdev/version.map
index daca785..2fd928f 100644
--- a/lib/ethdev/version.map
+++ b/lib/ethdev/version.map
@@ -285,6 +285,8 @@ EXPERIMENTAL {
 	rte_mtr_color_in_protocol_priority_get;
 	rte_mtr_color_in_protocol_set;
 	rte_mtr_meter_vlan_table_update;
+	rte_eth_rx_avail_thresh_query;
+	rte_eth_rx_avail_thresh_set;
 };
 
 INTERNAL {
-- 
1.8.3.1
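
For reference, the sketch below shows one way a PMD could wire up the two
new eth_dev_ops callbacks added by this patch. The mydrv_* names and the
per-queue bookkeeping are hypothetical assumptions, not taken from this
patch or from any existing driver.

#include <stdbool.h>
#include <rte_ethdev.h>
#include <ethdev_driver.h>

/* Hypothetical per-queue state kept by the driver. */
struct mydrv_rxq {
	uint8_t avail_thresh; /* configured percentage, 0 = disabled */
	bool thresh_event;    /* set by the driver when HW raises the event */
};

static int
mydrv_rx_avail_thresh_set(struct rte_eth_dev *dev, uint16_t rx_queue_id,
			  uint8_t avail_thresh)
{
	struct mydrv_rxq *rxq = dev->data->rx_queues[rx_queue_id];

	/* A real driver would program the threshold into hardware here;
	 * this sketch only records the configured value.
	 */
	rxq->avail_thresh = avail_thresh;
	return 0;
}

static int
mydrv_rx_avail_thresh_query(struct rte_eth_dev *dev, uint16_t *rx_queue_id,
			    uint8_t *avail_thresh)
{
	uint16_t n = dev->data->nb_rx_queues;
	uint16_t i;

	/* Scan circularly starting from *rx_queue_id, as the API describes. */
	for (i = 0; i < n; i++) {
		uint16_t qid = (*rx_queue_id + i) % n;
		struct mydrv_rxq *rxq = dev->data->rx_queues[qid];

		if (rxq->thresh_event) {
			rxq->thresh_event = false;
			*rx_queue_id = qid;
			if (avail_thresh != NULL)
				*avail_thresh = rxq->avail_thresh;
			return 1; /* pending event found */
		}
	}
	return 0; /* no pending event */
}

static const struct eth_dev_ops mydrv_dev_ops = {
	/* ... other callbacks ... */
	.rx_queue_avail_thresh_set = mydrv_rx_avail_thresh_set,
	.rx_queue_avail_thresh_query = mydrv_rx_avail_thresh_query,
};

Note that the ethdev layer in this patch already validates the port and
queue IDs and rewinds *rx_queue_id before calling the driver, so the
callback only needs to scan its queues and report.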


Thread overview: 131+ messages
2022-04-01  3:22 [RFC 0/6] net/mlx5: introduce limit watermark and host shaper Spike Du
2022-04-01  3:22 ` [RFC 1/6] net/mlx5: add LWM support for Rxq Spike Du
2022-05-06  3:56   ` [RFC v1 0/7] net/mlx5: introduce limit watermark and host shaper Spike Du
2022-05-06  3:56     ` [RFC v1 1/7] net/mlx5: add LWM support for Rxq Spike Du
2022-05-06  3:56     ` [RFC v1 2/7] common/mlx5: share interrupt management Spike Du
2022-05-06  3:56     ` [RFC v1 3/7] ethdev: introduce Rx queue based limit watermark Spike Du
2022-05-19  9:37       ` Andrew Rybchenko
2022-05-06  3:56     ` [RFC v1 4/7] net/mlx5: add LWM event handling support Spike Du
2022-05-06  3:56     ` [RFC v1 5/7] net/mlx5: support Rx queue based limit watermark Spike Du
2022-05-06  3:56     ` [RFC v1 6/7] net/mlx5: add private API to config host port shaper Spike Du
2022-05-06  3:56     ` [RFC v1 7/7] app/testpmd: add LWM and Host Shaper command Spike Du
2022-05-22  5:58     ` [RFC v2 0/7] introduce per-queue limit watermark and host shaper Spike Du
2022-05-22  5:58       ` [RFC v2 1/7] net/mlx5: add LWM support for Rxq Spike Du
2022-05-22  5:58       ` [RFC v2 2/7] common/mlx5: share interrupt management Spike Du
2022-05-22  5:58       ` [RFC v2 3/7] ethdev: introduce Rx queue based limit watermark Spike Du
2022-05-22 15:23         ` Stephen Hemminger
2022-05-23  3:01           ` Spike Du
2022-05-23 21:45             ` Thomas Monjalon
2022-05-24  2:50               ` Spike Du
2022-05-24  8:18                 ` Thomas Monjalon
2022-05-25 12:59                   ` Andrew Rybchenko
2022-05-25 13:58                     ` Thomas Monjalon
2022-05-25 14:23                       ` Andrew Rybchenko
2022-05-23 22:54             ` Stephen Hemminger
2022-05-24  3:46               ` Spike Du
2022-05-22 15:24         ` Stephen Hemminger
2022-05-23  2:18           ` Spike Du
2022-05-23  6:07         ` Morten Brørup
2022-05-23 10:58           ` Thomas Monjalon
2022-05-23 14:10             ` Spike Du
2022-05-23 14:39               ` Thomas Monjalon
2022-05-24  6:35                 ` Andrew Rybchenko
2022-05-24  9:40                   ` Morten Brørup
2022-05-22  5:58       ` [RFC v2 4/7] net/mlx5: add LWM event handling support Spike Du
2022-05-22  5:58       ` [RFC v2 5/7] net/mlx5: support Rx queue based limit watermark Spike Du
2022-05-22  5:58       ` [RFC v2 6/7] net/mlx5: add private API to config host port shaper Spike Du
2022-05-22  5:59       ` [RFC v2 7/7] app/testpmd: add LWM and Host Shaper command Spike Du
2022-05-24 15:20       ` [PATCH v3 0/7] introduce per-queue limit watermark and host shaper Spike Du
2022-05-24 15:20         ` [PATCH v3 1/7] net/mlx5: add LWM support for Rxq Spike Du
2022-05-24 15:20         ` [PATCH v3 2/7] common/mlx5: share interrupt management Spike Du
2022-05-24 15:20         ` [PATCH v3 3/7] ethdev: introduce Rx queue based limit watermark Spike Du
2022-05-24 15:20         ` [PATCH v3 4/7] net/mlx5: add LWM event handling support Spike Du
2022-05-24 15:20         ` [PATCH v3 5/7] net/mlx5: support Rx queue based limit watermark Spike Du
2022-05-24 15:20         ` [PATCH v3 6/7] net/mlx5: add private API to config host port shaper Spike Du
2022-05-24 15:20         ` [PATCH v3 7/7] app/testpmd: add LWM and Host Shaper command Spike Du
2022-05-24 15:59         ` [PATCH v3 0/7] introduce per-queue limit watermark and host shaper Thomas Monjalon
2022-05-24 19:00           ` Morten Brørup
2022-05-24 19:22             ` Thomas Monjalon
2022-05-25 14:11               ` Andrew Rybchenko
2022-05-25 13:14             ` Spike Du
2022-05-25 13:40               ` Morten Brørup
2022-05-25 13:59                 ` Spike Du
2022-05-25 14:16                   ` Morten Brørup
2022-05-25 14:30                     ` Andrew Rybchenko
2022-06-03 12:48         ` [PATCH v4 0/7] introduce per-queue fill threshold " Spike Du
2022-06-03 12:48           ` [PATCH v4 1/7] net/mlx5: add LWM support for Rxq Spike Du
2022-06-03 12:48           ` [PATCH v4 2/7] common/mlx5: share interrupt management Spike Du
2022-06-03 14:30             ` Ray Kinsella
2022-06-03 12:48           ` [PATCH v4 3/7] ethdev: introduce Rx queue based fill threshold Spike Du
2022-06-03 14:30             ` Ray Kinsella
2022-06-04 12:46             ` Andrew Rybchenko
2022-06-06 13:16               ` Spike Du
2022-06-06 17:15                 ` Andrew Rybchenko
2022-06-06 21:30                   ` Thomas Monjalon
2022-06-07  8:02                     ` Andrew Rybchenko
2022-06-07  6:00                   ` Spike Du
2022-06-06 15:49             ` Stephen Hemminger
2022-06-03 12:48           ` [PATCH v4 4/7] net/mlx5: add LWM event handling support Spike Du
2022-06-03 12:48           ` [PATCH v4 5/7] net/mlx5: support Rx queue based fill threshold Spike Du
2022-06-03 12:48           ` [PATCH v4 6/7] net/mlx5: add private API to config host port shaper Spike Du
2022-06-03 14:55             ` Ray Kinsella
2022-06-03 12:48           ` [PATCH v4 7/7] app/testpmd: add Host Shaper command Spike Du
2022-06-07 12:59           ` [PATCH v5 0/7] introduce per-queue available descriptor threshold and host shaper Spike Du
2022-06-07 12:59             ` [PATCH v5 1/7] net/mlx5: add LWM support for Rxq Spike Du
2022-06-08 20:10               ` Matan Azrad
2022-06-07 12:59             ` [PATCH v5 2/7] common/mlx5: share interrupt management Spike Du
2022-06-07 12:59             ` Spike Du [this message]
2022-06-07 12:59             ` [PATCH v5 4/7] net/mlx5: add LWM event handling support Spike Du
2022-06-07 12:59             ` [PATCH v5 5/7] net/mlx5: support Rx queue based available descriptor threshold Spike Du
2022-06-07 12:59             ` [PATCH v5 6/7] net/mlx5: add private API to config host port shaper Spike Du
2022-06-07 12:59             ` [PATCH v5 7/7] app/testpmd: add Host Shaper command Spike Du
2022-06-09  7:55               ` Andrew Rybchenko
2022-06-10  2:22                 ` Spike Du
2022-06-13  2:50               ` [PATCH v6] " Spike Du
2022-06-13  2:50                 ` Spike Du
2022-06-14  9:43                   ` Singh, Aman Deep
2022-06-14  9:54                     ` Spike Du
2022-06-14 12:01                   ` [PATCH v7] " Spike Du
2022-06-14 12:01                     ` Spike Du
2022-06-15  7:51                       ` Matan Azrad
2022-06-15 11:08                       ` Thomas Monjalon
2022-06-15 12:58                       ` [PATCH v8 0/6] introduce per-queue available descriptor threshold and host shaper Spike Du
2022-06-15 12:58                         ` [PATCH v8 1/6] net/mlx5: add LWM support for Rxq Spike Du
2022-06-15 14:43                           ` [PATCH v9 0/6] introduce per-queue available descriptor threshold and host shaper Spike Du
2022-06-15 14:43                             ` [PATCH v9 1/6] net/mlx5: add LWM support for Rxq Spike Du
2022-06-16  8:41                               ` [PATCH v10 0/6] introduce per-queue available descriptor threshold and host shaper Spike Du
2022-06-16  8:41                                 ` [PATCH v10 1/6] net/mlx5: add LWM support for Rxq Spike Du
2022-06-16  8:41                                 ` [PATCH v10 2/6] common/mlx5: share interrupt management Spike Du
2022-06-23 16:05                                   ` Ray Kinsella
2022-06-16  8:41                                 ` [PATCH v10 3/6] net/mlx5: add LWM event handling support Spike Du
2022-06-16  8:41                                 ` [PATCH v10 4/6] net/mlx5: support Rx queue based available descriptor threshold Spike Du
2022-06-16  8:41                                 ` [PATCH v10 5/6] net/mlx5: add private API to config host port shaper Spike Du
2022-06-16  8:41                                 ` [PATCH v10 6/6] app/testpmd: add Host Shaper command Spike Du
2022-06-19  8:14                                 ` [PATCH v10 0/6] introduce per-queue available descriptor threshold and host shaper Raslan Darawsheh
2022-06-15 14:43                             ` [PATCH v9 2/6] common/mlx5: share interrupt management Spike Du
2022-06-15 14:43                             ` [PATCH v9 3/6] net/mlx5: add LWM event handling support Spike Du
2022-06-15 14:43                             ` [PATCH v9 4/6] net/mlx5: support Rx queue based available descriptor threshold Spike Du
2022-06-15 14:43                             ` [PATCH v9 5/6] net/mlx5: add private API to config host port shaper Spike Du
2022-06-15 14:43                             ` [PATCH v9 6/6] app/testpmd: add Host Shaper command Spike Du
2022-06-15 12:58                         ` [PATCH v8 2/6] common/mlx5: share interrupt management Spike Du
2022-06-15 12:58                         ` [PATCH v8 3/6] net/mlx5: add LWM event handling support Spike Du
2022-06-15 12:58                         ` [PATCH v8 4/6] net/mlx5: support Rx queue based available descriptor threshold Spike Du
2022-06-15 12:58                         ` [PATCH v8 5/6] net/mlx5: add private API to config host port shaper Spike Du
2022-06-15 12:58                         ` [PATCH v8 6/6] app/testpmd: add Host Shaper command Spike Du
2022-06-08  9:43             ` [PATCH v5 0/7] introduce per-queue available descriptor threshold and host shaper Andrew Rybchenko
2022-06-08 16:35             ` [PATCH v6] ethdev: introduce available Rx descriptors threshold Andrew Rybchenko
2022-06-08 17:22               ` Thomas Monjalon
2022-06-08 17:46                 ` Thomas Monjalon
2022-06-09  0:17                   ` fengchengwen
2022-06-09  7:05                     ` Thomas Monjalon
2022-06-10  0:01                       ` fengchengwen
2022-04-01  3:22 ` [RFC 2/6] common/mlx5: share interrupt management Spike Du
2022-04-01  3:22 ` [RFC 3/6] net/mlx5: add LWM event handling support Spike Du
2022-04-01  3:22 ` [RFC 4/6] net/mlx5: add private API to configure Rxq LWM Spike Du
2022-04-01  3:22 ` [RFC 5/6] net/mlx5: add private API to config host port shaper Spike Du
2022-04-01  3:22 ` [RFC 6/6] app/testpmd: add LWM and Host Shaper command Spike Du
2022-04-05  8:58 ` [RFC 0/6] net/mlx5: introduce limit watermark and host shaper Jerin Jacob
2022-04-26  2:42   ` Spike Du
2022-05-01 12:50     ` Jerin Jacob
2022-05-02  3:58       ` Spike Du
2022-04-29  5:48   ` Spike Du
