From: Mike Ximing Chen <mike.ximing.chen@intel.com>
To: netdev@vger.kernel.org
Cc: davem@davemloft.net, kuba@kernel.org, arnd@arndb.de,
	gregkh@linuxfoundation.org, dan.j.williams@intel.com,
	pierre-louis.bossart@linux.intel.com,
	Gage Eads <gage.eads@intel.com>
Subject: [PATCH v10 18/20] dlb: add dynamic queue map register operations
Date: Wed, 10 Feb 2021 11:54:21 -0600
Message-ID: <20210210175423.1873-19-mike.ximing.chen@intel.com>
In-Reply-To: <20210210175423.1873-1-mike.ximing.chen@intel.com>

Add the "dynamic" map procedure and its register operations. If a queue
map is requested after the domain has been started, the driver must
first disable the requested queue and wait for it to quiesce (i.e., for
its inflight count to reach zero) before mapping it to the requested
port.

Signed-off-by: Gage Eads <gage.eads@intel.com>
Signed-off-by: Mike Ximing Chen <mike.ximing.chen@intel.com>
Reviewed-by: Björn Töpel <bjorn.topel@intel.com>
Reviewed-by: Dan Williams <dan.j.williams@intel.com>
---
 drivers/misc/dlb/dlb_resource.c | 393 +++++++++++++++++++++++++++++++-
 1 file changed, 392 insertions(+), 1 deletion(-)

diff --git a/drivers/misc/dlb/dlb_resource.c b/drivers/misc/dlb/dlb_resource.c
index 95ccb7eddb8b..93a3de642024 100644
--- a/drivers/misc/dlb/dlb_resource.c
+++ b/drivers/misc/dlb/dlb_resource.c
@@ -328,6 +328,37 @@ dlb_get_domain_dir_pq(u32 id, bool vdev_req, struct dlb_hw_domain *domain)
 	return NULL;
 }
 
+static struct dlb_ldb_queue *
+dlb_get_ldb_queue_from_id(struct dlb_hw *hw, u32 id, bool vdev_req,
+			  unsigned int vdev_id)
+{
+	struct dlb_function_resources *rsrcs;
+	struct dlb_hw_domain *domain;
+	struct dlb_ldb_queue *queue;
+
+	if (id >= DLB_MAX_NUM_LDB_QUEUES)
+		return NULL;
+
+	if (!vdev_req)
+		return &hw->rsrcs.ldb_queues[id];
+
+	rsrcs = &hw->vdev[vdev_id];
+
+	list_for_each_entry(domain, &rsrcs->used_domains, func_list) {
+		list_for_each_entry(queue, &domain->used_ldb_queues, domain_list) {
+			if (queue->id.virt_id == id)
+				return queue;
+		}
+	}
+
+	list_for_each_entry(queue, &rsrcs->avail_ldb_queues, func_list) {
+		if (queue->id.virt_id == id)
+			return queue;
+	}
+
+	return NULL;
+}
+
 static struct dlb_ldb_queue *
 dlb_get_domain_ldb_queue(u32 id, bool vdev_req, struct dlb_hw_domain *domain)
 {
@@ -2251,6 +2282,75 @@ static void dlb_ldb_port_change_qid_priority(struct dlb_hw *hw,
 	port->qid_map[slot].priority = args->priority;
 }
 
+static int dlb_ldb_port_set_has_work_bits(struct dlb_hw *hw,
+					  struct dlb_ldb_port *port,
+					  struct dlb_ldb_queue *queue, int slot)
+{
+	u32 ctrl = 0;
+	u32 active;
+	u32 enq;
+
+	/* Set the atomic scheduling haswork bit */
+	active = DLB_CSR_RD(hw, LSP_QID_AQED_ACTIVE_CNT(queue->id.phys_id));
+
+	BITS_SET(ctrl, port->id.phys_id, LSP_LDB_SCHED_CTRL_CQ);
+	BITS_SET(ctrl, slot, LSP_LDB_SCHED_CTRL_QIDIX);
+	ctrl |= LSP_LDB_SCHED_CTRL_VALUE;
+	BITS_SET(ctrl, (u32)(BITS_GET(active, LSP_QID_AQED_ACTIVE_CNT_COUNT) > 0),
+		 LSP_LDB_SCHED_CTRL_RLIST_HASWORK_V);
+
+	DLB_CSR_WR(hw, LSP_LDB_SCHED_CTRL, ctrl);
+
+	/* Set the non-atomic scheduling haswork bit */
+	enq = DLB_CSR_RD(hw, LSP_QID_LDB_ENQUEUE_CNT(queue->id.phys_id));
+
+	ctrl = 0;
+
+	BITS_SET(ctrl, port->id.phys_id, LSP_LDB_SCHED_CTRL_CQ);
+	BITS_SET(ctrl, slot, LSP_LDB_SCHED_CTRL_QIDIX);
+	ctrl |= LSP_LDB_SCHED_CTRL_VALUE;
+	BITS_SET(ctrl, (u32)(BITS_GET(enq, LSP_QID_LDB_ENQUEUE_CNT_COUNT) > 0),
+		 LSP_LDB_SCHED_CTRL_NALB_HASWORK_V);
+
+	DLB_CSR_WR(hw, LSP_LDB_SCHED_CTRL, ctrl);
+
+	dlb_flush_csr(hw);
+
+	return 0;
+}
+
+static void dlb_ldb_port_clear_queue_if_status(struct dlb_hw *hw,
+					       struct dlb_ldb_port *port,
+					       int slot)
+{
+	u32 ctrl = 0;
+
+	BITS_SET(ctrl, port->id.phys_id, LSP_LDB_SCHED_CTRL_CQ);
+	BITS_SET(ctrl, slot, LSP_LDB_SCHED_CTRL_QIDIX);
+	ctrl |= LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V;
+
+	DLB_CSR_WR(hw, LSP_LDB_SCHED_CTRL, ctrl);
+
+	dlb_flush_csr(hw);
+}
+
+static void dlb_ldb_port_set_queue_if_status(struct dlb_hw *hw,
+					     struct dlb_ldb_port *port,
+					     int slot)
+{
+	u32 ctrl = 0;
+
+	BITS_SET(ctrl, port->id.phys_id, LSP_LDB_SCHED_CTRL_CQ);
+	BITS_SET(ctrl, slot, LSP_LDB_SCHED_CTRL_QIDIX);
+	ctrl |= LSP_LDB_SCHED_CTRL_VALUE;
+	ctrl |= LSP_LDB_SCHED_CTRL_INFLIGHT_OK_V;
+
+	DLB_CSR_WR(hw, LSP_LDB_SCHED_CTRL, ctrl);
+
+	dlb_flush_csr(hw);
+}
+
 static void dlb_ldb_queue_set_inflight_limit(struct dlb_hw *hw,
 					     struct dlb_ldb_queue *queue)
 {
@@ -2261,11 +2361,222 @@ static void dlb_ldb_queue_set_inflight_limit(struct dlb_hw *hw,
 	DLB_CSR_WR(hw, LSP_QID_LDB_INFL_LIM(queue->id.phys_id), infl_lim);
 }
 
+static void dlb_ldb_queue_clear_inflight_limit(struct dlb_hw *hw,
+					       struct dlb_ldb_queue *queue)
+{
+	DLB_CSR_WR(hw,
+		   LSP_QID_LDB_INFL_LIM(queue->id.phys_id),
+		   LSP_QID_LDB_INFL_LIM_RST);
+}
+
+/*
+ * dlb_ldb_queue_{enable, disable}_mapped_cqs() don't operate exactly as
+ * their names imply: they only toggle CQs whose software 'enabled' flag
+ * is set, and they leave that flag unchanged. They should only be called
+ * by the dynamic CQ mapping code.
+ */
+static void dlb_ldb_queue_disable_mapped_cqs(struct dlb_hw *hw,
+					     struct dlb_hw_domain *domain,
+					     struct dlb_ldb_queue *queue)
+{
+	struct dlb_ldb_port *port;
+	int slot, i;
+
+	for (i = 0; i < DLB_NUM_COS_DOMAINS; i++) {
+		list_for_each_entry(port, &domain->used_ldb_ports[i], domain_list) {
+			enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;
+
+			if (!dlb_port_find_slot_queue(port, state,
+						      queue, &slot))
+				continue;
+
+			if (port->enabled)
+				dlb_ldb_port_cq_disable(hw, port);
+		}
+	}
+}
+
+static void dlb_ldb_queue_enable_mapped_cqs(struct dlb_hw *hw,
+					    struct dlb_hw_domain *domain,
+					    struct dlb_ldb_queue *queue)
+{
+	struct dlb_ldb_port *port;
+	int slot, i;
+
+	for (i = 0; i < DLB_NUM_COS_DOMAINS; i++) {
+		list_for_each_entry(port, &domain->used_ldb_ports[i], domain_list) {
+			enum dlb_qid_map_state state = DLB_QUEUE_MAPPED;
+
+			if (!dlb_port_find_slot_queue(port, state,
+						      queue, &slot))
+				continue;
+
+			if (port->enabled)
+				dlb_ldb_port_cq_enable(hw, port);
+		}
+	}
+}
+
+static int dlb_ldb_port_finish_map_qid_dynamic(struct dlb_hw *hw,
+					       struct dlb_hw_domain *domain,
+					       struct dlb_ldb_port *port,
+					       struct dlb_ldb_queue *queue)
+{
+	enum dlb_qid_map_state state;
+	int slot, ret, i;
+	u32 infl_cnt;
+	u8 prio;
+
+	infl_cnt = DLB_CSR_RD(hw, LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
+
+	if (BITS_GET(infl_cnt, LSP_QID_LDB_INFL_CNT_COUNT)) {
+		DLB_HW_ERR(hw,
+			   "[%s()] Internal error: non-zero QID inflight count\n",
+			   __func__);
+		return -EINVAL;
+	}
+
+	/*
+	 * Statically map the queue to the port and set the corresponding
+	 * has_work bits.
+	 */
+	state = DLB_QUEUE_MAP_IN_PROG;
+	if (!dlb_port_find_slot_queue(port, state, queue, &slot))
+		return -EINVAL;
+
+	prio = port->qid_map[slot].priority;
+
+	/*
+	 * Update the CQ2QID, CQ2PRIOV, and QID2CQIDX registers, and
+	 * the port's qid_map state.
+	 */
+	ret = dlb_ldb_port_map_qid_static(hw, port, queue, prio);
+	if (ret)
+		return ret;
+
+	ret = dlb_ldb_port_set_has_work_bits(hw, port, queue, slot);
+	if (ret)
+		return ret;
+
+	/*
+	 * Ensure IF_status(cq,qid) is 0 before enabling the port to
+	 * prevent spurious schedules from causing the queue's inflight
+	 * count to increase.
+	 */
+	dlb_ldb_port_clear_queue_if_status(hw, port, slot);
+
+	/* Reset the queue's inflight status */
+	for (i = 0; i < DLB_NUM_COS_DOMAINS; i++) {
+		list_for_each_entry(port, &domain->used_ldb_ports[i], domain_list) {
+			state = DLB_QUEUE_MAPPED;
+			if (!dlb_port_find_slot_queue(port, state,
+						      queue, &slot))
+				continue;
+
+			dlb_ldb_port_set_queue_if_status(hw, port, slot);
+		}
+	}
+
+	dlb_ldb_queue_set_inflight_limit(hw, queue);
+
+	/* Re-enable CQs mapped to this queue */
+	dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
+
+	/* If this queue has other mappings pending, clear its inflight limit */
+	if (queue->num_pending_additions > 0)
+		dlb_ldb_queue_clear_inflight_limit(hw, queue);
+
+	return 0;
+}
+
+/**
+ * dlb_ldb_port_map_qid_dynamic() - perform a "dynamic" QID->CQ mapping
+ * @hw: dlb_hw handle for a particular device.
+ * @port: load-balanced port
+ * @queue: load-balanced queue
+ * @priority: queue servicing priority
+ *
+ * Return: 0 if the queue was mapped, 1 if the mapping is scheduled to occur
+ * at a later point, or <0 if an error occurred.
+ */
+static int dlb_ldb_port_map_qid_dynamic(struct dlb_hw *hw,
+					struct dlb_ldb_port *port,
+					struct dlb_ldb_queue *queue,
+					u8 priority)
+{
+	enum dlb_qid_map_state state;
+	struct dlb_hw_domain *domain;
+	int domain_id, slot, ret;
+	u32 infl_cnt;
+
+	domain_id = port->domain_id.phys_id;
+
+	domain = dlb_get_domain_from_id(hw, domain_id, false, 0);
+	if (!domain) {
+		DLB_HW_ERR(hw,
+			   "[%s()] Internal error: unable to find domain %d\n",
+			   __func__, port->domain_id.phys_id);
+		return -EINVAL;
+	}
+
+	/*
+	 * Set the QID inflight limit to 0 to prevent further scheduling of the
+	 * queue.
+	 */
+	DLB_CSR_WR(hw, LSP_QID_LDB_INFL_LIM(queue->id.phys_id), 0);
+
+	if (!dlb_port_find_slot(port, DLB_QUEUE_UNMAPPED, &slot)) {
+		DLB_HW_ERR(hw,
+			   "[%s()] Internal error: no available unmapped slots\n",
+			   __func__);
+		return -EFAULT;
+	}
+
+	port->qid_map[slot].qid = queue->id.phys_id;
+	port->qid_map[slot].priority = priority;
+
+	state = DLB_QUEUE_MAP_IN_PROG;
+	ret = dlb_port_slot_state_transition(hw, port, queue, slot, state);
+	if (ret)
+		return ret;
+
+	infl_cnt = DLB_CSR_RD(hw, LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
+
+	if (BITS_GET(infl_cnt, LSP_QID_LDB_INFL_CNT_COUNT))
+		return 1;
+
+	/*
+	 * Disable the affected CQ, and the CQs already mapped to the QID,
+	 * before reading the QID's inflight count a second time. There is an
+	 * unlikely race in which the QID may schedule one more QE after we
+	 * read an inflight count of 0, and disabling the CQs guarantees that
+	 * the race will not occur after a re-read of the inflight count
+	 * register.
+	 */
+	if (port->enabled)
+		dlb_ldb_port_cq_disable(hw, port);
+
+	dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);
+
+	infl_cnt = DLB_CSR_RD(hw, LSP_QID_LDB_INFL_CNT(queue->id.phys_id));
+
+	if (BITS_GET(infl_cnt, LSP_QID_LDB_INFL_CNT_COUNT)) {
+		if (port->enabled)
+			dlb_ldb_port_cq_enable(hw, port);
+
+		dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
+
+		return 1;
+	}
+
+	return dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
+}
+
 static int dlb_ldb_port_map_qid(struct dlb_hw *hw, struct dlb_hw_domain *domain,
 				struct dlb_ldb_port *port,
 				struct dlb_ldb_queue *queue, u8 prio)
 {
-	return dlb_ldb_port_map_qid_static(hw, port, queue, prio);
+	if (domain->started)
+		return dlb_ldb_port_map_qid_dynamic(hw, port, queue, prio);
+	else
+		return dlb_ldb_port_map_qid_static(hw, port, queue, prio);
 }
 
 static void
@@ -2722,6 +3033,82 @@ int dlb_hw_create_dir_port(struct dlb_hw *hw, u32 domain_id,
 	return 0;
 }
 
+static void dlb_domain_finish_map_port(struct dlb_hw *hw,
+				       struct dlb_hw_domain *domain,
+				       struct dlb_ldb_port *port)
+{
+	int i;
+
+	for (i = 0; i < DLB_MAX_NUM_QIDS_PER_LDB_CQ; i++) {
+		struct dlb_ldb_queue *queue;
+		u32 infl_cnt;
+		int qid;
+
+		if (port->qid_map[i].state != DLB_QUEUE_MAP_IN_PROG)
+			continue;
+
+		qid = port->qid_map[i].qid;
+
+		queue = dlb_get_ldb_queue_from_id(hw, qid, false, 0);
+
+		if (!queue) {
+			DLB_HW_ERR(hw,
+				   "[%s()] Internal error: unable to find queue %d\n",
+				   __func__, qid);
+			continue;
+		}
+
+		infl_cnt = DLB_CSR_RD(hw, LSP_QID_LDB_INFL_CNT(qid));
+
+		if (BITS_GET(infl_cnt, LSP_QID_LDB_INFL_CNT_COUNT))
+			continue;
+
+		/*
+		 * Disable the affected CQ, and the CQs already mapped to the
+		 * QID, before reading the QID's inflight count a second time.
+		 * There is an unlikely race in which the QID may schedule one
+		 * more QE after we read an inflight count of 0, and disabling
+		 * the CQs guarantees that the race will not occur after a
+		 * re-read of the inflight count register.
+		 */
+		if (port->enabled)
+			dlb_ldb_port_cq_disable(hw, port);
+
+		dlb_ldb_queue_disable_mapped_cqs(hw, domain, queue);
+
+		infl_cnt = DLB_CSR_RD(hw, LSP_QID_LDB_INFL_CNT(qid));
+
+		if (BITS_GET(infl_cnt, LSP_QID_LDB_INFL_CNT_COUNT)) {
+			if (port->enabled)
+				dlb_ldb_port_cq_enable(hw, port);
+
+			dlb_ldb_queue_enable_mapped_cqs(hw, domain, queue);
+
+			continue;
+		}
+
+		dlb_ldb_port_finish_map_qid_dynamic(hw, domain, port, queue);
+	}
+}
+
+static unsigned int
+dlb_domain_finish_map_qid_procedures(struct dlb_hw *hw,
+				     struct dlb_hw_domain *domain)
+{
+	struct dlb_ldb_port *port;
+	int i;
+
+	if (!domain->configured || domain->num_pending_additions == 0)
+		return 0;
+
+	for (i = 0; i < DLB_NUM_COS_DOMAINS; i++) {
+		list_for_each_entry(port, &domain->used_ldb_ports[i], domain_list)
+			dlb_domain_finish_map_port(hw, domain, port);
+	}
+
+	return domain->num_pending_additions;
+}
+
 static void dlb_log_map_qid(struct dlb_hw *hw, u32 domain_id,
 			    struct dlb_map_qid_args *args,
 			    bool vdev_req, unsigned int vdev_id)
@@ -4454,6 +4841,10 @@ int dlb_reset_domain(struct dlb_hw *hw, u32 domain_id, bool vdev_req,
 	if (ret)
 		return ret;
 
+	ret = dlb_domain_finish_map_qid_procedures(hw, domain);
+	if (ret)
+		return ret;
+
 	/* Re-enable the CQs in order to drain the mapped queues. */
 	dlb_domain_enable_ldb_cqs(hw, domain);
 
-- 
2.17.1

