From: Yuval Mintz <Yuval.Mintz@cavium.com>
To: <davem@davemloft.net>, <netdev@vger.kernel.org>
Cc: Yuval Mintz <Yuval.Mintz@cavium.com>
Subject: [PATCH net-next 01/11] qed: Add bitmaps for VF CIDs
Date: Sun, 4 Jun 2017 13:30:59 +0300
Message-ID: <20170604103109.3082-2-Yuval.Mintz@cavium.com>
In-Reply-To: <20170604103109.3082-1-Yuval.Mintz@cavium.com>

Each PF has a bitmap for its own range of CIDs, allowing it to easily
grab an available CID when one is needed. VFs, however, do not use the
same mechanism; instead they rely on hard-coded CIDs [queue-index == cid].

As an infrastructure step toward increasing the number of CIDs available
to VFs, have the PF maintain bitmaps for the VF CIDs as well - the
bitmaps are per-VF, while the ranges are identical for all VFs [in HW,
all VFs of a given PF share the same CID mapping, and the HW
distinguishes between them according to the VF index].
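
In essence, the allocation scheme is a per-[type][vf] bitmap from which a
relative CID is grabbed via find_first_zero_bit() and then offset by the
range's start. A simplified, standalone sketch of the idea (illustrative
only - the struct and function names below are placeholders rather than
the actual driver code, though the fields mirror qed_cid_acquired_map):

	#include <linux/bitmap.h>
	#include <linux/types.h>

	/* Mirrors the per-range CID bitmap kept by the PF */
	struct cid_map {
		unsigned long *cid_map;	/* one bit per CID in the range */
		u32 start_cid;		/* first absolute CID of the range */
		u32 max_count;		/* number of CIDs in the range */
	};

	static int cid_map_acquire(struct cid_map *p_map, u32 *p_cid)
	{
		u32 rel_cid = find_first_zero_bit(p_map->cid_map,
						  p_map->max_count);

		if (rel_cid >= p_map->max_count)
			return -EINVAL;		/* range exhausted */

		__set_bit(rel_cid, p_map->cid_map);
		*p_cid = p_map->start_cid + rel_cid;	/* absolute CID */
		return 0;
	}

Each bitmap is sized as DIV_ROUND_UP(count, BITS_PER_LONG) unsigned longs;
e.g., a range of 100 CIDs on a 64-bit host takes two longs (16 bytes).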

Signed-off-by: Yuval Mintz <Yuval.Mintz@cavium.com>
---
 drivers/net/ethernet/qlogic/qed/qed_cxt.c | 222 ++++++++++++++++++++++--------
 drivers/net/ethernet/qlogic/qed/qed_cxt.h |  54 ++++++--
 2 files changed, 202 insertions(+), 74 deletions(-)

diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.c b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
index 6948457..25d5b91 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.c
@@ -135,7 +135,6 @@ struct qed_tid_seg {
 
 struct qed_conn_type_cfg {
 	u32 cid_count;
-	u32 cid_start;
 	u32 cids_per_vf;
 	struct qed_tid_seg tid_seg[TASK_SEGMENTS];
 };
@@ -222,6 +221,9 @@ struct qed_cxt_mngr {
 	/* Acquired CIDs */
 	struct qed_cid_acquired_map	acquired[MAX_CONN_TYPES];
 
+	struct qed_cid_acquired_map
+	acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
+
 	/* ILT  shadow table */
 	struct qed_dma_mem		*ilt_shadow;
 	u32				pf_start_line;
@@ -1121,45 +1123,76 @@ static int qed_ilt_shadow_alloc(struct qed_hwfn *p_hwfn)
 static void qed_cid_map_free(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	u32 type;
+	u32 type, vf;
 
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
 		kfree(p_mngr->acquired[type].cid_map);
 		p_mngr->acquired[type].max_count = 0;
 		p_mngr->acquired[type].start_cid = 0;
+
+		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+			kfree(p_mngr->acquired_vf[type][vf].cid_map);
+			p_mngr->acquired_vf[type][vf].max_count = 0;
+			p_mngr->acquired_vf[type][vf].start_cid = 0;
+		}
 	}
 }
 
+static int
+qed_cid_map_alloc_single(struct qed_hwfn *p_hwfn,
+			 u32 type,
+			 u32 cid_start,
+			 u32 cid_count, struct qed_cid_acquired_map *p_map)
+{
+	u32 size;
+
+	if (!cid_count)
+		return 0;
+
+	size = DIV_ROUND_UP(cid_count,
+			    sizeof(unsigned long) * BITS_PER_BYTE) *
+	       sizeof(unsigned long);
+	p_map->cid_map = kzalloc(size, GFP_KERNEL);
+	if (!p_map->cid_map)
+		return -ENOMEM;
+
+	p_map->max_count = cid_count;
+	p_map->start_cid = cid_start;
+
+	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+		   "Type %08x start: %08x count %08x\n",
+		   type, p_map->start_cid, p_map->max_count);
+
+	return 0;
+}
+
 static int qed_cid_map_alloc(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	u32 start_cid = 0;
-	u32 type;
+	u32 start_cid = 0, vf_start_cid = 0;
+	u32 type, vf;
 
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
-		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
-		u32 size;
+		struct qed_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[type];
+		struct qed_cid_acquired_map *p_map;
 
-		if (cid_cnt == 0)
-			continue;
-
-		size = DIV_ROUND_UP(cid_cnt,
-				    sizeof(unsigned long) * BITS_PER_BYTE) *
-		       sizeof(unsigned long);
-		p_mngr->acquired[type].cid_map = kzalloc(size, GFP_KERNEL);
-		if (!p_mngr->acquired[type].cid_map)
+		/* Handle PF maps */
+		p_map = &p_mngr->acquired[type];
+		if (qed_cid_map_alloc_single(p_hwfn, type, start_cid,
+					     p_cfg->cid_count, p_map))
 			goto cid_map_fail;
 
-		p_mngr->acquired[type].max_count = cid_cnt;
-		p_mngr->acquired[type].start_cid = start_cid;
-
-		p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid;
+		/* Handle VF maps */
+		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+			p_map = &p_mngr->acquired_vf[type][vf];
+			if (qed_cid_map_alloc_single(p_hwfn, type,
+						     vf_start_cid,
+						     p_cfg->cids_per_vf, p_map))
+				goto cid_map_fail;
+		}
 
-		DP_VERBOSE(p_hwfn, QED_MSG_CXT,
-			   "Type %08x start: %08x count %08x\n",
-			   type, p_mngr->acquired[type].start_cid,
-			   p_mngr->acquired[type].max_count);
-		start_cid += cid_cnt;
+		start_cid += p_cfg->cid_count;
+		vf_start_cid += p_cfg->cids_per_vf;
 	}
 
 	return 0;
@@ -1265,19 +1298,36 @@ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn)
 void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_cid_acquired_map *p_map;
+	struct qed_conn_type_cfg *p_cfg;
 	int type;
+	u32 len;
 
 	/* Reset acquired cids */
 	for (type = 0; type < MAX_CONN_TYPES; type++) {
-		u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count;
+		u32 vf;
+
+		p_cfg = &p_mngr->conn_cfg[type];
+		if (p_cfg->cid_count) {
+			p_map = &p_mngr->acquired[type];
+			len = DIV_ROUND_UP(p_map->max_count,
+					   sizeof(unsigned long) *
+					   BITS_PER_BYTE) *
+			      sizeof(unsigned long);
+			memset(p_map->cid_map, 0, len);
+		}
 
-		if (cid_cnt == 0)
+		if (!p_cfg->cids_per_vf)
 			continue;
 
-		memset(p_mngr->acquired[type].cid_map, 0,
-		       DIV_ROUND_UP(cid_cnt,
-				    sizeof(unsigned long) * BITS_PER_BYTE) *
-		       sizeof(unsigned long));
+		for (vf = 0; vf < MAX_NUM_VFS; vf++) {
+			p_map = &p_mngr->acquired_vf[type][vf];
+			len = DIV_ROUND_UP(p_map->max_count,
+					   sizeof(unsigned long) *
+					   BITS_PER_BYTE) *
+			      sizeof(unsigned long);
+			memset(p_map->cid_map, 0, len);
+		}
 	}
 }
 
@@ -1841,91 +1891,145 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	qed_prs_init_pf(p_hwfn);
 }
 
-int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
-			enum protocol_type type, u32 *p_cid)
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			 enum protocol_type type, u32 *p_cid, u8 vfid)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_cid_acquired_map *p_map;
 	u32 rel_cid;
 
-	if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) {
+	if (type >= MAX_CONN_TYPES) {
+		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
+		return -EINVAL;
+	}
+
+	if (vfid >= MAX_NUM_VFS && vfid != QED_CXT_PF_CID) {
+		DP_NOTICE(p_hwfn, "VF [%02x] is out of range\n", vfid);
+		return -EINVAL;
+	}
+
+	/* Determine the right map to take this CID from */
+	if (vfid == QED_CXT_PF_CID)
+		p_map = &p_mngr->acquired[type];
+	else
+		p_map = &p_mngr->acquired_vf[type][vfid];
+
+	if (!p_map->cid_map) {
 		DP_NOTICE(p_hwfn, "Invalid protocol type %d", type);
 		return -EINVAL;
 	}
 
-	rel_cid = find_first_zero_bit(p_mngr->acquired[type].cid_map,
-				      p_mngr->acquired[type].max_count);
+	rel_cid = find_first_zero_bit(p_map->cid_map, p_map->max_count);
 
-	if (rel_cid >= p_mngr->acquired[type].max_count) {
+	if (rel_cid >= p_map->max_count) {
 		DP_NOTICE(p_hwfn, "no CID available for protocol %d\n", type);
 		return -EINVAL;
 	}
 
-	__set_bit(rel_cid, p_mngr->acquired[type].cid_map);
+	__set_bit(rel_cid, p_map->cid_map);
+
+	*p_cid = rel_cid + p_map->start_cid;
 
-	*p_cid = rel_cid + p_mngr->acquired[type].start_cid;
+	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+		   "Acquired cid 0x%08x [rel. %08x] vfid %02x type %d\n",
+		   *p_cid, rel_cid, vfid, type);
 
 	return 0;
 }
 
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			enum protocol_type type, u32 *p_cid)
+{
+	return _qed_cxt_acquire_cid(p_hwfn, type, p_cid, QED_CXT_PF_CID);
+}
+
 static bool qed_cxt_test_cid_acquired(struct qed_hwfn *p_hwfn,
-				      u32 cid, enum protocol_type *p_type)
+				      u32 cid,
+				      u8 vfid,
+				      enum protocol_type *p_type,
+				      struct qed_cid_acquired_map **pp_map)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
-	struct qed_cid_acquired_map *p_map;
-	enum protocol_type p;
 	u32 rel_cid;
 
 	/* Iterate over protocols and find matching cid range */
-	for (p = 0; p < MAX_CONN_TYPES; p++) {
-		p_map = &p_mngr->acquired[p];
+	for (*p_type = 0; *p_type < MAX_CONN_TYPES; (*p_type)++) {
+		if (vfid == QED_CXT_PF_CID)
+			*pp_map = &p_mngr->acquired[*p_type];
+		else
+			*pp_map = &p_mngr->acquired_vf[*p_type][vfid];
 
-		if (!p_map->cid_map)
+		if (!((*pp_map)->cid_map))
 			continue;
-		if (cid >= p_map->start_cid &&
-		    cid < p_map->start_cid + p_map->max_count)
+		if (cid >= (*pp_map)->start_cid &&
+		    cid < (*pp_map)->start_cid + (*pp_map)->max_count)
 			break;
 	}
-	*p_type = p;
 
-	if (p == MAX_CONN_TYPES) {
-		DP_NOTICE(p_hwfn, "Invalid CID %d", cid);
-		return false;
+	if (*p_type == MAX_CONN_TYPES) {
+		DP_NOTICE(p_hwfn, "Invalid CID %d vfid %02x", cid, vfid);
+		goto fail;
 	}
 
-	rel_cid = cid - p_map->start_cid;
-	if (!test_bit(rel_cid, p_map->cid_map)) {
-		DP_NOTICE(p_hwfn, "CID %d not acquired", cid);
-		return false;
+	rel_cid = cid - (*pp_map)->start_cid;
+	if (!test_bit(rel_cid, (*pp_map)->cid_map)) {
+		DP_NOTICE(p_hwfn, "CID %d [vfid %02x] not acquired",
+			  cid, vfid);
+		goto fail;
 	}
+
 	return true;
+fail:
+	*p_type = MAX_CONN_TYPES;
+	*pp_map = NULL;
+	return false;
 }
 
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid)
 {
-	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_cid_acquired_map *p_map = NULL;
 	enum protocol_type type;
 	bool b_acquired;
 	u32 rel_cid;
 
+	if (vfid != QED_CXT_PF_CID && vfid >= MAX_NUM_VFS) {
+		DP_NOTICE(p_hwfn,
+			  "Trying to return incorrect CID belonging to VF %02x\n",
+			  vfid);
+		return;
+	}
+
 	/* Test acquired and find matching per-protocol map */
-	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, &type);
+	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, cid, vfid,
+					       &type, &p_map);
 
 	if (!b_acquired)
 		return;
 
-	rel_cid = cid - p_mngr->acquired[type].start_cid;
-	__clear_bit(rel_cid, p_mngr->acquired[type].cid_map);
+	rel_cid = cid - p_map->start_cid;
+	clear_bit(rel_cid, p_map->cid_map);
+
+	DP_VERBOSE(p_hwfn, QED_MSG_CXT,
+		   "Released CID 0x%08x [rel. %08x] vfid %02x type %d\n",
+		   cid, rel_cid, vfid, type);
+}
+
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid)
+{
+	_qed_cxt_release_cid(p_hwfn, cid, QED_CXT_PF_CID);
 }
 
 int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info)
 {
 	struct qed_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr;
+	struct qed_cid_acquired_map *p_map = NULL;
 	u32 conn_cxt_size, hw_p_size, cxts_per_p, line;
 	enum protocol_type type;
 	bool b_acquired;
 
 	/* Test acquired and find matching per-protocol map */
-	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type);
+	b_acquired = qed_cxt_test_cid_acquired(p_hwfn, p_info->iid,
+					       QED_CXT_PF_CID, &type, &p_map);
 
 	if (!b_acquired)
 		return -EINVAL;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
index 53ad532..1783634 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h
@@ -54,19 +54,6 @@ struct qed_tid_mem {
 };
 
 /**
- * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
- *
- * @param p_hwfn
- * @param type
- * @param p_cid
- *
- * @return int
- */
-int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
-			enum protocol_type type,
-			u32 *p_cid);
-
-/**
  * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid
  *
  *
@@ -195,14 +182,51 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  */
 int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 
+#define QED_CXT_PF_CID (0xff)
+
 /**
  * @brief qed_cxt_release - Release a cid
  *
  * @param p_hwfn
  * @param cid
  */
-void qed_cxt_release_cid(struct qed_hwfn *p_hwfn,
-			 u32 cid);
+void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
+
+/**
+ * @brief _qed_cxt_release_cid - Release a cid belonging to a vf-queue
+ *
+ * @param p_hwfn
+ * @param cid
+ * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ */
+void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
+
+/**
+ * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ *
+ * @return int
+ */
+int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			enum protocol_type type, u32 *p_cid);
+
+/**
+ * @brief _qed_cxt_acquire_cid - Acquire a new cid of a specific protocol type
+ *                           for a vf-queue
+ *
+ * @param p_hwfn
+ * @param type
+ * @param p_cid
+ * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF
+ *
+ * @return int
+ */
+int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
+			 enum protocol_type type, u32 *p_cid, u8 vfid);
+
 int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
 			      enum qed_cxt_elem_type elem_type, u32 iid);
 u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
-- 
2.9.4
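
For context, a minimal usage sketch of the vfid-aware acquire/release pair
introduced above (a hypothetical caller, not part of the patch; it assumes
the qed_cxt.h declarations are in scope and uses PROTOCOLID_ETH and VF
index 3 purely as examples):

	/* Illustrative only - error handling trimmed to the essentials */
	static int example_vf_cid_usage(struct qed_hwfn *p_hwfn)
	{
		u32 pf_cid, vf_cid;
		int rc;

		/* PF-owned CID - same as the legacy qed_cxt_acquire_cid() */
		rc = _qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH,
					  &pf_cid, QED_CXT_PF_CID);
		if (rc)
			return rc;

		/* CID taken from the per-VF bitmap of VF index 3 */
		rc = _qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &vf_cid, 3);
		if (rc) {
			qed_cxt_release_cid(p_hwfn, pf_cid);
			return rc;
		}

		/* ... use the CIDs ... */

		_qed_cxt_release_cid(p_hwfn, vf_cid, 3);
		qed_cxt_release_cid(p_hwfn, pf_cid);
		return 0;
	}

Note that all VF ranges of a given type start from the same vf_start_cid,
so the absolute CIDs returned for different VFs of the same PF may be
numerically identical; the HW disambiguates them by the VF index.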

Thread overview: 13+ messages
2017-06-04 10:30 (unknown), Yuval Mintz
2017-06-04 10:30 ` Yuval Mintz [this message]
2017-06-04 10:31 ` [PATCH net-next 02/11] qed: Create L2 queue database Yuval Mintz
2017-06-04 10:31 ` [PATCH net-next 03/11] qed*: L2 interface to use the SB structures directly Yuval Mintz
2017-06-04 10:31 ` [PATCH net-next 04/11] qed: Pass vf_params when creating a queue-cid Yuval Mintz
2017-06-04 10:31 ` [PATCH net-next 05/11] qed: Assign a unique per-queue index to queue-cid Yuval Mintz
2017-06-04 10:31 ` [PATCH net-next 06/11] qed: Make VF legacy a bitfield Yuval Mintz
2017-06-04 10:31 ` [PATCH net-next 07/11] qed: IOV db support multiple queues per qzone Yuval Mintz
2017-06-04 10:31 ` [PATCH net-next 08/11] qed: Multiple qzone queues for VFs Yuval Mintz
2017-06-04 10:31 ` [PATCH net-next 09/11] qed: VFs to try utilizing the doorbell bar Yuval Mintz
2017-06-04 10:31 ` [PATCH net-next 10/11] qed: VF XDP support Yuval Mintz
2017-06-04 10:31 ` [PATCH net-next 11/11] qede: " Yuval Mintz
2017-06-05  3:09 ` [PATCH net-next 00/11] qed*: Support VF XDP attachment David Miller
