From: Jakub Kicinski <jakub.kicinski@netronome.com>
To: netdev@vger.kernel.org
Cc: oss-drivers@netronome.com, Jakub Kicinski <jakub.kicinski@netronome.com>
Subject: [PATCH net-next 11/16] nfp: map all queue controllers at once
Date: Mon,  5 Jun 2017 17:01:52 -0700
Message-ID: <20170606000157.17556-12-jakub.kicinski@netronome.com>
In-Reply-To: <20170606000157.17556-1-jakub.kicinski@netronome.com>

RX and TX queue controllers are interleaved.  Instead of creating
two mappings which map the same area at slightly different offsets,
create only one mapping.  Always map all queue controllers to simplify
the code and allow reusing the mapping for non-data vNICs.

Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
---
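Note: a minimal user-space sketch of the pointer arithmetic this change
relies on.  With a single Queue Controller mapping, each vNIC's TX/RX
queue controller pointers are derived from the queue indices firmware
reports in NFP_NET_CFG_START_TXQ/RXQ, instead of from per-PF TX/RX
sub-mappings.  The constants mirror nfp_net.h; the struct and helper
names below are made up for the example and are not part of the driver.

	#include <stdint.h>
	#include <stdio.h>

	#define NFP_QCP_QUEUE_ADDR_SZ	0x800	/* one queue controller */
	#define NFP_QCP_QUEUE_AREA_SZ	0x80000	/* covers all 256 queue controllers */

	/* hypothetical stand-in for struct nfp_net's queue pointers */
	struct fake_vnic {
		uint8_t *tx_bar;
		uint8_t *rx_bar;
	};

	/* qc_bar: start of the single QC mapping; tx_base/rx_base: values
	 * read from NFP_NET_CFG_START_TXQ / NFP_NET_CFG_START_RXQ.
	 */
	static void vnic_set_queue_bars(struct fake_vnic *nn, uint8_t *qc_bar,
					uint32_t tx_base, uint32_t rx_base)
	{
		nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
		nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
	}

	int main(void)
	{
		static uint8_t qc_area[NFP_QCP_QUEUE_AREA_SZ];
		struct fake_vnic nn;

		/* e.g. a vNIC whose first TX queue is 4 and first RX queue is 5 */
		vnic_set_queue_bars(&nn, qc_area, 4, 5);
		printf("tx_bar offset: %#lx\n", (unsigned long)(nn.tx_bar - qc_area));
		printf("rx_bar offset: %#lx\n", (unsigned long)(nn.rx_bar - qc_area));
		return 0;
	}
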
 drivers/net/ethernet/netronome/nfp/nfp_main.h     |   6 +-
 drivers/net/ethernet/netronome/nfp/nfp_net.h      |   1 +
 drivers/net/ethernet/netronome/nfp/nfp_net_main.c | 122 +++++-----------------
 3 files changed, 28 insertions(+), 101 deletions(-)

diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.h b/drivers/net/ethernet/netronome/nfp/nfp_main.h
index c46d00bbf19d..66b1e1490805 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.h
@@ -63,8 +63,7 @@ struct nfp_nsp_identify;
  * @cpp:		Pointer to the CPP handle
  * @app:		Pointer to the APP handle
  * @data_vnic_bar:	Pointer to the CPP area for the data vNICs' BARs
- * @tx_area:		Pointer to the CPP area for the TX queues
- * @rx_area:		Pointer to the CPP area for the FL/RX queues
+ * @qc_area:		Pointer to the CPP area for the queues
  * @irq_entries:	Array of MSI-X entries for all vNICs
  * @limit_vfs:		Number of VFs supported by firmware (~0 for PCI limit)
  * @num_vfs:		Number of SR-IOV VFs enabled
@@ -88,8 +87,7 @@ struct nfp_pf {
 	struct nfp_app *app;
 
 	struct nfp_cpp_area *data_vnic_bar;
-	struct nfp_cpp_area *tx_area;
-	struct nfp_cpp_area *rx_area;
+	struct nfp_cpp_area *qc_area;
 
 	struct msix_entry *irq_entries;
 
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net.h b/drivers/net/ethernet/netronome/nfp/nfp_net.h
index eb849d26f4dd..02fd8d4e253c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net.h
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net.h
@@ -705,6 +705,7 @@ static inline void nn_pci_flush(struct nfp_net *nn)
  * either add to a pointer or to read the pointer value.
  */
 #define NFP_QCP_QUEUE_ADDR_SZ			0x800
+#define NFP_QCP_QUEUE_AREA_SZ			0x80000
 #define NFP_QCP_QUEUE_OFF(_x)			((_x) * NFP_QCP_QUEUE_ADDR_SZ)
 #define NFP_QCP_QUEUE_ADD_RPTR			0x0000
 #define NFP_QCP_QUEUE_ADD_WPTR			0x0004
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
index 3644b12d93db..2a3b6deae607 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_main.c
@@ -223,31 +223,6 @@ static int nfp_net_pf_get_app_id(struct nfp_pf *pf)
 					      NFP_APP_CORE_NIC);
 }
 
-static unsigned int
-nfp_net_pf_total_qcs(struct nfp_pf *pf, void __iomem *ctrl_bar,
-		     unsigned int stride, u32 start_off, u32 num_off)
-{
-	unsigned int i, min_qc, max_qc;
-
-	min_qc = readl(ctrl_bar + start_off);
-	max_qc = min_qc;
-
-	for (i = 0; i < pf->max_data_vnics; i++) {
-		/* To make our lives simpler only accept configuration where
-		 * queues are allocated to PFs in order (queues of PFn all have
-		 * indexes lower than PFn+1).
-		 */
-		if (max_qc > readl(ctrl_bar + start_off))
-			return 0;
-
-		max_qc = readl(ctrl_bar + start_off);
-		max_qc += readl(ctrl_bar + num_off) * stride;
-		ctrl_bar += NFP_PF_CSR_SLICE_SIZE;
-	}
-
-	return max_qc - min_qc;
-}
-
 static u8 __iomem *
 nfp_net_pf_map_rtsym(struct nfp_pf *pf, const char *name, const char *sym_fmt,
 		     unsigned int min_size, struct nfp_cpp_area **area)
@@ -301,15 +276,16 @@ static void nfp_net_pf_free_vnics(struct nfp_pf *pf)
 
 static struct nfp_net *
 nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
-		      void __iomem *ctrl_bar,
-		      void __iomem *tx_bar, void __iomem *rx_bar,
+		      void __iomem *ctrl_bar, void __iomem *qc_bar,
 		      int stride, struct nfp_net_fw_version *fw_ver,
 		      unsigned int eth_id)
 {
-	u32 n_tx_rings, n_rx_rings;
+	u32 tx_base, rx_base, n_tx_rings, n_rx_rings;
 	struct nfp_net *nn;
 	int err;
 
+	tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
+	rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
 	n_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
 	n_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
 
@@ -321,8 +297,8 @@ nfp_net_pf_alloc_vnic(struct nfp_pf *pf, bool needs_netdev,
 	nn->app = pf->app;
 	nn->fw_ver = *fw_ver;
 	nn->dp.ctrl_bar = ctrl_bar;
-	nn->tx_bar = tx_bar;
-	nn->rx_bar = rx_bar;
+	nn->tx_bar = qc_bar + tx_base * NFP_QCP_QUEUE_ADDR_SZ;
+	nn->rx_bar = qc_bar + rx_base * NFP_QCP_QUEUE_ADDR_SZ;
 	nn->dp.is_vf = 0;
 	nn->stride_rx = stride;
 	nn->stride_tx = stride;
@@ -374,26 +350,15 @@ nfp_net_pf_init_vnic(struct nfp_pf *pf, struct nfp_net *nn, unsigned int id)
 
 static int
 nfp_net_pf_alloc_vnics(struct nfp_pf *pf, void __iomem *ctrl_bar,
-		       void __iomem *tx_bar, void __iomem *rx_bar,
-		       int stride, struct nfp_net_fw_version *fw_ver)
+		       void __iomem *qc_bar, int stride,
+		       struct nfp_net_fw_version *fw_ver)
 {
-	u32 prev_tx_base, prev_rx_base, tgt_tx_base, tgt_rx_base;
 	struct nfp_net *nn;
 	unsigned int i;
 	int err;
 
-	prev_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
-	prev_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
-
 	for (i = 0; i < pf->max_data_vnics; i++) {
-		tgt_tx_base = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
-		tgt_rx_base = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
-		tx_bar += (tgt_tx_base - prev_tx_base) * NFP_QCP_QUEUE_ADDR_SZ;
-		rx_bar += (tgt_rx_base - prev_rx_base) * NFP_QCP_QUEUE_ADDR_SZ;
-		prev_tx_base = tgt_tx_base;
-		prev_rx_base = tgt_rx_base;
-
-		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, tx_bar, rx_bar,
+		nn = nfp_net_pf_alloc_vnic(pf, true, ctrl_bar, qc_bar,
 					   stride, fw_ver, i);
 		if (IS_ERR(nn)) {
 			err = PTR_ERR(nn);
@@ -430,8 +395,7 @@ static void nfp_net_pf_clean_vnic(struct nfp_pf *pf, struct nfp_net *nn)
 
 static int
 nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
-		       void __iomem *ctrl_bar, void __iomem *tx_bar,
-		       void __iomem *rx_bar, int stride,
+		       void __iomem *ctrl_bar, void __iomem *qc_bar, int stride,
 		       struct nfp_net_fw_version *fw_ver)
 {
 	unsigned int id, wanted_irqs, num_irqs, vnics_left, irqs_left;
@@ -439,8 +403,7 @@ nfp_net_pf_spawn_vnics(struct nfp_pf *pf,
 	int err;
 
 	/* Allocate the vnics and do basic init */
-	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, tx_bar, rx_bar,
-				     stride, fw_ver);
+	err = nfp_net_pf_alloc_vnics(pf, ctrl_bar, qc_bar, stride, fw_ver);
 	if (err)
 		return err;
 
@@ -534,8 +497,7 @@ static void nfp_net_pci_remove_finish(struct nfp_pf *pf)
 
 	nfp_net_pf_app_clean(pf);
 
-	nfp_cpp_area_release_free(pf->rx_area);
-	nfp_cpp_area_release_free(pf->tx_area);
+	nfp_cpp_area_release_free(pf->qc_area);
 	nfp_cpp_area_release_free(pf->data_vnic_bar);
 }
 
@@ -659,11 +621,9 @@ int nfp_net_refresh_eth_port(struct nfp_port *port)
  */
 int nfp_net_pci_probe(struct nfp_pf *pf)
 {
-	u32 ctrl_bar_sz, tx_area_sz, rx_area_sz;
-	u8 __iomem *ctrl_bar, *tx_bar, *rx_bar;
-	u32 total_tx_qcs, total_rx_qcs;
 	struct nfp_net_fw_version fw_ver;
-	u32 start_q;
+	u8 __iomem *ctrl_bar, *qc_bar;
+	u32 ctrl_bar_sz;
 	int stride;
 	int err;
 
@@ -718,53 +678,23 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 		}
 	}
 
-	/* Find how many QC structs need to be mapped */
-	total_tx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
-					    NFP_NET_CFG_START_TXQ,
-					    NFP_NET_CFG_MAX_TXRINGS);
-	total_rx_qcs = nfp_net_pf_total_qcs(pf, ctrl_bar, stride,
-					    NFP_NET_CFG_START_RXQ,
-					    NFP_NET_CFG_MAX_RXRINGS);
-	if (!total_tx_qcs || !total_rx_qcs) {
-		nfp_err(pf->cpp, "Invalid PF QC configuration [%d,%d]\n",
-			total_tx_qcs, total_rx_qcs);
-		err = -EINVAL;
-		goto err_ctrl_unmap;
-	}
-
-	tx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_tx_qcs;
-	rx_area_sz = NFP_QCP_QUEUE_ADDR_SZ * total_rx_qcs;
-
-	/* Map TX queues */
-	start_q = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
-	tx_bar = nfp_net_map_area(pf->cpp, "net.tx", 0, 0,
-				  NFP_PCIE_QUEUE(start_q),
-				  tx_area_sz, &pf->tx_area);
-	if (IS_ERR(tx_bar)) {
-		nfp_err(pf->cpp, "Failed to map TX area.\n");
-		err = PTR_ERR(tx_bar);
+	/* Map queues */
+	qc_bar = nfp_net_map_area(pf->cpp, "net.qc", 0, 0,
+				  NFP_PCIE_QUEUE(0), NFP_QCP_QUEUE_AREA_SZ,
+				  &pf->qc_area);
+	if (IS_ERR(qc_bar)) {
+		nfp_err(pf->cpp, "Failed to map Queue Controller area.\n");
+		err = PTR_ERR(qc_bar);
 		goto err_ctrl_unmap;
 	}
 
-	/* Map RX queues */
-	start_q = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
-	rx_bar = nfp_net_map_area(pf->cpp, "net.rx", 0, 0,
-				  NFP_PCIE_QUEUE(start_q),
-				  rx_area_sz, &pf->rx_area);
-	if (IS_ERR(rx_bar)) {
-		nfp_err(pf->cpp, "Failed to map RX area.\n");
-		err = PTR_ERR(rx_bar);
-		goto err_unmap_tx;
-	}
-
 	err = nfp_net_pf_app_init(pf);
 	if (err)
-		goto err_unmap_rx;
+		goto err_unmap_qc;
 
 	pf->ddir = nfp_net_debugfs_device_add(pf->pdev);
 
-	err = nfp_net_pf_spawn_vnics(pf, ctrl_bar, tx_bar, rx_bar,
-				     stride, &fw_ver);
+	err = nfp_net_pf_spawn_vnics(pf, ctrl_bar, qc_bar, stride, &fw_ver);
 	if (err)
 		goto err_clean_ddir;
 
@@ -775,10 +705,8 @@ int nfp_net_pci_probe(struct nfp_pf *pf)
 err_clean_ddir:
 	nfp_net_debugfs_dir_clean(&pf->ddir);
 	nfp_net_pf_app_clean(pf);
-err_unmap_rx:
-	nfp_cpp_area_release_free(pf->rx_area);
-err_unmap_tx:
-	nfp_cpp_area_release_free(pf->tx_area);
+err_unmap_qc:
+	nfp_cpp_area_release_free(pf->qc_area);
 err_ctrl_unmap:
 	nfp_cpp_area_release_free(pf->data_vnic_bar);
 err_unlock:
-- 
2.11.0


Thread overview: 30+ messages
2017-06-06  0:01 [PATCH net-next 00/16] nfp: ctrl vNIC Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 01/16] nfp: reorder open and close functions Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 02/16] nfp: split out the allocation part of open Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 03/16] nfp: reuse ring free code on close Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 04/16] nfp: move nfp_net_vecs_init() Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 05/16] nfp: prepare print macros for use without netdev Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 06/16] nfp: make sure debug accesses don't depend on netdevs Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 07/16] nfp: allow allocation and initialization of netdev-less vNICs Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 08/16] nfp: prepare config and enable for working without netdevs Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 09/16] nfp: add control vNIC datapath Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 10/16] nfp: make vNIC ctrl memory mapping function reusable Jakub Kicinski
2017-06-06  0:01 ` Jakub Kicinski [this message]
2017-06-06  0:01 ` [PATCH net-next 12/16] nfp: don't clutter init code passing fw_ver around Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 13/16] nfp: slice the netdev spawning function Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 14/16] nfp: allow non-equal distribution of IRQs Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 15/16] nfp: create control vNICs and wire up rx/tx Jakub Kicinski
2017-06-06  0:01 ` [PATCH net-next 16/16] nfp: advertise support for NFD ABI 0.5 Jakub Kicinski
2017-06-06  6:16 ` [PATCH net-next 00/16] nfp: ctrl vNIC Jiri Pirko
2017-06-06  7:21   ` Jakub Kicinski
2017-06-06  8:23     ` Jiri Pirko
2017-06-06  9:09       ` Jakub Kicinski
2017-06-06  9:17         ` Jiri Pirko
2017-06-06  9:35           ` Mintz, Yuval
2017-06-06 11:20             ` Jiri Pirko
2017-06-07 17:48               ` Mintz, Yuval
2017-06-08  5:38                 ` Jiri Pirko
2017-06-08  6:33                   ` Mintz, Yuval
2017-06-08  6:38                     ` Jiri Pirko
2017-06-06 19:54           ` Jakub Kicinski
2017-06-07 16:52 ` David Miller
