From: "Steven J. Hill" <steven.hill@cavium.com>
To: netdev@vger.kernel.org
Cc: Carlos Munoz <cmunoz@cavium.com>,
Chandrakala Chavva <cchavva@caviumnetworks.com>,
"Steven J. Hill" <Steven.Hill@cavium.com>
Subject: [PATCH v12 07/10] netdev: cavium: octeon: Add Octeon III SSO Support
Date: Wed, 27 Jun 2018 16:25:16 -0500 [thread overview]
Message-ID: <1530134719-19407-8-git-send-email-steven.hill@cavium.com> (raw)
In-Reply-To: <1530134719-19407-1-git-send-email-steven.hill@cavium.com>
From: Carlos Munoz <cmunoz@cavium.com>
Add support for Octeon III SSO logic block for BGX Ethernet.
Signed-off-by: Carlos Munoz <cmunoz@cavium.com>
Signed-off-by: Steven J. Hill <Steven.Hill@cavium.com>
---
drivers/net/ethernet/cavium/octeon/octeon3-sso.c | 221 +++++++++++++++++++++++
drivers/net/ethernet/cavium/octeon/octeon3-sso.h | 89 +++++++++
2 files changed, 310 insertions(+)
create mode 100644 drivers/net/ethernet/cavium/octeon/octeon3-sso.c
create mode 100644 drivers/net/ethernet/cavium/octeon/octeon3-sso.h
diff --git a/drivers/net/ethernet/cavium/octeon/octeon3-sso.c b/drivers/net/ethernet/cavium/octeon/octeon3-sso.c
new file mode 100644
index 0000000..73afad0
--- /dev/null
+++ b/drivers/net/ethernet/cavium/octeon/octeon3-sso.c
@@ -0,0 +1,221 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Octeon III Schedule/Synchronize/Order Unit (SSO)
+ *
+ * Copyright (C) 2018 Cavium, Inc.
+ */
+
+#include "octeon3.h"
+
+static int octeon3_sso_get_num_groups(void)
+{
+ if (OCTEON_IS_MODEL(OCTEON_CN78XX))
+ return 256;
+ if (OCTEON_IS_MODEL(OCTEON_CNF75XX) || OCTEON_IS_MODEL(OCTEON_CN73XX))
+ return 64;
+ return 0;
+}
+
+void octeon3_sso_irq_set(int node, int group, bool enable)
+{
+ if (enable)
+ oct_csr_write(1, SSO_GRP_INT_THR(node, group));
+ else
+ oct_csr_write(0, SSO_GRP_INT_THR(node, group));
+
+ oct_csr_write(SSO_GRP_INT_EXE_INT, SSO_GRP_INT(node, group));
+}
+EXPORT_SYMBOL(octeon3_sso_irq_set);
+
+/* octeon3_sso_alloc_groups - Allocate a range of SSO groups.
+ * @node: Node where SSO resides.
+ * @groups: Pointer to allocated groups.
+ * @cnt: Number of groups to allocate.
+ * @start: Group number to start sequential allocation from. -1 for don't care.
+ *
+ * Returns 0 if successful, error code otherwise..
+ */
+int octeon3_sso_alloc_groups(int node, int *groups, int cnt, int start)
+{
+ struct global_resource_tag tag;
+ int group, ret;
+ char buf[16];
+
+ strncpy((char *)&tag.lo, "cvm_sso_", 8);
+ snprintf(buf, 16, "0%d......", node);
+ memcpy(&tag.hi, buf, 8);
+
+ res_mgr_create_resource(tag, octeon3_sso_get_num_groups());
+
+ if (!groups)
+ ret = res_mgr_alloc_range(tag, start, cnt, false, &group);
+ if (!ret)
+ ret = group;
+ else
+ ret = res_mgr_alloc_range(tag, start, cnt, false, groups);
+
+ return ret;
+}
+EXPORT_SYMBOL(octeon3_sso_alloc_groups);
+
+/* octeon3_sso_free_groups - Free SSO groups.
+ * @node: Node where SSO resides.
+ * @groups: Array of groups to free.
+ * @cnt: Number of groups to free.
+ */
+void octeon3_sso_free_groups(int node, int *groups, int cnt)
+{
+ struct global_resource_tag tag;
+ char buf[16];
+
+ /* Allocate the requested groups. */
+ strncpy((char *)&tag.lo, "cvm_sso_", 8);
+ snprintf(buf, 16, "0%d......", node);
+ memcpy(&tag.hi, buf, 8);
+
+ res_mgr_free_range(tag, groups, cnt);
+}
+EXPORT_SYMBOL(octeon3_sso_free_groups);
+
+/* octeon3_sso_pass1_limit - When the Transitory Admission Queue (TAQ) is
+ * almost full, it is possible for the SSo to hang. We work around this
+ * by ensuring that the sum of SSO_GRP(0..255)_TAQ_THR[MAX_THR] of all
+ * used groups is <= 1264. This may reduce single group performance when
+ * many groups are in use.
+ * @node: Node to update.
+ * @grp: SSO group to update.
+ */
+void octeon3_sso_pass1_limit(int node, int group)
+{
+ u64 max_thr, rsvd_thr, taq_add, taq_thr;
+
+ /* Ideally we would like to divide the maximum number of TAQ buffers
+ * (1264) among the SSO groups in use. However, since we do not know
+ * how many SSO groups are used by code outside this driver, we take
+ * the worst case approach.
+ */
+ max_thr = 1264 / octeon3_sso_get_num_groups();
+ if (max_thr < 4)
+ max_thr = 4;
+ rsvd_thr = max_thr - 1;
+
+ /* Changes to SSO_GRP_TAQ_THR[rsvd_thr] must also update
+ * SSO_TAQ_ADD[RSVD_FREE].
+ */
+ taq_thr = oct_csr_read(SSO_GRP_TAQ_THR(node, group));
+ taq_add = (rsvd_thr - (taq_thr & SSO_GRP_TAQ_THR_RSVD_THR_MASK)) <<
+ SSO_TAQ_ADD_RSVD_FREE_SHIFT;
+
+ taq_thr &= ~(SSO_GRP_TAQ_THR_MAX_THR_MASK |
+ SSO_GRP_TAQ_THR_RSVD_THR_MASK);
+ taq_thr |= max_thr << SSO_GRP_TAQ_THR_RSVD_THR_SHIFT;
+ taq_thr |= rsvd_thr;
+
+ oct_csr_write(taq_thr, SSO_GRP_TAQ_THR(node, group));
+ oct_csr_write(taq_add, SSO_TAQ_ADD(node));
+}
+EXPORT_SYMBOL(octeon3_sso_pass1_limit);
+
+/* octeon3_sso_shutdown - Shutdown the SSO.
+ * @node: Node where SSO to disable is.
+ * @aura: Aura used for the SSO buffers.
+ */
+void octeon3_sso_shutdown(int node, int aura)
+{
+ int i, max_grps, timeout;
+ u64 data, head, tail;
+ void *ptr;
+
+ /* Disable SSO. */
+ data = oct_csr_read(SSO_AW_CFG(node));
+ data |= SSO_AW_CFG_XAQ_ALOC_DIS | SSO_AW_CFG_XAQ_BYP_DIS;
+ data &= ~SSO_AW_CFG_RWEN;
+ oct_csr_write(data, SSO_AW_CFG(node));
+
+ /* Extract the FPA buffers. */
+ max_grps = octeon3_sso_get_num_groups();
+ for (i = 0; i < max_grps; i++) {
+ head = oct_csr_read(SSO_XAQ_HEAD_PTR(node, i));
+ tail = oct_csr_read(SSO_XAQ_TAIL_PTR(node, i));
+ data = oct_csr_read(SSO_GRP_AQ_CNT(node, i));
+
+ /* Verify pointers. */
+ head &= SSO_XAQ_PTR_MASK;
+ tail &= SSO_XAQ_PTR_MASK;
+ if (head != tail) {
+ pr_err("%s: Bad pointer\n", __func__);
+ continue;
+ }
+
+ /* This SSO group should have no pending entries. */
+ if (data & SSO_GRP_AQ_CNT_AQ_CNT_MASK)
+ pr_err("%s: Group not empty\n", __func__);
+
+ ptr = phys_to_virt(head);
+ octeon_fpa3_free(node, aura, ptr);
+
+ /* Clear pointers. */
+ oct_csr_write(0, SSO_XAQ_HEAD_PTR(node, i));
+ oct_csr_write(0, SSO_XAQ_HEAD_NEXT(node, i));
+ oct_csr_write(0, SSO_XAQ_TAIL_PTR(node, i));
+ oct_csr_write(0, SSO_XAQ_TAIL_NEXT(node, i));
+ }
+
+ /* Make sure all buffers are drained. */
+ timeout = 10000;
+ do {
+ data = oct_csr_read(SSO_AW_STATUS(node));
+ if ((data & SSO_AW_STATUS_XAQ_BU_CACHED_MASK) == 0)
+ break;
+ timeout--;
+ udelay(1);
+ } while (timeout);
+ if (!timeout)
+ pr_err("%s: Timed out draining buffers\n", __func__);
+}
+EXPORT_SYMBOL(octeon3_sso_shutdown);
+
+/* octeon3_sso_init - Initialize the SSO.
+ * @node: Node where SSO resides.
+ * @aura: Aura used for the SSO buffers.
+ */
+int octeon3_sso_init(int node, int aura)
+{
+ int i, max_grps, err = 0;
+ u64 data, phys;
+ void *mem;
+
+ data = SSO_AW_CFG_STT | SSO_AW_CFG_LDT | SSO_AW_CFG_LDWB;
+ oct_csr_write(data, SSO_AW_CFG(node));
+
+ data = (node << SSO_XAQ_AURA_NODE_SHIFT) | aura;
+ oct_csr_write(data, SSO_XAQ_AURA(node));
+
+ max_grps = octeon3_sso_get_num_groups();
+ for (i = 0; i < max_grps; i++) {
+ mem = octeon_fpa3_alloc(node, aura);
+ if (!mem) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ phys = virt_to_phys(mem);
+ oct_csr_write(phys, SSO_XAQ_HEAD_PTR(node, i));
+ oct_csr_write(phys, SSO_XAQ_HEAD_NEXT(node, i));
+ oct_csr_write(phys, SSO_XAQ_TAIL_PTR(node, i));
+ oct_csr_write(phys, SSO_XAQ_TAIL_NEXT(node, i));
+
+ /* SSO-18678 */
+ data = SSO_GRP_PRI_WEIGHT_MAXIMUM << SSO_GRP_PRI_WEIGHT_SHIFT;
+ oct_csr_write(data, SSO_GRP_PRI(node, i));
+ }
+
+ data = SSO_ERR0_FPE;
+ oct_csr_write(data, SSO_ERR0(node));
+
+ data = SSO_AW_CFG_STT | SSO_AW_CFG_LDT | SSO_AW_CFG_LDWB |
+ SSO_AW_CFG_RWEN;
+ oct_csr_write(data, SSO_AW_CFG(node));
+out:
+ return err;
+}
+EXPORT_SYMBOL(octeon3_sso_init);
diff --git a/drivers/net/ethernet/cavium/octeon/octeon3-sso.h b/drivers/net/ethernet/cavium/octeon/octeon3-sso.h
new file mode 100644
index 0000000..dc68c4b
--- /dev/null
+++ b/drivers/net/ethernet/cavium/octeon/octeon3-sso.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Octeon III Schedule/Synchronize/Order Unit (SSO)
+ *
+ * Copyright (C) 2018 Cavium, Inc.
+ */
+#ifndef _OCTEON3_SSO_H_
+#define _OCTEON3_SSO_H_
+
+#include <linux/bitops.h>
+
+#define SSO_BASE 0x1670000000000ull
+#define SSO_ADDR(n) (SSO_BASE + SET_XKPHYS + NODE_OFFSET(n))
+#define SSO_AQ_ADDR(n, a) (SSO_ADDR(n) + ((a) << 3))
+#define SSO_GRP_ADDR(n, g) (SSO_ADDR(n) + ((g) << 16))
+
+#define SSO_AW_STATUS(n) (SSO_ADDR(n) + 0x000010e0)
+#define SSO_AW_CFG(n) (SSO_ADDR(n) + 0x000010f0)
+#define SSO_ERR0(n) (SSO_ADDR(n) + 0x00001240)
+#define SSO_TAQ_ADD(n) (SSO_ADDR(n) + 0x000020e0)
+#define SSO_XAQ_AURA(n) (SSO_ADDR(n) + 0x00002100)
+
+#define SSO_XAQ_HEAD_PTR(n, a) (SSO_AQ_ADDR(n, a) + 0x00080000)
+#define SSO_XAQ_TAIL_PTR(n, a) (SSO_AQ_ADDR(n, a) + 0x00090000)
+#define SSO_XAQ_HEAD_NEXT(n, a) (SSO_AQ_ADDR(n, a) + 0x000a0000)
+#define SSO_XAQ_TAIL_NEXT(n, a) (SSO_AQ_ADDR(n, a) + 0x000b0000)
+
+#define SSO_GRP_TAQ_THR(n, g) (SSO_GRP_ADDR(n, g) + 0x20000100)
+#define SSO_GRP_PRI(n, g) (SSO_GRP_ADDR(n, g) + 0x20000200)
+#define SSO_GRP_INT(n, g) (SSO_GRP_ADDR(n, g) + 0x20000400)
+#define SSO_GRP_INT_THR(n, g) (SSO_GRP_ADDR(n, g) + 0x20000500)
+#define SSO_GRP_AQ_CNT(n, g) (SSO_GRP_ADDR(n, g) + 0x20000700)
+
+/* SSO interrupt numbers start here */
+#define SSO_IRQ_START 0x61000
+
+#define SSO_AW_STATUS_XAQ_BU_CACHED_MASK GENMASK_ULL(5, 0)
+
+#define SSO_AW_CFG_XAQ_ALOC_DIS BIT(6)
+#define SSO_AW_CFG_XAQ_BYP_DIS BIT(4)
+#define SSO_AW_CFG_STT BIT(3)
+#define SSO_AW_CFG_LDT BIT(2)
+#define SSO_AW_CFG_LDWB BIT(1)
+#define SSO_AW_CFG_RWEN BIT(0)
+
+#define SSO_ERR0_FPE BIT(0)
+
+#define SSO_TAQ_ADD_RSVD_FREE_SHIFT 16
+
+#define SSO_XAQ_AURA_NODE_SHIFT 10
+
+#define SSO_XAQ_PTR_MASK GENMASK_ULL(41, 7)
+
+#define SSO_GRP_TAQ_THR_MAX_THR_MASK GENMASK_ULL(42, 32)
+#define SSO_GRP_TAQ_THR_RSVD_THR_MASK GENMASK_ULL(10, 0)
+#define SSO_GRP_TAQ_THR_RSVD_THR_SHIFT 32
+
+#define SSO_GRP_PRI_WEIGHT_MAXIMUM 63
+#define SSO_GRP_PRI_WEIGHT_SHIFT 16
+
+#define SSO_GRP_INT_EXE_INT BIT(1)
+
+#define SSO_GRP_AQ_CNT_AQ_CNT_MASK GENMASK_ULL(32, 0)
+
+/* SSO tag types */
+#define SSO_TAG_TYPE_ORDERED 0ull
+#define SSO_TAG_TYPE_ATOMIC 1ull
+#define SSO_TAG_TYPE_UNTAGGED 2ull
+#define SSO_TAG_TYPE_EMPTY 3ull
+#define SSO_TAG_SWDID 0x60ull
+
+
+/* SSO work queue bitfields */
+#define SSO_GET_WORK_DID_SHIFT 40
+#define SSO_GET_WORK_NODE_SHIFT 36
+#define SSO_GET_WORK_GROUPED BIT(30)
+#define SSO_GET_WORK_RTNGRP BIT(29)
+#define SSO_GET_WORK_IDX_GRP_MASK_SHIFT 4
+#define SSO_GET_WORK_WAITW_WAIT BIT(3)
+#define SSO_GET_WORK_WAITW_NO_WAIT 0ull
+
+#define SSO_GET_WORK_DMA_S_SCRADDR BIT(63)
+#define SSO_GET_WORK_DMA_S_LEN_SHIFT 48
+#define SSO_GET_WORK_LD_S_IO BIT(48)
+#define SSO_GET_WORK_RTN_S_NO_WORK BIT(63)
+#define SSO_GET_WORK_RTN_S_GRP_MASK GENMASK_ULL(57, 48)
+#define SSO_GET_WORK_RTN_S_GRP_SHIFT 48
+#define SSO_GET_WORK_RTN_S_WQP_MASK GENMASK_ULL(41, 0)
+
+#endif /* _OCTEON3_SSO_H_ */
--
2.1.4
next prev parent reply other threads:[~2018-06-27 21:25 UTC|newest]
Thread overview: 20+ messages / expand[flat|nested] mbox.gz Atom feed top
2018-06-27 21:25 [PATCH v12 00/10] netdev: octeon-ethernet: Add Cavium Octeon III support Steven J. Hill
2018-06-27 21:25 ` [PATCH v12 01/10] dt-bindings: Add Cavium Octeon Common Ethernet Interface Steven J. Hill
2018-06-28 8:35 ` Andrew Lunn
2018-07-06 22:10 ` Steven J. Hill
2018-07-06 22:41 ` Andrew Lunn
2018-06-27 21:25 ` [PATCH v12 02/10] netdev: cavium: octeon: Header for Octeon III BGX Ethernet Steven J. Hill
2018-06-27 21:25 ` [PATCH v12 03/10] netdev: cavium: octeon: Add Octeon III BGX Ethernet Nexus Steven J. Hill
2018-06-28 8:41 ` Andrew Lunn
2018-06-28 21:20 ` Carlos Munoz
2018-06-29 2:19 ` David Miller
2018-06-29 3:30 ` Chavva, Chandrakala
2018-06-29 6:21 ` David Miller
2018-06-29 6:13 ` Jiri Pirko
2018-06-27 21:25 ` [PATCH v12 04/10] netdev: cavium: octeon: Add Octeon III BGX Ports Steven J. Hill
2018-06-27 21:25 ` [PATCH v12 05/10] netdev: cavium: octeon: Add Octeon III PKI Support Steven J. Hill
2018-06-27 21:25 ` [PATCH v12 06/10] netdev: cavium: octeon: Add Octeon III PKO Support Steven J. Hill
2018-06-27 21:25 ` Steven J. Hill [this message]
2018-06-27 21:25 ` [PATCH v12 08/10] netdev: cavium: octeon: Add Octeon III BGX Ethernet core Steven J. Hill
2018-06-27 21:25 ` [PATCH v12 09/10] netdev: cavium: octeon: Add Octeon III BGX Ethernet building Steven J. Hill
2018-06-27 21:25 ` [PATCH v12 10/10] MAINTAINERS: Add entry for drivers/net/ethernet/cavium/octeon/octeon3-* Steven J. Hill
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=1530134719-19407-8-git-send-email-steven.hill@cavium.com \
--to=steven.hill@cavium.com \
--cc=cchavva@caviumnetworks.com \
--cc=cmunoz@cavium.com \
--cc=netdev@vger.kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.