* [net-next PATCH 1/3] octeontx2-af: Add devlink support to af driver
2020-11-02 5:06 [net-next PATCH 0/3] Add devlink and devlink health reporters to George Cherian
@ 2020-11-02 5:06 ` George Cherian
2020-11-02 13:31 ` Willem de Bruijn
2020-11-02 5:06 ` [net-next PATCH 2/3] octeontx2-af: Add devlink health reporters for NPA George Cherian
From: George Cherian @ 2020-11-02 5:06 UTC
To: netdev, linux-kernel
Cc: kuba, davem, sgoutham, lcherian, gakula, masahiroy, george.cherian
Add devlink support to AF driver. Basic devlink support is added.
Currently info_get is the only supported devlink ops.
devlink output looks like this
# devlink dev
pci/0002:01:00.0
# devlink dev info
pci/0002:01:00.0:
driver octeontx2-af
versions:
fixed:
mbox version: 9
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: George Cherian <george.cherian@marvell.com>
---
.../net/ethernet/marvell/octeontx2/Kconfig | 1 +
.../ethernet/marvell/octeontx2/af/Makefile | 3 +-
.../net/ethernet/marvell/octeontx2/af/rvu.c | 9 ++-
.../net/ethernet/marvell/octeontx2/af/rvu.h | 5 +-
.../marvell/octeontx2/af/rvu_devlink.c | 69 +++++++++++++++++++
.../marvell/octeontx2/af/rvu_devlink.h | 20 ++++++
6 files changed, 104 insertions(+), 3 deletions(-)
create mode 100644 drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
create mode 100644 drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig
index 543a1d047567..16caa02095fe 100644
--- a/drivers/net/ethernet/marvell/octeontx2/Kconfig
+++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig
@@ -9,6 +9,7 @@ config OCTEONTX2_MBOX
config OCTEONTX2_AF
tristate "Marvell OcteonTX2 RVU Admin Function driver"
select OCTEONTX2_MBOX
+ select NET_DEVLINK
depends on (64BIT && COMPILE_TEST) || ARM64
depends on PCI
help
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
index 2f7a861d0c7b..20135f1d3387 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile
+++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile
@@ -9,4 +9,5 @@ obj-$(CONFIG_OCTEONTX2_AF) += octeontx2_af.o
octeontx2_mbox-y := mbox.o rvu_trace.o
octeontx2_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \
- rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o
+ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o \
+ rvu_devlink.o
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
index a28a518c0eae..58c48fa7aa72 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c
@@ -2812,10 +2812,14 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (err)
goto err_mbox;
- err = rvu_register_interrupts(rvu);
+ err = rvu_register_dl(rvu);
if (err)
goto err_flr;
+ err = rvu_register_interrupts(rvu);
+ if (err)
+ goto err_dl;
+
rvu_setup_rvum_blk_revid(rvu);
/* Enable AF's VFs (if any) */
@@ -2829,6 +2833,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
err_irq:
rvu_unregister_interrupts(rvu);
+err_dl:
+ rvu_unregister_dl(rvu);
err_flr:
rvu_flr_wq_destroy(rvu);
err_mbox:
@@ -2858,6 +2864,7 @@ static void rvu_remove(struct pci_dev *pdev)
rvu_dbg_exit(rvu);
rvu_unregister_interrupts(rvu);
+ rvu_unregister_dl(rvu);
rvu_flr_wq_destroy(rvu);
rvu_cgx_exit(rvu);
rvu_fwdata_exit(rvu);
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
index 5ac9bb12415f..c112b299635d 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
@@ -12,7 +12,10 @@
#define RVU_H
#include <linux/pci.h>
+#include <net/devlink.h>
+
#include "rvu_struct.h"
+#include "rvu_devlink.h"
#include "common.h"
#include "mbox.h"
@@ -372,10 +375,10 @@ struct rvu {
struct npc_kpu_profile_adapter kpu;
struct ptp *ptp;
-
#ifdef CONFIG_DEBUG_FS
struct rvu_debugfs rvu_dbg;
#endif
+ struct rvu_devlink *rvu_dl;
};
static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
new file mode 100644
index 000000000000..c9f5f66e6701
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Marvell OcteonTx2 RVU Devlink
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ */
+
+#include "rvu.h"
+
+#define DRV_NAME "octeontx2-af"
+
+static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
+ struct netlink_ext_ack *extack)
+{
+ char buf[10];
+ int err;
+
+ err = devlink_info_driver_name_put(req, DRV_NAME);
+ if (err)
+ return err;
+
+ sprintf(buf, "%X", OTX2_MBOX_VERSION);
+ return devlink_info_version_fixed_put(req, "mbox version:", buf);
+}
+
+static const struct devlink_ops rvu_devlink_ops = {
+ .info_get = rvu_devlink_info_get,
+};
+
+int rvu_register_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+ struct devlink *dl;
+ int err;
+
+ rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL);
+ if (!rvu_dl)
+ return -ENOMEM;
+
+ dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
+ if (!dl) {
+ dev_warn(rvu->dev, "devlink_alloc failed\n");
+ return -ENOMEM;
+ }
+
+ err = devlink_register(dl, rvu->dev);
+ if (err) {
+ dev_err(rvu->dev, "devlink register failed with error %d\n", err);
+ devlink_free(dl);
+ return err;
+ }
+
+ rvu_dl->dl = dl;
+ rvu_dl->rvu = rvu;
+ rvu->rvu_dl = rvu_dl;
+ return 0;
+}
+
+void rvu_unregister_dl(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct devlink *dl = rvu_dl->dl;
+
+ if (!dl)
+ return;
+
+ devlink_unregister(dl);
+ devlink_free(dl);
+}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
new file mode 100644
index 000000000000..b0a0dfeb99c2
--- /dev/null
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Marvell OcteonTx2 RVU Devlink
+ *
+ * Copyright (C) 2020 Marvell International Ltd.
+ *
+ */
+
+#ifndef RVU_DEVLINK_H
+#define RVU_DEVLINK_H
+
+struct rvu_devlink {
+ struct devlink *dl;
+ struct rvu *rvu;
+};
+
+/* Devlink APIs */
+int rvu_register_dl(struct rvu *rvu);
+void rvu_unregister_dl(struct rvu *rvu);
+
+#endif /* RVU_DEVLINK_H */
--
2.25.1
* Re: [net-next PATCH 1/3] octeontx2-af: Add devlink support to af driver
2020-11-02 5:06 ` [net-next PATCH 1/3] octeontx2-af: Add devlink support to af driver George Cherian
@ 2020-11-02 13:31 ` Willem de Bruijn
From: Willem de Bruijn @ 2020-11-02 13:31 UTC
To: George Cherian
Cc: Network Development, linux-kernel, Jakub Kicinski, David Miller,
sgoutham, lcherian, gakula, masahiroy
On Mon, Nov 2, 2020 at 12:07 AM George Cherian
<george.cherian@marvell.com> wrote:
>
> Add devlink support to AF driver. Basic devlink support is added.
> Currently info_get is the only supported devlink ops.
>
> devlink output looks like this
> # devlink dev
> pci/0002:01:00.0
> # devlink dev info
> pci/0002:01:00.0:
> driver octeontx2-af
> versions:
> fixed:
> mbox version: 9
>
> Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
> Signed-off-by: Jerin Jacob <jerinj@marvell.com>
> Signed-off-by: George Cherian <george.cherian@marvell.com>
> diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
> index 5ac9bb12415f..c112b299635d 100644
> --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
> +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h
> @@ -12,7 +12,10 @@
> #define RVU_H
>
> #include <linux/pci.h>
> +#include <net/devlink.h>
> +
> #include "rvu_struct.h"
> +#include "rvu_devlink.h"
> #include "common.h"
> #include "mbox.h"
>
> @@ -372,10 +375,10 @@ struct rvu {
> struct npc_kpu_profile_adapter kpu;
>
> struct ptp *ptp;
> -
accidentally removed this line?
> #ifdef CONFIG_DEBUG_FS
> struct rvu_debugfs rvu_dbg;
> #endif
> + struct rvu_devlink *rvu_dl;
> };
> +int rvu_register_dl(struct rvu *rvu)
> +{
> + struct rvu_devlink *rvu_dl;
> + struct devlink *dl;
> + int err;
> +
> + rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL);
> + if (!rvu_dl)
> + return -ENOMEM;
> +
> + dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
> + if (!dl) {
> + dev_warn(rvu->dev, "devlink_alloc failed\n");
> + return -ENOMEM;
rvu_dl not freed on error.
This happens a couple of times in these patches
Is the intermediate struct needed, or could you embed the fields
directly into rvu and use container_of to get from devlink to struct
rvu? Even if needed, perhaps easier to embed the struct into rvu
rather than a pointer.
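One way to address both points at once — a minimal sketch using devlink_priv(), assuming the devlink API of this era (devlink_alloc() takes a private-area size), so the separate kzalloc() and its error-path kfree() disappear because devlink owns the allocation:

int rvu_register_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl;
	struct devlink *dl;
	int err;

	/* devlink_alloc() already reserves sizeof(struct rvu_devlink)
	 * bytes of driver-private memory, so no separate kzalloc() is
	 * needed and there is nothing extra to free on error.
	 */
	dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink));
	if (!dl) {
		dev_warn(rvu->dev, "devlink_alloc failed\n");
		return -ENOMEM;
	}

	rvu_dl = devlink_priv(dl);
	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	err = devlink_register(dl, rvu->dev);
	if (err) {
		dev_err(rvu->dev, "devlink register failed with error %d\n",
			err);
		devlink_free(dl);
		return err;
	}

	return 0;
}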
> + }
> +
> + err = devlink_register(dl, rvu->dev);
> + if (err) {
> + dev_err(rvu->dev, "devlink register failed with error %d\n", err);
> + devlink_free(dl);
> + return err;
> + }
> +
> + rvu_dl->dl = dl;
> + rvu_dl->rvu = rvu;
> + rvu->rvu_dl = rvu_dl;
> + return 0;
> +}
> +
> +void rvu_unregister_dl(struct rvu *rvu)
> +{
> + struct rvu_devlink *rvu_dl = rvu->rvu_dl;
> + struct devlink *dl = rvu_dl->dl;
> +
> + if (!dl)
> + return;
> +
> + devlink_unregister(dl);
> + devlink_free(dl);
here too
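Under the same devlink_priv() assumption, the matching teardown needs no kfree() either, since devlink_free() releases the private area along with the devlink instance:

void rvu_unregister_dl(struct rvu *rvu)
{
	struct rvu_devlink *rvu_dl = rvu->rvu_dl;

	if (!rvu_dl || !rvu_dl->dl)
		return;

	devlink_unregister(rvu_dl->dl);
	devlink_free(rvu_dl->dl);	/* also frees the devlink_priv() area */
}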
* [net-next PATCH 2/3] octeontx2-af: Add devlink health reporters for NPA
2020-11-02 5:06 [net-next PATCH 0/3] Add devlink and devlink health reporters to George Cherian
2020-11-02 5:06 ` [net-next PATCH 1/3] octeontx2-af: Add devlink support to af driver George Cherian
@ 2020-11-02 5:06 ` George Cherian
2020-11-02 13:42 ` Willem de Bruijn
2020-11-03 7:26 ` kernel test robot
2020-11-02 5:06 ` [net-next PATCH 3/3] octeontx2-af: Add devlink health reporters for NIX George Cherian
2020-11-02 18:00 ` [net-next PATCH 0/3] Add devlink and devlink health reporters to Jakub Kicinski
From: George Cherian @ 2020-11-02 5:06 UTC
To: netdev, linux-kernel
Cc: kuba, davem, sgoutham, lcherian, gakula, masahiroy, george.cherian
Add health reporters for RVU NPA block.
Only reporter dump is supported
Output:
# devlink health
pci/0002:01:00.0:
reporter npa
state healthy error 0 recover 0
# devlink health dump show pci/0002:01:00.0 reporter npa
NPA_AF_GENERAL:
Unmap PF Error: 0
Free Disabled for NIX0 RX: 0
Free Disabled for NIX0 TX: 0
Free Disabled for NIX1 RX: 0
Free Disabled for NIX1 TX: 0
Free Disabled for SSO: 0
Free Disabled for TIM: 0
Free Disabled for DPI: 0
Free Disabled for AURA: 0
Alloc Disabled for Resvd: 0
NPA_AF_ERR:
Memory Fault on NPA_AQ_INST_S read: 0
Memory Fault on NPA_AQ_RES_S write: 0
AQ Doorbell Error: 0
Poisoned data on NPA_AQ_INST_S read: 0
Poisoned data on NPA_AQ_RES_S write: 0
Poisoned data on HW context read: 0
NPA_AF_RVU:
Unmap Slot Error: 0
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: George Cherian <george.cherian@marvell.com>
---
.../marvell/octeontx2/af/rvu_devlink.c | 434 +++++++++++++++++-
.../marvell/octeontx2/af/rvu_devlink.h | 23 +
.../marvell/octeontx2/af/rvu_struct.h | 23 +
3 files changed, 479 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index c9f5f66e6701..946e751fb544 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -5,10 +5,440 @@
*
*/
+#include <linux/bitfield.h>
+
#include "rvu.h"
+#include "rvu_reg.h"
+#include "rvu_struct.h"
#define DRV_NAME "octeontx2-af"
+void rvu_npa_unregister_interrupts(struct rvu *rvu);
+
+int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
+{
+ int err;
+
+ err = devlink_fmsg_pair_nest_start(fmsg, name);
+ if (err)
+ return err;
+
+ return devlink_fmsg_obj_nest_start(fmsg);
+}
+
+int rvu_report_pair_end(struct devlink_fmsg *fmsg)
+{
+ int err;
+
+ err = devlink_fmsg_obj_nest_end(fmsg);
+ if (err)
+ return err;
+
+ return devlink_fmsg_pair_nest_end(fmsg);
+}
+
+static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_cnt *npa_event_count;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_count = rvu_dl->npa_event_cnt;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
+
+ if (intr & BIT_ULL(0))
+ npa_event_count->unmap_slot_count++;
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static int rvu_npa_inpq_to_cnt(u16 in,
+ struct rvu_npa_event_cnt *npa_event_count)
+{
+ switch (in) {
+ case 0:
+ return 0;
+ case BIT(NPA_INPQ_NIX0_RX):
+ return npa_event_count->free_dis_nix0_rx_count++;
+ case BIT(NPA_INPQ_NIX0_TX):
+ return npa_event_count->free_dis_nix0_tx_count++;
+ case BIT(NPA_INPQ_NIX1_RX):
+ return npa_event_count->free_dis_nix1_rx_count++;
+ case BIT(NPA_INPQ_NIX1_TX):
+ return npa_event_count->free_dis_nix1_tx_count++;
+ case BIT(NPA_INPQ_SSO):
+ return npa_event_count->free_dis_sso_count++;
+ case BIT(NPA_INPQ_TIM):
+ return npa_event_count->free_dis_tim_count++;
+ case BIT(NPA_INPQ_DPI):
+ return npa_event_count->free_dis_dpi_count++;
+ case BIT(NPA_INPQ_AURA_OP):
+ return npa_event_count->free_dis_aura_count++;
+ case BIT(NPA_INPQ_INTERNAL_RSV):
+ return npa_event_count->free_dis_rsvd_count++;
+ }
+
+ return npa_event_count->alloc_dis_rsvd_count++;
+}
+
+static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_cnt *npa_event_count;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr, val;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_count = rvu_dl->npa_event_cnt;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
+
+ if (intr & BIT_ULL(32))
+ npa_event_count->unmap_pf_count++;
+
+ val = FIELD_GET(GENMASK(31, 16), intr);
+ rvu_npa_inpq_to_cnt(val, npa_event_count);
+
+ val = FIELD_GET(GENMASK(15, 0), intr);
+ rvu_npa_inpq_to_cnt(val, npa_event_count);
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_cnt *npa_event_count;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_count = rvu_dl->npa_event_cnt;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
+
+ if (intr & BIT_ULL(14))
+ npa_event_count->aq_inst_count++;
+
+ if (intr & BIT_ULL(13))
+ npa_event_count->aq_res_count++;
+
+ if (intr & BIT_ULL(12))
+ npa_event_count->aq_db_count++;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_npa_event_cnt *npa_event_count;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ npa_event_count = rvu_dl->npa_event_cnt;
+ intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
+
+ if (intr & BIT_ULL(34))
+ npa_event_count->poison_aq_inst_count++;
+
+ if (intr & BIT_ULL(33))
+ npa_event_count->poison_aq_res_count++;
+
+ if (intr & BIT_ULL(32))
+ npa_event_count->poison_aq_cxt_count++;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
+ return IRQ_HANDLED;
+}
+
+static bool rvu_npa_af_request_irq(struct rvu *rvu, int blkaddr, int offset,
+ const char *name, irq_handler_t fn)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int rc;
+
+ WARN_ON(rvu->irq_allocated[offset]);
+ rvu->irq_allocated[offset] = false;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
+ rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
+ if (rc)
+ dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
+int rvu_npa_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr, base;
+ bool rc;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ /* Get NPA AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NPA_AF_INT vector offsets\n");
+ return 0;
+ }
+
+ /* Register and enable NPA_AF_RVU_INT interrupt */
+ rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_RVU,
+ "NPA_AF_RVU_INT",
+ rvu_npa_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_GEN_INT interrupt */
+ rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_GEN,
+ "NPA_AF_RVU_GEN",
+ rvu_npa_af_gen_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_ERR_INT interrupt */
+ rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_AF_ERR,
+ "NPA_AF_ERR_INT",
+ rvu_npa_af_err_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NPA_AF_RAS interrupt */
+ rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_POISON,
+ "NPA_AF_RAS",
+ rvu_npa_af_ras_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_npa_unregister_interrupts(rvu);
+ return rc;
+}
+
+void rvu_npa_unregister_interrupts(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int i, offs, blkaddr;
+ u64 reg;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
+ if (blkaddr < 0)
+ return;
+
+ reg = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG);
+ offs = reg & 0x3FF;
+
+ rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1C, ~0ULL);
+
+ for (i = 0; i < NPA_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+static int rvu_npa_report_show(struct devlink_fmsg *fmsg, struct rvu *rvu)
+{
+ struct rvu_npa_event_cnt *npa_event_count;
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int err;
+
+ npa_event_count = rvu_dl->npa_event_cnt;
+ err = rvu_report_pair_start(fmsg, "NPA_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tUnmap PF Error",
+ npa_event_count->unmap_pf_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tFree Disabled for NIX0 RX",
+ npa_event_count->free_dis_nix0_rx_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tFree Disabled for NIX0 TX",
+ npa_event_count->free_dis_nix0_tx_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tFree Disabled for NIX1 RX",
+ npa_event_count->free_dis_nix1_rx_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tFree Disabled for NIX1 TX",
+ npa_event_count->free_dis_nix1_tx_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tFree Disabled for SSO",
+ npa_event_count->free_dis_sso_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tFree Disabled for TIM",
+ npa_event_count->free_dis_tim_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tFree Disabled for DPI",
+ npa_event_count->free_dis_dpi_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tFree Disabled for AURA",
+ npa_event_count->free_dis_aura_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tAlloc Disabled for Resvd",
+ npa_event_count->alloc_dis_rsvd_count);
+ if (err)
+ return err;
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ err = rvu_report_pair_start(fmsg, "NPA_AF_ERR");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tMemory Fault on NPA_AQ_INST_S read",
+ npa_event_count->aq_inst_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tMemory Fault on NPA_AQ_RES_S write",
+ npa_event_count->aq_res_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tAQ Doorbell Error",
+ npa_event_count->aq_db_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on NPA_AQ_INST_S read",
+ npa_event_count->poison_aq_inst_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on NPA_AQ_RES_S write",
+ npa_event_count->poison_aq_res_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on HW context read",
+ npa_event_count->poison_aq_cxt_count);
+ if (err)
+ return err;
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ err = rvu_report_pair_start(fmsg, "NPA_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tUnmap Slot Error",
+ npa_event_count->unmap_slot_count);
+ if (err)
+ return err;
+ return rvu_report_pair_end(fmsg);
+}
+
+static int rvu_npa_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+
+ return rvu_npa_report_show(fmsg, rvu);
+}
+
+static const struct devlink_health_reporter_ops rvu_npa_hw_fault_reporter_ops = {
+ .name = "npa",
+ .dump = rvu_npa_reporter_dump,
+};
+
+static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct devlink_health_reporter *rvu_npa_health_reporter;
+ struct rvu_npa_event_cnt *npa_event_count;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ npa_event_count = kzalloc(sizeof(*npa_event_count), GFP_KERNEL);
+ if (!npa_event_count)
+ return -ENOMEM;
+
+ rvu_dl->npa_event_cnt = npa_event_count;
+ rvu_npa_health_reporter = devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_npa_hw_fault_reporter_ops,
+ 0, rvu);
+ if (IS_ERR(rvu_npa_health_reporter)) {
+ dev_warn(rvu->dev, "Failed to create npa reporter, err =%ld\n",
+ PTR_ERR(rvu_npa_health_reporter));
+ return PTR_ERR(rvu_npa_health_reporter);
+ }
+
+ rvu_dl->rvu_npa_health_reporter = rvu_npa_health_reporter;
+ return 0;
+}
+
+static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ if (!rvu_dl->rvu_npa_health_reporter)
+ return;
+
+ devlink_health_reporter_destroy(rvu_dl->rvu_npa_health_reporter);
+}
+
+static int rvu_health_reporters_create(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+
+ if (!rvu->rvu_dl)
+ return -EINVAL;
+
+ rvu_dl = rvu->rvu_dl;
+ return rvu_npa_health_reporters_create(rvu_dl);
+}
+
+static void rvu_health_reporters_destroy(struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl;
+
+ if (!rvu->rvu_dl)
+ return;
+
+ rvu_dl = rvu->rvu_dl;
+ rvu_npa_health_reporters_destroy(rvu_dl);
+}
+
static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
struct netlink_ext_ack *extack)
{
@@ -53,7 +483,8 @@ int rvu_register_dl(struct rvu *rvu)
rvu_dl->dl = dl;
rvu_dl->rvu = rvu;
rvu->rvu_dl = rvu_dl;
- return 0;
+
+ return rvu_health_reporters_create(rvu);
}
void rvu_unregister_dl(struct rvu *rvu)
@@ -64,6 +495,7 @@ void rvu_unregister_dl(struct rvu *rvu)
if (!dl)
return;
+ rvu_health_reporters_destroy(rvu);
devlink_unregister(dl);
devlink_free(dl);
}
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
index b0a0dfeb99c2..b3ce1a8fff57 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -8,9 +8,32 @@
#ifndef RVU_DEVLINK_H
#define RVU_DEVLINK_H
+struct rvu_npa_event_cnt {
+ unsigned long unmap_slot_count;
+ unsigned long unmap_pf_count;
+ unsigned long free_dis_nix0_rx_count;
+ unsigned long free_dis_nix0_tx_count;
+ unsigned long free_dis_nix1_rx_count;
+ unsigned long free_dis_nix1_tx_count;
+ unsigned long free_dis_sso_count;
+ unsigned long free_dis_tim_count;
+ unsigned long free_dis_dpi_count;
+ unsigned long free_dis_aura_count;
+ unsigned long free_dis_rsvd_count;
+ unsigned long alloc_dis_rsvd_count;
+ unsigned long aq_inst_count;
+ unsigned long aq_res_count;
+ unsigned long aq_db_count;
+ unsigned long poison_aq_inst_count;
+ unsigned long poison_aq_res_count;
+ unsigned long poison_aq_cxt_count;
+};
+
struct rvu_devlink {
struct devlink *dl;
struct rvu *rvu;
+ struct devlink_health_reporter *rvu_npa_health_reporter;
+ struct rvu_npa_event_cnt *npa_event_cnt;
};
/* Devlink APIs */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 9a7eb074cdc2..995add5d8bff 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -64,6 +64,16 @@ enum rvu_af_int_vec_e {
RVU_AF_INT_VEC_CNT = 0x5,
};
+/* NPA Admin function Interrupt Vector Enumeration */
+enum npa_af_int_vec_e {
+ NPA_AF_INT_VEC_RVU = 0x0,
+ NPA_AF_INT_VEC_GEN = 0x1,
+ NPA_AF_INT_VEC_AQ_DONE = 0x2,
+ NPA_AF_INT_VEC_AF_ERR = 0x3,
+ NPA_AF_INT_VEC_POISON = 0x4,
+ NPA_AF_INT_VEC_CNT = 0x5,
+};
+
/**
* RVU PF Interrupt Vector Enumeration
*/
@@ -104,6 +114,19 @@ enum npa_aq_instop {
NPA_AQ_INSTOP_UNLOCK = 0x5,
};
+/* ALLOC/FREE input queues Enumeration from coprocessors */
+enum npa_inpq {
+ NPA_INPQ_NIX0_RX = 0x0,
+ NPA_INPQ_NIX0_TX = 0x1,
+ NPA_INPQ_NIX1_RX = 0x2,
+ NPA_INPQ_NIX1_TX = 0x3,
+ NPA_INPQ_SSO = 0x4,
+ NPA_INPQ_TIM = 0x5,
+ NPA_INPQ_DPI = 0x6,
+ NPA_INPQ_AURA_OP = 0xe,
+ NPA_INPQ_INTERNAL_RSV = 0xf,
+};
+
/* NPA admin queue instruction structure */
struct npa_aq_inst_s {
#if defined(__BIG_ENDIAN_BITFIELD)
--
2.25.1
* Re: [net-next PATCH 2/3] octeontx2-af: Add devlink health reporters for NPA
2020-11-02 5:06 ` [net-next PATCH 2/3] octeontx2-af: Add devlink health reporters for NPA George Cherian
@ 2020-11-02 13:42 ` Willem de Bruijn
2020-11-03 7:26 ` kernel test robot
From: Willem de Bruijn @ 2020-11-02 13:42 UTC
To: George Cherian
Cc: Network Development, linux-kernel, Jakub Kicinski, David Miller,
sgoutham, lcherian, gakula, masahiroy
On Mon, Nov 2, 2020 at 12:07 AM George Cherian
<george.cherian@marvell.com> wrote:
>
> Add health reporters for RVU NPA block.
> Only reporter dump is supported
>
> Output:
> # devlink health
> pci/0002:01:00.0:
> reporter npa
> state healthy error 0 recover 0
> # devlink health dump show pci/0002:01:00.0 reporter npa
> NPA_AF_GENERAL:
> Unmap PF Error: 0
> Free Disabled for NIX0 RX: 0
> Free Disabled for NIX0 TX: 0
> Free Disabled for NIX1 RX: 0
> Free Disabled for NIX1 TX: 0
> Free Disabled for SSO: 0
> Free Disabled for TIM: 0
> Free Disabled for DPI: 0
> Free Disabled for AURA: 0
> Alloc Disabled for Resvd: 0
> NPA_AF_ERR:
> Memory Fault on NPA_AQ_INST_S read: 0
> Memory Fault on NPA_AQ_RES_S write: 0
> AQ Doorbell Error: 0
> Poisoned data on NPA_AQ_INST_S read: 0
> Poisoned data on NPA_AQ_RES_S write: 0
> Poisoned data on HW context read: 0
> NPA_AF_RVU:
> Unmap Slot Error: 0
>
> Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
> Signed-off-by: Jerin Jacob <jerinj@marvell.com>
> Signed-off-by: George Cherian <george.cherian@marvell.com>
> +static bool rvu_npa_af_request_irq(struct rvu *rvu, int blkaddr, int offset,
> + const char *name, irq_handler_t fn)
> +{
> + struct rvu_devlink *rvu_dl = rvu->rvu_dl;
> + int rc;
> +
> + WARN_ON(rvu->irq_allocated[offset]);
Please use WARN_ON sparingly for important unrecoverable events. This
seems like a basic precondition. If it can happen at all, can probably
catch in a normal branch with a netdev_err. The stacktrace in the oops
is not likely to point at the source of the non-zero value, anyway.
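A sketch of that precondition-as-branch, assuming dev_err() is acceptable here (the AF driver has no netdev for netdev_err()):

	/* Illustrative: catch the precondition in a normal branch rather
	 * than WARN_ON(), and fail the request cleanly.
	 */
	if (rvu->irq_allocated[offset]) {
		dev_err(rvu->dev, "IRQ vector %d already requested\n", offset);
		return false;
	}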
> + rvu->irq_allocated[offset] = false;
Why initialize this here? Are these fields not zeroed on alloc? Is
this here only to safely call rvu_npa_unregister_interrupts on partial
alloc? Then it might be simpler to just have jump labels in this
function to free the successfully requested irqs.
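And one possible shape for the jump-label unwind, sketched against the NPA register path (two vectors shown; the remaining ones would follow the same pattern, and the unwind order is illustrative):

int rvu_npa_register_interrupts(struct rvu *rvu)
{
	int blkaddr, base;

	blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
	if (blkaddr < 0)
		return blkaddr;

	base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
	if (!base)
		return 0;

	if (!rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_RVU,
				    "NPA_AF_RVU_INT",
				    rvu_npa_af_rvu_intr_handler))
		return -EIO;
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);

	if (!rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_GEN,
				    "NPA_AF_RVU_GEN",
				    rvu_npa_af_gen_intr_handler))
		goto free_rvu_irq;
	rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);

	/* ... ERR and RAS vectors follow the same pattern ... */
	return 0;

free_rvu_irq:
	/* Unwind only what was successfully requested, newest first */
	rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1C, ~0ULL);
	free_irq(pci_irq_vector(rvu->pdev, base + NPA_AF_INT_VEC_RVU),
		 rvu->rvu_dl);
	return -EIO;
}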
> + sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
> + rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
> + &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
> + if (rc)
> + dev_warn(rvu->dev, "Failed to register %s irq\n", name);
> + else
> + rvu->irq_allocated[offset] = true;
> +
> + return rvu->irq_allocated[offset];
> +}
> +static int rvu_npa_health_reporters_create(struct rvu_devlink *rvu_dl)
> +{
> + struct devlink_health_reporter *rvu_npa_health_reporter;
> + struct rvu_npa_event_cnt *npa_event_count;
> + struct rvu *rvu = rvu_dl->rvu;
> +
> + npa_event_count = kzalloc(sizeof(*npa_event_count), GFP_KERNEL);
> + if (!npa_event_count)
> + return -ENOMEM;
> +
> + rvu_dl->npa_event_cnt = npa_event_count;
> + rvu_npa_health_reporter = devlink_health_reporter_create(rvu_dl->dl,
> + &rvu_npa_hw_fault_reporter_ops,
> + 0, rvu);
> + if (IS_ERR(rvu_npa_health_reporter)) {
> + dev_warn(rvu->dev, "Failed to create npa reporter, err =%ld\n",
> + PTR_ERR(rvu_npa_health_reporter));
> + return PTR_ERR(rvu_npa_health_reporter);
> + }
> +
> + rvu_dl->rvu_npa_health_reporter = rvu_npa_health_reporter;
> + return 0;
> +}
> +
> +static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
> +{
> + if (!rvu_dl->rvu_npa_health_reporter)
> + return;
> +
> + devlink_health_reporter_destroy(rvu_dl->rvu_npa_health_reporter);
> +}
> +
> +static int rvu_health_reporters_create(struct rvu *rvu)
> +{
> + struct rvu_devlink *rvu_dl;
> +
> + if (!rvu->rvu_dl)
> + return -EINVAL;
> +
> + rvu_dl = rvu->rvu_dl;
> + return rvu_npa_health_reporters_create(rvu_dl);
No need for local var rvu_dl. Here and below.
Without that, the entire helper is probably not needed.
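i.e., since rvu_register_dl() has just assigned rvu->rvu_dl a few lines earlier, the NULL check and the wrapper collapse to a direct call (sketch):

	rvu_dl->dl = dl;
	rvu_dl->rvu = rvu;
	rvu->rvu_dl = rvu_dl;

	/* rvu_dl was allocated above, so no NULL check or wrapper needed */
	return rvu_npa_health_reporters_create(rvu_dl);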
> +}
> +
> +static void rvu_health_reporters_destroy(struct rvu *rvu)
> +{
> + struct rvu_devlink *rvu_dl;
> +
> + if (!rvu->rvu_dl)
> + return;
> +
> + rvu_dl = rvu->rvu_dl;
> + rvu_npa_health_reporters_destroy(rvu_dl);
> +}
> +
> static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
> struct netlink_ext_ack *extack)
> {
> @@ -53,7 +483,8 @@ int rvu_register_dl(struct rvu *rvu)
> rvu_dl->dl = dl;
> rvu_dl->rvu = rvu;
> rvu->rvu_dl = rvu_dl;
> - return 0;
> +
> + return rvu_health_reporters_create(rvu);
when would this be called with rvu->rvu_dl == NULL?
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [net-next PATCH 2/3] octeontx2-af: Add devlink health reporters for NPA
2020-11-02 5:06 ` [net-next PATCH 2/3] octeontx2-af: Add devlink health reporters for NPA George Cherian
2020-11-02 13:42 ` Willem de Bruijn
@ 2020-11-03 7:26 ` kernel test robot
From: kernel test robot @ 2020-11-03 7:26 UTC
To: George Cherian, netdev, linux-kernel
Cc: kbuild-all, kuba, davem, sgoutham, lcherian, gakula, masahiroy,
george.cherian
Hi George,
I love your patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
url: https://github.com/0day-ci/linux/commits/George-Cherian/Add-devlink-and-devlink-health-reporters-to/20201102-130844
base: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git c43fd36f7fec6c227c5e8a8ddd7d3fe97472182f
config: x86_64-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
# https://github.com/0day-ci/linux/commit/b407a9eab03c85981a41a1e03c88d04036a860d6
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review George-Cherian/Add-devlink-and-devlink-health-reporters-to/20201102-130844
git checkout b407a9eab03c85981a41a1e03c88d04036a860d6
# save the attached .config to linux build tree
make W=1 ARCH=x86_64
If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:18:5: warning: no previous prototype for 'rvu_report_pair_start' [-Wmissing-prototypes]
18 | int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
| ^~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:29:5: warning: no previous prototype for 'rvu_report_pair_end' [-Wmissing-prototypes]
29 | int rvu_report_pair_end(struct devlink_fmsg *fmsg)
| ^~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:201:5: warning: no previous prototype for 'rvu_npa_register_interrupts' [-Wmissing-prototypes]
201 | int rvu_npa_register_interrupts(struct rvu *rvu)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
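The usual fix for these W=1 warnings is to give each definition a previous prototype: make the file-local helpers static, and declare the entry points used from rvu.c in a header. A sketch (placing them in rvu_devlink.h is an assumption):

/* rvu_devlink.h: declare the cross-file entry points */
int rvu_npa_register_interrupts(struct rvu *rvu);
void rvu_npa_unregister_interrupts(struct rvu *rvu);

/* rvu_devlink.c: helpers used only in this file become static */
static int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name);
static int rvu_report_pair_end(struct devlink_fmsg *fmsg);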
vim +/rvu_report_pair_start +18 drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
17
> 18 int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
19 {
20 int err;
21
22 err = devlink_fmsg_pair_nest_start(fmsg, name);
23 if (err)
24 return err;
25
26 return devlink_fmsg_obj_nest_start(fmsg);
27 }
28
> 29 int rvu_report_pair_end(struct devlink_fmsg *fmsg)
30 {
31 int err;
32
33 err = devlink_fmsg_obj_nest_end(fmsg);
34 if (err)
35 return err;
36
37 return devlink_fmsg_pair_nest_end(fmsg);
38 }
39
40 static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
41 {
42 struct rvu_npa_event_cnt *npa_event_count;
43 struct rvu_devlink *rvu_dl = rvu_irq;
44 struct rvu *rvu;
45 int blkaddr;
46 u64 intr;
47
48 rvu = rvu_dl->rvu;
49 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
50 if (blkaddr < 0)
51 return IRQ_NONE;
52
53 npa_event_count = rvu_dl->npa_event_cnt;
54 intr = rvu_read64(rvu, blkaddr, NPA_AF_RVU_INT);
55
56 if (intr & BIT_ULL(0))
57 npa_event_count->unmap_slot_count++;
58 /* Clear interrupts */
59 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT, intr);
60 return IRQ_HANDLED;
61 }
62
63 static int rvu_npa_inpq_to_cnt(u16 in,
64 struct rvu_npa_event_cnt *npa_event_count)
65 {
66 switch (in) {
67 case 0:
68 return 0;
69 case BIT(NPA_INPQ_NIX0_RX):
70 return npa_event_count->free_dis_nix0_rx_count++;
71 case BIT(NPA_INPQ_NIX0_TX):
72 return npa_event_count->free_dis_nix0_tx_count++;
73 case BIT(NPA_INPQ_NIX1_RX):
74 return npa_event_count->free_dis_nix1_rx_count++;
75 case BIT(NPA_INPQ_NIX1_TX):
76 return npa_event_count->free_dis_nix1_tx_count++;
77 case BIT(NPA_INPQ_SSO):
78 return npa_event_count->free_dis_sso_count++;
79 case BIT(NPA_INPQ_TIM):
80 return npa_event_count->free_dis_tim_count++;
81 case BIT(NPA_INPQ_DPI):
82 return npa_event_count->free_dis_dpi_count++;
83 case BIT(NPA_INPQ_AURA_OP):
84 return npa_event_count->free_dis_aura_count++;
85 case BIT(NPA_INPQ_INTERNAL_RSV):
86 return npa_event_count->free_dis_rsvd_count++;
87 }
88
89 return npa_event_count->alloc_dis_rsvd_count++;
90 }
91
92 static irqreturn_t rvu_npa_af_gen_intr_handler(int irq, void *rvu_irq)
93 {
94 struct rvu_npa_event_cnt *npa_event_count;
95 struct rvu_devlink *rvu_dl = rvu_irq;
96 struct rvu *rvu;
97 int blkaddr, val;
98 u64 intr;
99
100 rvu = rvu_dl->rvu;
101 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
102 if (blkaddr < 0)
103 return IRQ_NONE;
104
105 npa_event_count = rvu_dl->npa_event_cnt;
106 intr = rvu_read64(rvu, blkaddr, NPA_AF_GEN_INT);
107
108 if (intr & BIT_ULL(32))
109 npa_event_count->unmap_pf_count++;
110
111 val = FIELD_GET(GENMASK(31, 16), intr);
112 rvu_npa_inpq_to_cnt(val, npa_event_count);
113
114 val = FIELD_GET(GENMASK(15, 0), intr);
115 rvu_npa_inpq_to_cnt(val, npa_event_count);
116
117 /* Clear interrupts */
118 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT, intr);
119 return IRQ_HANDLED;
120 }
121
122 static irqreturn_t rvu_npa_af_err_intr_handler(int irq, void *rvu_irq)
123 {
124 struct rvu_npa_event_cnt *npa_event_count;
125 struct rvu_devlink *rvu_dl = rvu_irq;
126 struct rvu *rvu;
127 int blkaddr;
128 u64 intr;
129
130 rvu = rvu_dl->rvu;
131 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
132 if (blkaddr < 0)
133 return IRQ_NONE;
134
135 npa_event_count = rvu_dl->npa_event_cnt;
136 intr = rvu_read64(rvu, blkaddr, NPA_AF_ERR_INT);
137
138 if (intr & BIT_ULL(14))
139 npa_event_count->aq_inst_count++;
140
141 if (intr & BIT_ULL(13))
142 npa_event_count->aq_res_count++;
143
144 if (intr & BIT_ULL(12))
145 npa_event_count->aq_db_count++;
146
147 /* Clear interrupts */
148 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT, intr);
149 return IRQ_HANDLED;
150 }
151
152 static irqreturn_t rvu_npa_af_ras_intr_handler(int irq, void *rvu_irq)
153 {
154 struct rvu_npa_event_cnt *npa_event_count;
155 struct rvu_devlink *rvu_dl = rvu_irq;
156 struct rvu *rvu;
157 int blkaddr;
158 u64 intr;
159
160 rvu = rvu_dl->rvu;
161 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
162 if (blkaddr < 0)
163 return IRQ_NONE;
164
165 npa_event_count = rvu_dl->npa_event_cnt;
166 intr = rvu_read64(rvu, blkaddr, NPA_AF_RAS);
167
168 if (intr & BIT_ULL(34))
169 npa_event_count->poison_aq_inst_count++;
170
171 if (intr & BIT_ULL(33))
172 npa_event_count->poison_aq_res_count++;
173
174 if (intr & BIT_ULL(32))
175 npa_event_count->poison_aq_cxt_count++;
176
177 /* Clear interrupts */
178 rvu_write64(rvu, blkaddr, NPA_AF_RAS, intr);
179 return IRQ_HANDLED;
180 }
181
182 static bool rvu_npa_af_request_irq(struct rvu *rvu, int blkaddr, int offset,
183 const char *name, irq_handler_t fn)
184 {
185 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
186 int rc;
187
188 WARN_ON(rvu->irq_allocated[offset]);
189 rvu->irq_allocated[offset] = false;
190 sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
191 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
192 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
193 if (rc)
194 dev_warn(rvu->dev, "Failed to register %s irq\n", name);
195 else
196 rvu->irq_allocated[offset] = true;
197
198 return rvu->irq_allocated[offset];
199 }
200
> 201 int rvu_npa_register_interrupts(struct rvu *rvu)
202 {
203 int blkaddr, base;
204 bool rc;
205
206 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NPA, 0);
207 if (blkaddr < 0)
208 return blkaddr;
209
210 /* Get NPA AF MSIX vectors offset. */
211 base = rvu_read64(rvu, blkaddr, NPA_PRIV_AF_INT_CFG) & 0x3ff;
212 if (!base) {
213 dev_warn(rvu->dev,
214 "Failed to get NPA_AF_INT vector offsets\n");
215 return 0;
216 }
217
218 /* Register and enable NPA_AF_RVU_INT interrupt */
219 rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_RVU,
220 "NPA_AF_RVU_INT",
221 rvu_npa_af_rvu_intr_handler);
222 if (!rc)
223 goto err;
224 rvu_write64(rvu, blkaddr, NPA_AF_RVU_INT_ENA_W1S, ~0ULL);
225
226 /* Register and enable NPA_AF_GEN_INT interrupt */
227 rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_GEN,
228 "NPA_AF_RVU_GEN",
229 rvu_npa_af_gen_intr_handler);
230 if (!rc)
231 goto err;
232 rvu_write64(rvu, blkaddr, NPA_AF_GEN_INT_ENA_W1S, ~0ULL);
233
234 /* Register and enable NPA_AF_ERR_INT interrupt */
235 rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_AF_ERR,
236 "NPA_AF_ERR_INT",
237 rvu_npa_af_err_intr_handler);
238 if (!rc)
239 goto err;
240 rvu_write64(rvu, blkaddr, NPA_AF_ERR_INT_ENA_W1S, ~0ULL);
241
242 /* Register and enable NPA_AF_RAS interrupt */
243 rc = rvu_npa_af_request_irq(rvu, blkaddr, base + NPA_AF_INT_VEC_POISON,
244 "NPA_AF_RAS",
245 rvu_npa_af_ras_intr_handler);
246 if (!rc)
247 goto err;
248 rvu_write64(rvu, blkaddr, NPA_AF_RAS_ENA_W1S, ~0ULL);
249
250 return 0;
251 err:
252 rvu_npa_unregister_interrupts(rvu);
253 return rc;
254 }
255
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
* [net-next PATCH 3/3] octeontx2-af: Add devlink health reporters for NIX
2020-11-02 5:06 [net-next PATCH 0/3] Add devlink and devlink health reporters to George Cherian
2020-11-02 5:06 ` [net-next PATCH 1/3] octeontx2-af: Add devlink support to af driver George Cherian
2020-11-02 5:06 ` [net-next PATCH 2/3] octeontx2-af: Add devlink health reporters for NPA George Cherian
@ 2020-11-02 5:06 ` George Cherian
2020-11-03 7:30 ` kernel test robot
2020-11-02 18:00 ` [net-next PATCH 0/3] Add devlink and devlink health reporters to Jakub Kicinski
From: George Cherian @ 2020-11-02 5:06 UTC
To: netdev, linux-kernel
Cc: kuba, davem, sgoutham, lcherian, gakula, masahiroy, george.cherian
Add health reporters for RVU NIX block.
Only reporter dump is supported.
Output:
# ./devlink health
pci/0002:01:00.0:
reporter npa
state healthy error 0 recover 0
reporter nix
state healthy error 0 recover 0
# ./devlink health dump show pci/0002:01:00.0 reporter nix
NIX_AF_GENERAL:
Memory Fault on NIX_AQ_INST_S read: 0
Memory Fault on NIX_AQ_RES_S write: 0
AQ Doorbell error: 0
Rx on unmapped PF_FUNC: 0
Rx multicast replication error: 0
Memory fault on NIX_RX_MCE_S read: 0
Memory fault on multicast WQE read: 0
Memory fault on mirror WQE read: 0
Memory fault on mirror pkt write: 0
Memory fault on multicast pkt write: 0
NIX_AF_RAS:
Poisoned data on NIX_AQ_INST_S read: 0
Poisoned data on NIX_AQ_RES_S write: 0
Poisoned data on HW context read: 0
Poisoned data on packet read from mirror buffer: 0
Poisoned data on packet read from mcast buffer: 0
Poisoned data on WQE read from mirror buffer: 0
Poisoned data on WQE read from multicast buffer: 0
Poisoned data on NIX_RX_MCE_S read: 0
NIX_AF_RVU:
Unmap Slot Error: 0
Signed-off-by: Sunil Kovvuri Goutham <sgoutham@marvell.com>
Signed-off-by: Jerin Jacob <jerinj@marvell.com>
Signed-off-by: George Cherian <george.cherian@marvell.com>
---
.../marvell/octeontx2/af/rvu_devlink.c | 376 +++++++++++++++++-
.../marvell/octeontx2/af/rvu_devlink.h | 24 ++
.../marvell/octeontx2/af/rvu_struct.h | 10 +
3 files changed, 409 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
index 946e751fb544..c2dd2026c7da 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
@@ -14,6 +14,7 @@
#define DRV_NAME "octeontx2-af"
void rvu_npa_unregister_interrupts(struct rvu *rvu);
+void rvu_nix_unregister_interrupts(struct rvu *rvu);
int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
{
@@ -37,6 +38,373 @@ int rvu_report_pair_end(struct devlink_fmsg *fmsg)
return devlink_fmsg_pair_nest_end(fmsg);
}
+irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_cnt *nix_event_count;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_count = rvu_dl->nix_event_cnt;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
+
+ if (intr & BIT_ULL(0))
+ nix_event_count->unmap_slot_count++;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t rvu_nix_af_err_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_cnt *nix_event_count;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_count = rvu_dl->nix_event_cnt;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
+
+ if (intr & BIT_ULL(14))
+ nix_event_count->aq_inst_count++;
+ if (intr & BIT_ULL(13))
+ nix_event_count->aq_res_count++;
+ if (intr & BIT_ULL(12))
+ nix_event_count->aq_db_count++;
+ if (intr & BIT_ULL(6))
+ nix_event_count->rx_on_unmap_pf_count++;
+ if (intr & BIT_ULL(5))
+ nix_event_count->rx_mcast_repl_count++;
+ if (intr & BIT_ULL(4))
+ nix_event_count->rx_mcast_memfault_count++;
+ if (intr & BIT_ULL(3))
+ nix_event_count->rx_mcast_wqe_memfault_count++;
+ if (intr & BIT_ULL(2))
+ nix_event_count->rx_mirror_wqe_memfault_count++;
+ if (intr & BIT_ULL(1))
+ nix_event_count->rx_mirror_pktw_memfault_count++;
+ if (intr & BIT_ULL(0))
+ nix_event_count->rx_mcast_pktw_memfault_count++;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
+ return IRQ_HANDLED;
+}
+
+irqreturn_t rvu_nix_af_ras_intr_handler(int irq, void *rvu_irq)
+{
+ struct rvu_nix_event_cnt *nix_event_count;
+ struct rvu_devlink *rvu_dl = rvu_irq;
+ struct rvu *rvu;
+ int blkaddr;
+ u64 intr;
+
+ rvu = rvu_dl->rvu;
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return IRQ_NONE;
+
+ nix_event_count = rvu_dl->nix_event_cnt;
+ intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
+
+ if (intr & BIT_ULL(34))
+ nix_event_count->poison_aq_inst_count++;
+ if (intr & BIT_ULL(33))
+ nix_event_count->poison_aq_res_count++;
+ if (intr & BIT_ULL(32))
+ nix_event_count->poison_aq_cxt_count++;
+ if (intr & BIT_ULL(4))
+ nix_event_count->rx_mirror_data_poison_count++;
+ if (intr & BIT_ULL(3))
+ nix_event_count->rx_mcast_data_poison_count++;
+ if (intr & BIT_ULL(2))
+ nix_event_count->rx_mirror_wqe_poison_count++;
+ if (intr & BIT_ULL(1))
+ nix_event_count->rx_mcast_wqe_poison_count++;
+ if (intr & BIT_ULL(0))
+ nix_event_count->rx_mce_poison_count++;
+
+ /* Clear interrupts */
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
+ return IRQ_HANDLED;
+}
+
+static bool rvu_nix_af_request_irq(struct rvu *rvu, int offset,
+ const char *name, irq_handler_t fn)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int rc;
+
+ WARN_ON(rvu->irq_allocated[offset]);
+ rvu->irq_allocated[offset] = false;
+ sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
+ rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
+ &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
+ if (rc)
+ dev_warn(rvu->dev, "Failed to register %s irq\n", name);
+ else
+ rvu->irq_allocated[offset] = true;
+
+ return rvu->irq_allocated[offset];
+}
+
+static int rvu_nix_blk_register_interrupts(struct rvu *rvu,
+ int blkaddr)
+{
+ int base;
+ bool rc;
+
+ /* Get NIX AF MSIX vectors offset. */
+ base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!base) {
+ dev_warn(rvu->dev,
+ "Failed to get NIX%d NIX_AF_INT vector offsets\n",
+ blkaddr - BLKADDR_NIX0);
+ return 0;
+ }
+ /* Register and enable NIX_AF_RVU_INT interrupt */
+ rc = rvu_nix_af_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
+ "NIX_AF_RVU_INT",
+ rvu_nix_af_rvu_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_ERR_INT interrupt */
+ rc = rvu_nix_af_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
+ "NIX_AF_ERR_INT",
+ rvu_nix_af_err_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
+
+ /* Register and enable NIX_AF_RAS interrupt */
+ rc = rvu_nix_af_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
+ "NIX_AF_RAS",
+ rvu_nix_af_ras_intr_handler);
+ if (!rc)
+ goto err;
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
+
+ return 0;
+err:
+ rvu_nix_unregister_interrupts(rvu);
+ return -1;
+}
+
+int rvu_nix_register_interrupts(struct rvu *rvu)
+{
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return blkaddr;
+
+ rvu_nix_blk_register_interrupts(rvu, blkaddr);
+
+ return 0;
+}
+
+static void rvu_nix_blk_unregister_interrupts(struct rvu *rvu,
+ int blkaddr)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ int offs, i;
+
+ offs = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
+ if (!offs)
+ return;
+
+ rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1C, ~0ULL);
+ rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1C, ~0ULL);
+
+ if (rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + NIX_AF_INT_VEC_RVU),
+ rvu_dl);
+ rvu->irq_allocated[offs + NIX_AF_INT_VEC_RVU] = false;
+ }
+
+ for (i = NIX_AF_INT_VEC_AF_ERR; i < NIX_AF_INT_VEC_CNT; i++)
+ if (rvu->irq_allocated[offs + i]) {
+ free_irq(pci_irq_vector(rvu->pdev, offs + i), rvu_dl);
+ rvu->irq_allocated[offs + i] = false;
+ }
+}
+
+void rvu_nix_unregister_interrupts(struct rvu *rvu)
+{
+ int blkaddr = 0;
+
+ blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
+ if (blkaddr < 0)
+ return;
+
+ rvu_nix_blk_unregister_interrupts(rvu, blkaddr);
+}
+
+static int rvu_nix_report_show(struct devlink_fmsg *fmsg, struct rvu *rvu)
+{
+ struct rvu_devlink *rvu_dl = rvu->rvu_dl;
+ struct rvu_nix_event_cnt *nix_event_count = rvu_dl->nix_event_cnt;
+ int err;
+
+ err = rvu_report_pair_start(fmsg, "NIX_AF_GENERAL");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tMemory Fault on NIX_AQ_INST_S read",
+ nix_event_count->aq_inst_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tMemory Fault on NIX_AQ_RES_S write",
+ nix_event_count->aq_res_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tAQ Doorbell error",
+ nix_event_count->aq_db_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tRx on unmapped PF_FUNC",
+ nix_event_count->rx_on_unmap_pf_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tRx multicast replication error",
+ nix_event_count->rx_mcast_repl_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tMemory fault on NIX_RX_MCE_S read",
+ nix_event_count->rx_mcast_memfault_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tMemory fault on multicast WQE read",
+ nix_event_count->rx_mcast_wqe_memfault_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tMemory fault on mirror WQE read",
+ nix_event_count->rx_mirror_wqe_memfault_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tMemory fault on mirror pkt write",
+ nix_event_count->rx_mirror_pktw_memfault_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tMemory fault on multicast pkt write",
+ nix_event_count->rx_mcast_pktw_memfault_count);
+ if (err)
+ return err;
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RAS");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tPoisoned data on NIX_AQ_INST_S read",
+ nix_event_count->poison_aq_inst_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on NIX_AQ_RES_S write",
+ nix_event_count->poison_aq_res_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on HW context read",
+ nix_event_count->poison_aq_cxt_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on packet read from mirror buffer",
+ nix_event_count->rx_mirror_data_poison_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on packet read from mcast buffer",
+ nix_event_count->rx_mcast_data_poison_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on WQE read from mirror buffer",
+ nix_event_count->rx_mirror_wqe_poison_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on WQE read from multicast buffer",
+ nix_event_count->rx_mcast_wqe_poison_count);
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\n\tPoisoned data on NIX_RX_MCE_S read",
+ nix_event_count->rx_mce_poison_count);
+ if (err)
+ return err;
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ err = rvu_report_pair_start(fmsg, "NIX_AF_RVU");
+ if (err)
+ return err;
+ err = devlink_fmsg_u64_pair_put(fmsg, "\tUnmap Slot Error",
+ nix_event_count->unmap_slot_count);
+ if (err)
+ return err;
+ err = rvu_report_pair_end(fmsg);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int rvu_nix_reporter_dump(struct devlink_health_reporter *reporter,
+ struct devlink_fmsg *fmsg, void *ctx,
+ struct netlink_ext_ack *netlink_extack)
+{
+ struct rvu *rvu = devlink_health_reporter_priv(reporter);
+
+ return rvu_nix_report_show(fmsg, rvu);
+}
+
+static const struct devlink_health_reporter_ops rvu_nix_fault_reporter_ops = {
+ .name = "nix",
+ .dump = rvu_nix_reporter_dump,
+};
+
+int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
+{
+ struct devlink_health_reporter *rvu_nix_health_reporter;
+ struct rvu_nix_event_cnt *nix_event_count;
+ struct rvu *rvu = rvu_dl->rvu;
+
+ nix_event_count = kzalloc(sizeof(*nix_event_count), GFP_KERNEL);
+ if (!nix_event_count)
+ return -ENOMEM;
+
+ rvu_dl->nix_event_cnt = nix_event_count;
+ rvu_nix_health_reporter = devlink_health_reporter_create(rvu_dl->dl,
+ &rvu_nix_fault_reporter_ops,
+ 0, rvu);
+ if (IS_ERR(rvu_nix_health_reporter)) {
+ dev_warn(rvu->dev, "Failed to create nix reporter, err = %ld\n",
+ PTR_ERR(rvu_nix_health_reporter));
+ return PTR_ERR(rvu_nix_health_reporter);
+ }
+
+ rvu_dl->rvu_nix_health_reporter = rvu_nix_health_reporter;
+ return 0;
+}
+
+void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
+{
+ if (!rvu_dl->rvu_nix_health_reporter)
+ return;
+
+ devlink_health_reporter_destroy(rvu_dl->rvu_nix_health_reporter);
+}
+
static irqreturn_t rvu_npa_af_rvu_intr_handler(int irq, void *rvu_irq)
{
struct rvu_npa_event_cnt *npa_event_count;
@@ -420,12 +788,17 @@ static void rvu_npa_health_reporters_destroy(struct rvu_devlink *rvu_dl)
static int rvu_health_reporters_create(struct rvu *rvu)
{
struct rvu_devlink *rvu_dl;
+ int err;
if (!rvu->rvu_dl)
return -EINVAL;
rvu_dl = rvu->rvu_dl;
- return rvu_npa_health_reporters_create(rvu_dl);
+ err = rvu_npa_health_reporters_create(rvu_dl);
+ if (err)
+ return err;
+
+ return rvu_nix_health_reporters_create(rvu_dl);
}
static void rvu_health_reporters_destroy(struct rvu *rvu)
@@ -437,6 +810,7 @@ static void rvu_health_reporters_destroy(struct rvu *rvu)
rvu_dl = rvu->rvu_dl;
rvu_npa_health_reporters_destroy(rvu_dl);
+ rvu_nix_health_reporters_destroy(rvu_dl);
}
static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
index b3ce1a8fff57..15724ad2ed44 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.h
@@ -29,11 +29,35 @@ struct rvu_npa_event_cnt {
unsigned long poison_aq_cxt_count;
};
+struct rvu_nix_event_cnt {
+ unsigned long unmap_slot_count;
+ unsigned long aq_inst_count;
+ unsigned long aq_res_count;
+ unsigned long aq_db_count;
+ unsigned long rx_on_unmap_pf_count;
+ unsigned long rx_mcast_repl_count;
+ unsigned long rx_mcast_memfault_count;
+ unsigned long rx_mcast_wqe_memfault_count;
+ unsigned long rx_mirror_wqe_memfault_count;
+ unsigned long rx_mirror_pktw_memfault_count;
+ unsigned long rx_mcast_pktw_memfault_count;
+ unsigned long poison_aq_inst_count;
+ unsigned long poison_aq_res_count;
+ unsigned long poison_aq_cxt_count;
+ unsigned long rx_mirror_data_poison_count;
+ unsigned long rx_mcast_data_poison_count;
+ unsigned long rx_mirror_wqe_poison_count;
+ unsigned long rx_mcast_wqe_poison_count;
+ unsigned long rx_mce_poison_count;
+};
+
struct rvu_devlink {
struct devlink *dl;
struct rvu *rvu;
struct devlink_health_reporter *rvu_npa_health_reporter;
struct rvu_npa_event_cnt *npa_event_cnt;
+ struct devlink_health_reporter *rvu_nix_health_reporter;
+ struct rvu_nix_event_cnt *nix_event_cnt;
};
/* Devlink APIs */
diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
index 995add5d8bff..b5944199faf5 100644
--- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
+++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_struct.h
@@ -74,6 +74,16 @@ enum npa_af_int_vec_e {
NPA_AF_INT_VEC_CNT = 0x5,
};
+/* NIX Admin function Interrupt Vector Enumeration */
+enum nix_af_int_vec_e {
+ NIX_AF_INT_VEC_RVU = 0x0,
+ NIX_AF_INT_VEC_GEN = 0x1,
+ NIX_AF_INT_VEC_AQ_DONE = 0x2,
+ NIX_AF_INT_VEC_AF_ERR = 0x3,
+ NIX_AF_INT_VEC_POISON = 0x4,
+ NIX_AF_INT_VEC_CNT = 0x5,
+};
+
/**
* RVU PF Interrupt Vector Enumeration
*/
--
2.25.1
* Re: [net-next PATCH 3/3] octeontx2-af: Add devlink health reporters for NIX
2020-11-02 5:06 ` [net-next PATCH 3/3] octeontx2-af: Add devlink health reporters for NIX George Cherian
@ 2020-11-03 7:30 ` kernel test robot
0 siblings, 0 replies; 10+ messages in thread
From: kernel test robot @ 2020-11-03 7:30 UTC (permalink / raw)
To: George Cherian, netdev, linux-kernel
Cc: kbuild-all, kuba, davem, sgoutham, lcherian, gakula, masahiroy,
george.cherian
Hi George,
I love your patch! Perhaps something to improve:
[auto build test WARNING on net-next/master]
url: https://github.com/0day-ci/linux/commits/George-Cherian/Add-devlink-and-devlink-health-reporters-to/20201102-130844
base: https://git.kernel.org/pub/scm/linux/kernel/git/davem/net-next.git c43fd36f7fec6c227c5e8a8ddd7d3fe97472182f
config: x86_64-allyesconfig (attached as .config)
compiler: gcc-9 (Debian 9.3.0-15) 9.3.0
reproduce (this is a W=1 build):
# https://github.com/0day-ci/linux/commit/bdffba84e2716a5f218840ac6a80052587e48c59
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review George-Cherian/Add-devlink-and-devlink-health-reporters-to/20201102-130844
git checkout bdffba84e2716a5f218840ac6a80052587e48c59
# save the attached .config to linux build tree
make W=1 ARCH=x86_64
If you fix the issue, kindly add the following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>
All warnings (new ones prefixed by >>):
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:19:5: warning: no previous prototype for 'rvu_report_pair_start' [-Wmissing-prototypes]
19 | int rvu_report_pair_start(struct devlink_fmsg *fmsg, const char *name)
| ^~~~~~~~~~~~~~~~~~~~~
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:30:5: warning: no previous prototype for 'rvu_report_pair_end' [-Wmissing-prototypes]
30 | int rvu_report_pair_end(struct devlink_fmsg *fmsg)
| ^~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:41:13: warning: no previous prototype for 'rvu_nix_af_rvu_intr_handler' [-Wmissing-prototypes]
41 | irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:65:13: warning: no previous prototype for 'rvu_nix_af_err_intr_handler' [-Wmissing-prototypes]
65 | irqreturn_t rvu_nix_af_err_intr_handler(int irq, void *rvu_irq)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:107:13: warning: no previous prototype for 'rvu_nix_af_ras_intr_handler' [-Wmissing-prototypes]
107 | irqreturn_t rvu_nix_af_ras_intr_handler(int irq, void *rvu_irq)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:208:5: warning: no previous prototype for 'rvu_nix_register_interrupts' [-Wmissing-prototypes]
208 | int rvu_nix_register_interrupts(struct rvu *rvu)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:376:5: warning: no previous prototype for 'rvu_nix_health_reporters_create' [-Wmissing-prototypes]
376 | int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
>> drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:400:6: warning: no previous prototype for 'rvu_nix_health_reporters_destroy' [-Wmissing-prototypes]
400 | void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c:569:5: warning: no previous prototype for 'rvu_npa_register_interrupts' [-Wmissing-prototypes]
569 | int rvu_npa_register_interrupts(struct rvu *rvu)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
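All of these warnings share one root cause: the new functions in rvu_devlink.c have external linkage but no prior declaration in scope, which -Wmissing-prototypes flags. A minimal sketch of the conventional fix (an illustration, not the author's actual respin): functions referenced only from within rvu_devlink.c, such as the three IRQ handlers and the report-pair helpers, can simply be given static linkage, e.g.:
-irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
+static irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
Anything genuinely called from other files instead wants a prototype in rvu_devlink.h, along the lines of:
int rvu_nix_register_interrupts(struct rvu *rvu);
int rvu_nix_health_reporters_create(struct rvu_devlink *rvu_dl);
void rvu_nix_health_reporters_destroy(struct rvu_devlink *rvu_dl);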
vim +/rvu_nix_af_rvu_intr_handler +41 drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c
40
> 41 irqreturn_t rvu_nix_af_rvu_intr_handler(int irq, void *rvu_irq)
42 {
43 struct rvu_nix_event_cnt *nix_event_count;
44 struct rvu_devlink *rvu_dl = rvu_irq;
45 struct rvu *rvu;
46 int blkaddr;
47 u64 intr;
48
49 rvu = rvu_dl->rvu;
50 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
51 if (blkaddr < 0)
52 return IRQ_NONE;
53
54 nix_event_count = rvu_dl->nix_event_cnt;
55 intr = rvu_read64(rvu, blkaddr, NIX_AF_RVU_INT);
56
57 if (intr & BIT_ULL(0))
58 nix_event_count->unmap_slot_count++;
59
60 /* Clear interrupts */
61 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT, intr);
62 return IRQ_HANDLED;
63 }
64
> 65 irqreturn_t rvu_nix_af_err_intr_handler(int irq, void *rvu_irq)
66 {
67 struct rvu_nix_event_cnt *nix_event_count;
68 struct rvu_devlink *rvu_dl = rvu_irq;
69 struct rvu *rvu;
70 int blkaddr;
71 u64 intr;
72
73 rvu = rvu_dl->rvu;
74 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
75 if (blkaddr < 0)
76 return IRQ_NONE;
77
78 nix_event_count = rvu_dl->nix_event_cnt;
79 intr = rvu_read64(rvu, blkaddr, NIX_AF_ERR_INT);
80
81 if (intr & BIT_ULL(14))
82 nix_event_count->aq_inst_count++;
83 if (intr & BIT_ULL(13))
84 nix_event_count->aq_res_count++;
85 if (intr & BIT_ULL(12))
86 nix_event_count->aq_db_count++;
87 if (intr & BIT_ULL(6))
88 nix_event_count->rx_on_unmap_pf_count++;
89 if (intr & BIT_ULL(5))
90 nix_event_count->rx_mcast_repl_count++;
91 if (intr & BIT_ULL(4))
92 nix_event_count->rx_mcast_memfault_count++;
93 if (intr & BIT_ULL(3))
94 nix_event_count->rx_mcast_wqe_memfault_count++;
95 if (intr & BIT_ULL(2))
96 nix_event_count->rx_mirror_wqe_memfault_count++;
97 if (intr & BIT_ULL(1))
98 nix_event_count->rx_mirror_pktw_memfault_count++;
99 if (intr & BIT_ULL(0))
100 nix_event_count->rx_mcast_pktw_memfault_count++;
101
102 /* Clear interrupts */
103 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT, intr);
104 return IRQ_HANDLED;
105 }
106
> 107 irqreturn_t rvu_nix_af_ras_intr_handler(int irq, void *rvu_irq)
108 {
109 struct rvu_nix_event_cnt *nix_event_count;
110 struct rvu_devlink *rvu_dl = rvu_irq;
111 struct rvu *rvu;
112 int blkaddr;
113 u64 intr;
114
115 rvu = rvu_dl->rvu;
116 blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
117 if (blkaddr < 0)
118 return IRQ_NONE;
119
120 nix_event_count = rvu_dl->nix_event_cnt;
121 intr = rvu_read64(rvu, blkaddr, NIX_AF_RAS);
122
123 if (intr & BIT_ULL(34))
124 nix_event_count->poison_aq_inst_count++;
125 if (intr & BIT_ULL(33))
126 nix_event_count->poison_aq_res_count++;
127 if (intr & BIT_ULL(32))
128 nix_event_count->poison_aq_cxt_count++;
129 if (intr & BIT_ULL(4))
130 nix_event_count->rx_mirror_data_poison_count++;
131 if (intr & BIT_ULL(3))
132 nix_event_count->rx_mcast_data_poison_count++;
133 if (intr & BIT_ULL(2))
134 nix_event_count->rx_mirror_wqe_poison_count++;
135 if (intr & BIT_ULL(1))
136 nix_event_count->rx_mcast_wqe_poison_count++;
137 if (intr & BIT_ULL(0))
138 nix_event_count->rx_mce_poison_count++;
139
140 /* Clear interrupts */
141 rvu_write64(rvu, blkaddr, NIX_AF_RAS, intr);
142 return IRQ_HANDLED;
143 }
144
145 static bool rvu_nix_af_request_irq(struct rvu *rvu, int offset,
146 const char *name, irq_handler_t fn)
147 {
148 struct rvu_devlink *rvu_dl = rvu->rvu_dl;
149 int rc;
150
151 WARN_ON(rvu->irq_allocated[offset]);
152 rvu->irq_allocated[offset] = false;
153 sprintf(&rvu->irq_name[offset * NAME_SIZE], name);
154 rc = request_irq(pci_irq_vector(rvu->pdev, offset), fn, 0,
155 &rvu->irq_name[offset * NAME_SIZE], rvu_dl);
156 if (rc)
157 dev_warn(rvu->dev, "Failed to register %s irq\n", name);
158 else
159 rvu->irq_allocated[offset] = true;
160
161 return rvu->irq_allocated[offset];
162 }
163
164 static int rvu_nix_blk_register_interrupts(struct rvu *rvu,
165 int blkaddr)
166 {
167 int base;
168 bool rc;
169
170 /* Get NIX AF MSIX vectors offset. */
171 base = rvu_read64(rvu, blkaddr, NIX_PRIV_AF_INT_CFG) & 0x3ff;
172 if (!base) {
173 dev_warn(rvu->dev,
174 "Failed to get NIX%d NIX_AF_INT vector offsets\n",
175 blkaddr - BLKADDR_NIX0);
176 return 0;
177 }
178 /* Register and enable NIX_AF_RVU_INT interrupt */
179 rc = rvu_nix_af_request_irq(rvu, base + NIX_AF_INT_VEC_RVU,
180 "NIX_AF_RVU_INT",
181 rvu_nix_af_rvu_intr_handler);
182 if (!rc)
183 goto err;
184 rvu_write64(rvu, blkaddr, NIX_AF_RVU_INT_ENA_W1S, ~0ULL);
185
186 /* Register and enable NIX_AF_ERR_INT interrupt */
187 rc = rvu_nix_af_request_irq(rvu, base + NIX_AF_INT_VEC_AF_ERR,
188 "NIX_AF_ERR_INT",
189 rvu_nix_af_err_intr_handler);
190 if (!rc)
191 goto err;
192 rvu_write64(rvu, blkaddr, NIX_AF_ERR_INT_ENA_W1S, ~0ULL);
193
194 /* Register and enable NIX_AF_RAS interrupt */
195 rc = rvu_nix_af_request_irq(rvu, base + NIX_AF_INT_VEC_POISON,
196 "NIX_AF_RAS",
197 rvu_nix_af_ras_intr_handler);
198 if (!rc)
199 goto err;
200 rvu_write64(rvu, blkaddr, NIX_AF_RAS_ENA_W1S, ~0ULL);
201
202 return 0;
203 err:
204 rvu_nix_unregister_interrupts(rvu);
205 return -1;
206 }
207
> 208 int rvu_nix_register_interrupts(struct rvu *rvu)
209 {
210 int blkaddr = 0;
211
212 blkaddr = rvu_get_blkaddr(rvu, blkaddr, 0);
213 if (blkaddr < 0)
214 return blkaddr;
215
216 rvu_nix_blk_register_interrupts(rvu, blkaddr);
217
218 return 0;
219 }
220
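Beyond the prototype warnings, two details in the quoted code are worth a second look. rvu_nix_register_interrupts() at line 212 passes its local blkaddr (initialized to 0) as the block-type argument, whereas every handler above calls rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0); presumably the intended call is:
blkaddr = rvu_get_blkaddr(rvu, BLKTYPE_NIX, 0);
Likewise, the sprintf() at line 153 passes the caller-supplied name as the format string; sprintf(buf, "%s", name) (or snprintf with a bound) would avoid treating any '%' in the name as a conversion specifier.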
---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
[-- Attachment #2: .config.gz --]
[-- Type: application/gzip, Size: 77062 bytes --]
* Re: [net-next PATCH 0/3] Add devlink and devlink health reporters to
2020-11-02 5:06 [net-next PATCH 0/3] Add devlink and devlink health reporters to George Cherian
` (2 preceding siblings ...)
2020-11-02 5:06 ` [net-next PATCH 3/3] octeontx2-af: Add devlink health reporters for NIX George Cherian
@ 2020-11-02 18:00 ` Jakub Kicinski
3 siblings, 0 replies; 10+ messages in thread
From: Jakub Kicinski @ 2020-11-02 18:00 UTC (permalink / raw)
To: George Cherian
Cc: netdev, linux-kernel, davem, sgoutham, lcherian, gakula, masahiroy
On Mon, 2 Nov 2020 10:36:46 +0530 George Cherian wrote:
> Add basic devlink and devlink health reporters.
> Devlink health reporters are added for NPA and NIX blocks.
> These reporters report the error count in respective blocks.
>
> Address Jakub's comment to add devlink support for error reporting.
> https://www.spinics.net/lists/netdev/msg670712.html
Please make sure you fix all new warnings when built with W=1 C=1.
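For reference, the warnings above should reproduce with an in-tree build of just this driver; something like the following invocation works (W=1 enables the extra compiler warnings, C=1 additionally runs sparse over the files being rebuilt):
make W=1 C=1 drivers/net/ethernet/marvell/octeontx2/af/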