* [PATCH net-next 0/7] bnxt_en: Error recovery optimizations.
@ 2021-02-14 23:04 Michael Chan
  2021-02-14 23:04 ` [PATCH net-next 1/7] bnxt_en: Update firmware interface spec to 1.10.2.16 Michael Chan
                   ` (7 more replies)
  0 siblings, 8 replies; 9+ messages in thread
From: Michael Chan @ 2021-02-14 23:04 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo

This series implements some optimizations to error recovery.  One
patch adds an echo/reply mechanism with firmware to enhance error
detection.  The other patches speed up the recovery process by
polling config space earlier and by selectively initializing
context memory during re-initialization.

Edwin Peer (1):
  bnxt_en: selectively allocate context memories

Michael Chan (6):
  bnxt_en: Update firmware interface spec to 1.10.2.16.
  bnxt_en: Implement faster recovery for firmware fatal error.
  bnxt_en: Add context memory initialization infrastructure.
  bnxt_en: Initialize "context kind" field for context memory blocks.
  bnxt_en: Reply to firmware's echo request async message.
  bnxt_en: Improve logging of error recovery settings information.

 drivers/net/ethernet/broadcom/bnxt/bnxt.c     | 222 ++++++++++++++----
 drivers/net/ethernet/broadcom/bnxt/bnxt.h     |  24 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 105 ++++++++-
 3 files changed, 290 insertions(+), 61 deletions(-)

-- 
2.18.1


* [PATCH net-next 1/7] bnxt_en: Update firmware interface spec to 1.10.2.16.
  2021-02-14 23:04 [PATCH net-next 0/7] bnxt_en: Error recovery optimizations Michael Chan
@ 2021-02-14 23:04 ` Michael Chan
  2021-02-14 23:04 ` [PATCH net-next 2/7] bnxt_en: selectively allocate context memories Michael Chan
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Michael Chan @ 2021-02-14 23:04 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo

The main changes are the new echo request/response messages used by the
firmware for error detection, and the NO_FCS feature to transmit frames
without FCS.

Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.h     |   2 +-
 drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h | 105 ++++++++++++++++--
 2 files changed, 96 insertions(+), 11 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 4ef6888acdc6..36007b0d1177 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -18,7 +18,7 @@
  */
 #define DRV_VER_MAJ	1
 #define DRV_VER_MIN	10
-#define DRV_VER_UPD	1
+#define DRV_VER_UPD	2
 
 #include <linux/ethtool.h>
 #include <linux/interrupt.h>
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index d5c6e6a3d22d..6199f125bc13 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -103,6 +103,7 @@ struct hwrm_short_input {
 struct cmd_nums {
 	__le16	req_type;
 	#define HWRM_VER_GET                              0x0UL
+	#define HWRM_FUNC_ECHO_RESPONSE                   0xbUL
 	#define HWRM_ERROR_RECOVERY_QCFG                  0xcUL
 	#define HWRM_FUNC_DRV_IF_CHANGE                   0xdUL
 	#define HWRM_FUNC_BUF_UNRGTR                      0xeUL
@@ -501,8 +502,8 @@ struct hwrm_err_output {
 #define HWRM_VERSION_MAJOR 1
 #define HWRM_VERSION_MINOR 10
 #define HWRM_VERSION_UPDATE 2
-#define HWRM_VERSION_RSVD 11
-#define HWRM_VERSION_STR "1.10.2.11"
+#define HWRM_VERSION_RSVD 16
+#define HWRM_VERSION_STR "1.10.2.16"
 
 /* hwrm_ver_get_input (size:192b/24B) */
 struct hwrm_ver_get_input {
@@ -723,7 +724,8 @@ struct hwrm_async_event_cmpl {
 	#define ASYNC_EVENT_CMPL_EVENT_ID_QUIESCE_DONE               0x3fUL
 	#define ASYNC_EVENT_CMPL_EVENT_ID_DEFERRED_RESPONSE          0x40UL
 	#define ASYNC_EVENT_CMPL_EVENT_ID_PFC_WATCHDOG_CFG_CHANGE    0x41UL
-	#define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID          0x42UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST               0x42UL
+	#define ASYNC_EVENT_CMPL_EVENT_ID_MAX_RGTR_EVENT_ID          0x43UL
 	#define ASYNC_EVENT_CMPL_EVENT_ID_FW_TRACE_MSG               0xfeUL
 	#define ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR                 0xffUL
 	#define ASYNC_EVENT_CMPL_EVENT_ID_LAST                      ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR
@@ -1052,6 +1054,26 @@ struct hwrm_async_event_cmpl_deferred_response {
 	__le32	event_data1;
 };
 
+/* hwrm_async_event_cmpl_echo_request (size:128b/16B) */
+struct hwrm_async_event_cmpl_echo_request {
+	__le16	type;
+	#define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_MASK            0x3fUL
+	#define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_SFT             0
+	#define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT  0x2eUL
+	#define ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_LAST             ASYNC_EVENT_CMPL_ECHO_REQUEST_TYPE_HWRM_ASYNC_EVENT
+	__le16	event_id;
+	#define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST 0x42UL
+	#define ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_LAST        ASYNC_EVENT_CMPL_ECHO_REQUEST_EVENT_ID_ECHO_REQUEST
+	__le32	event_data2;
+	u8	opaque_v;
+	#define ASYNC_EVENT_CMPL_ECHO_REQUEST_V          0x1UL
+	#define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_MASK 0xfeUL
+	#define ASYNC_EVENT_CMPL_ECHO_REQUEST_OPAQUE_SFT 1
+	u8	timestamp_lo;
+	__le16	timestamp_hi;
+	__le32	event_data1;
+};
+
 /* hwrm_func_reset_input (size:192b/24B) */
 struct hwrm_func_reset_input {
 	__le16	req_type;
@@ -1294,6 +1316,10 @@ struct hwrm_func_qcaps_output {
 	#define FUNC_QCAPS_RESP_FLAGS_EXT_SOC_SPD_SUPPORTED                      0x200UL
 	#define FUNC_QCAPS_RESP_FLAGS_EXT_FW_LIVEPATCH_SUPPORTED                 0x400UL
 	#define FUNC_QCAPS_RESP_FLAGS_EXT_FAST_RESET_CAPABLE                     0x800UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT_TX_METADATA_CFG_CAPABLE                0x1000UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT_NVM_OPTION_ACTION_SUPPORTED            0x2000UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT_BD_METADATA_SUPPORTED                  0x4000UL
+	#define FUNC_QCAPS_RESP_FLAGS_EXT_ECHO_REQUEST_SUPPORTED                 0x8000UL
 	u8	max_schqs;
 	u8	mpc_chnls_cap;
 	#define FUNC_QCAPS_RESP_MPC_CHNLS_CAP_TCE         0x1UL
@@ -1339,6 +1365,7 @@ struct hwrm_func_qcfg_output {
 	#define FUNC_QCFG_RESP_FLAGS_PPP_PUSH_MODE_ENABLED        0x400UL
 	#define FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED         0x800UL
 	#define FUNC_QCFG_RESP_FLAGS_FAST_RESET_ALLOWED           0x1000UL
+	#define FUNC_QCFG_RESP_FLAGS_MULTI_ROOT                   0x2000UL
 	u8	mac_address[6];
 	__le16	pci_id;
 	__le16	alloc_rsscos_ctx;
@@ -1474,6 +1501,8 @@ struct hwrm_func_cfg_input {
 	#define FUNC_CFG_REQ_FLAGS_HOT_RESET_IF_EN_DIS            0x4000000UL
 	#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_ENABLE           0x8000000UL
 	#define FUNC_CFG_REQ_FLAGS_PPP_PUSH_MODE_DISABLE          0x10000000UL
+	#define FUNC_CFG_REQ_FLAGS_BD_METADATA_ENABLE             0x20000000UL
+	#define FUNC_CFG_REQ_FLAGS_BD_METADATA_DISABLE            0x40000000UL
 	__le32	enables;
 	#define FUNC_CFG_REQ_ENABLES_MTU                      0x1UL
 	#define FUNC_CFG_REQ_ENABLES_MRU                      0x2UL
@@ -2063,8 +2092,32 @@ struct hwrm_func_backing_store_qcaps_output {
 	u8	tqm_fp_rings_count;
 	u8	stat_init_offset;
 	u8	mrav_init_offset;
-	u8	rsvd[6];
-	u8	valid;
+	u8	tqm_fp_rings_count_ext;
+	u8	rsvd[5];
+	u8	valid;
+};
+
+/* tqm_fp_ring_cfg (size:128b/16B) */
+struct tqm_fp_ring_cfg {
+	u8	tqm_ring_pg_size_tqm_ring_lvl;
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_MASK      0xfUL
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_SFT       0
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_0       0x0UL
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_1       0x1UL
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2       0x2UL
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LAST       TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_LVL_LVL_2
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_MASK  0xf0UL
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_SFT   4
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_4K   (0x0UL << 4)
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8K   (0x1UL << 4)
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_64K  (0x2UL << 4)
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_2M   (0x3UL << 4)
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_8M   (0x4UL << 4)
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G   (0x5UL << 4)
+	#define TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_LAST   TQM_FP_RING_CFG_TQM_RING_CFG_TQM_RING_PG_SIZE_PG_1G
+	u8	unused[3];
+	__le32	tqm_ring_num_entries;
+	__le64	tqm_ring_page_dir;
 };
 
 /* hwrm_func_backing_store_cfg_input (size:2432b/304B) */
@@ -2560,6 +2613,27 @@ struct hwrm_error_recovery_qcfg_output {
 	u8	valid;
 };
 
+/* hwrm_func_echo_response_input (size:192b/24B) */
+struct hwrm_func_echo_response_input {
+	__le16	req_type;
+	__le16	cmpl_ring;
+	__le16	seq_id;
+	__le16	target_id;
+	__le64	resp_addr;
+	__le32	event_data1;
+	__le32	event_data2;
+};
+
+/* hwrm_func_echo_response_output (size:128b/16B) */
+struct hwrm_func_echo_response_output {
+	__le16	error_code;
+	__le16	req_type;
+	__le16	seq_id;
+	__le16	resp_len;
+	u8	unused_0[7];
+	u8	valid;
+};
+
 /* hwrm_func_drv_if_change_input (size:192b/24B) */
 struct hwrm_func_drv_if_change_input {
 	__le16	req_type;
@@ -3627,7 +3701,7 @@ struct hwrm_port_phy_qcaps_output {
 	#define PORT_PHY_QCAPS_RESP_FLAGS_CUMULATIVE_COUNTERS_ON_RESET     0x10UL
 	#define PORT_PHY_QCAPS_RESP_FLAGS_LOCAL_LPBK_NOT_SUPPORTED         0x20UL
 	#define PORT_PHY_QCAPS_RESP_FLAGS_FW_MANAGED_LINK_DOWN             0x40UL
-	#define PORT_PHY_QCAPS_RESP_FLAGS_RSVD1                            0x80UL
+	#define PORT_PHY_QCAPS_RESP_FLAGS_NO_FCS                           0x80UL
 	u8	port_cnt;
 	#define PORT_PHY_QCAPS_RESP_PORT_CNT_UNKNOWN 0x0UL
 	#define PORT_PHY_QCAPS_RESP_PORT_CNT_1       0x1UL
@@ -5392,6 +5466,7 @@ struct hwrm_vnic_qcaps_output {
 	#define VNIC_QCAPS_RESP_FLAGS_RX_CMPL_V2_CAP                      0x200UL
 	#define VNIC_QCAPS_RESP_FLAGS_VNIC_STATE_CAP                      0x400UL
 	#define VNIC_QCAPS_RESP_FLAGS_VIRTIO_NET_VNIC_ALLOC_CAP           0x800UL
+	#define VNIC_QCAPS_RESP_FLAGS_METADATA_FORMAT_CAP                 0x1000UL
 	__le16	max_aggs_supported;
 	u8	unused_1[5];
 	u8	valid;
@@ -5725,7 +5800,7 @@ struct hwrm_ring_alloc_output {
 	u8	valid;
 };
 
-/* hwrm_ring_free_input (size:192b/24B) */
+/* hwrm_ring_free_input (size:256b/32B) */
 struct hwrm_ring_free_input {
 	__le16	req_type;
 	__le16	cmpl_ring;
@@ -5740,9 +5815,13 @@ struct hwrm_ring_free_input {
 	#define RING_FREE_REQ_RING_TYPE_RX_AGG    0x4UL
 	#define RING_FREE_REQ_RING_TYPE_NQ        0x5UL
 	#define RING_FREE_REQ_RING_TYPE_LAST     RING_FREE_REQ_RING_TYPE_NQ
-	u8	unused_0;
+	u8	flags;
+	#define RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID 0x1UL
+	#define RING_FREE_REQ_FLAGS_LAST             RING_FREE_REQ_FLAGS_VIRTIO_RING_VALID
 	__le16	ring_id;
-	u8	unused_1[4];
+	__le32	prod_idx;
+	__le32	opaque;
+	__le32	unused_1;
 };
 
 /* hwrm_ring_free_output (size:128b/16B) */
@@ -7562,7 +7641,13 @@ struct hwrm_fw_qstatus_output {
 	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPCIERST 0x2UL
 	#define FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER   0x3UL
 	#define FW_QSTATUS_RESP_SELFRST_STATUS_LAST          FW_QSTATUS_RESP_SELFRST_STATUS_SELFRSTPOWER
-	u8	unused_0[6];
+	u8	nvm_option_action_status;
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_NONE     0x0UL
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_HOTRESET 0x1UL
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_WARMBOOT 0x2UL
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT 0x3UL
+	#define FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_LAST                  FW_QSTATUS_RESP_NVM_OPTION_ACTION_STATUS_NVMOPT_ACTION_COLDBOOT
+	u8	unused_0[5];
 	u8	valid;
 };
 
-- 
2.18.1


* [PATCH net-next 2/7] bnxt_en: selectively allocate context memories
  2021-02-14 23:04 [PATCH net-next 0/7] bnxt_en: Error recovery optimizations Michael Chan
  2021-02-14 23:04 ` [PATCH net-next 1/7] bnxt_en: Update firmware interface spec to 1.10.2.16 Michael Chan
@ 2021-02-14 23:04 ` Michael Chan
  2021-02-14 23:04 ` [PATCH net-next 3/7] bnxt_en: Implement faster recovery for firmware fatal error Michael Chan
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Michael Chan @ 2021-02-14 23:04 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo

From: Edwin Peer <edwin.peer@broadcom.com>

Newer devices may have local context memory instead of relying on the
host for backing store. In these cases, HWRM_FUNC_BACKING_STORE_QCAPS
will return a zero entry size to indicate contexts for which the host
should not allocate backing store.

Selectively allocate context memory based on device capabilities and
only enable backing store for the appropriate contexts.
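
Purely as an illustration (not part of the patch), each per-type
allocation in bnxt_alloc_ctx_mem() now follows the guard sketched
below; the helper is hypothetical and only condenses the repeated
hunks in the diff, which apply the same check inline for the qp, srq,
cq, vnic, stat, mrav, tim and tqm contexts:

/* Hypothetical helper, for illustration only: allocate host backing
 * store for one context type, skipping any type for which firmware
 * reported a zero entry size (i.e. device-resident context memory).
 */
static int bnxt_alloc_ctx_type(struct bnxt *bp,
			       struct bnxt_ctx_pg_info *ctx_pg,
			       u32 entry_size, u32 entries,
			       u8 pg_lvl, bool use_init_val)
{
	u32 mem_size;

	if (!entry_size)
		return 0;	/* no host backing store needed */

	mem_size = entry_size * entries;
	return bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl,
				      use_init_val);
}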

Signed-off-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 84 ++++++++++++++---------
 1 file changed, 52 insertions(+), 32 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f508c5c61a30..7512879a551a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -6834,6 +6834,9 @@ static void bnxt_hwrm_set_pg_attr(struct bnxt_ring_mem_info *rmem, u8 *pg_attr,
 {
 	u8 pg_size = 0;
 
+	if (!rmem->nr_pages)
+		return;
+
 	if (BNXT_PAGE_SHIFT == 13)
 		pg_size = 1 << 4;
 	else if (BNXT_PAGE_SIZE == 16)
@@ -7124,39 +7127,49 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ctx_pg = &ctx->qp_mem;
 	ctx_pg->entries = ctx->qp_min_qp1_entries + ctx->qp_max_l2_entries +
 			  extra_qps;
-	mem_size = ctx->qp_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
-	if (rc)
-		return rc;
+	if (ctx->qp_entry_size) {
+		mem_size = ctx->qp_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
+		if (rc)
+			return rc;
+	}
 
 	ctx_pg = &ctx->srq_mem;
 	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
-	mem_size = ctx->srq_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
-	if (rc)
-		return rc;
+	if (ctx->srq_entry_size) {
+		mem_size = ctx->srq_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
+		if (rc)
+			return rc;
+	}
 
 	ctx_pg = &ctx->cq_mem;
 	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
-	mem_size = ctx->cq_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
-	if (rc)
-		return rc;
+	if (ctx->cq_entry_size) {
+		mem_size = ctx->cq_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
+		if (rc)
+			return rc;
+	}
 
 	ctx_pg = &ctx->vnic_mem;
 	ctx_pg->entries = ctx->vnic_max_vnic_entries +
 			  ctx->vnic_max_ring_table_entries;
-	mem_size = ctx->vnic_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
-	if (rc)
-		return rc;
+	if (ctx->vnic_entry_size) {
+		mem_size = ctx->vnic_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
+		if (rc)
+			return rc;
+	}
 
 	ctx_pg = &ctx->stat_mem;
 	ctx_pg->entries = ctx->stat_max_entries;
-	mem_size = ctx->stat_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
-	if (rc)
-		return rc;
+	if (ctx->stat_entry_size) {
+		mem_size = ctx->stat_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
+		if (rc)
+			return rc;
+	}
 
 	ena = 0;
 	if (!(bp->flags & BNXT_FLAG_ROCE_CAP))
@@ -7169,10 +7182,12 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	num_mr = 1024 * 256;
 	num_ah = 1024 * 128;
 	ctx_pg->entries = num_mr + num_ah;
-	mem_size = ctx->mrav_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
-	if (rc)
-		return rc;
+	if (ctx->mrav_entry_size) {
+		mem_size = ctx->mrav_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
+		if (rc)
+			return rc;
+	}
 	ena = FUNC_BACKING_STORE_CFG_REQ_ENABLES_MRAV;
 	if (ctx->mrav_num_entries_units)
 		ctx_pg->entries =
@@ -7181,10 +7196,12 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 
 	ctx_pg = &ctx->tim_mem;
 	ctx_pg->entries = ctx->qp_mem.entries;
-	mem_size = ctx->tim_entry_size * ctx_pg->entries;
-	rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
-	if (rc)
-		return rc;
+	if (ctx->tim_entry_size) {
+		mem_size = ctx->tim_entry_size * ctx_pg->entries;
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
+		if (rc)
+			return rc;
+	}
 	ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TIM;
 
 skip_rdma:
@@ -7198,10 +7215,13 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	for (i = 0; i < ctx->tqm_fp_rings_count + 1; i++) {
 		ctx_pg = ctx->tqm_mem[i];
 		ctx_pg->entries = i ? entries : entries_sp;
-		mem_size = ctx->tqm_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
-		if (rc)
-			return rc;
+		if (ctx->tqm_entry_size) {
+			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
+			rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
+						    false);
+			if (rc)
+				return rc;
+		}
 		ena |= FUNC_BACKING_STORE_CFG_REQ_ENABLES_TQM_SP << i;
 	}
 	ena |= FUNC_BACKING_STORE_CFG_REQ_DFLT_ENABLES;
-- 
2.18.1


* [PATCH net-next 3/7] bnxt_en: Implement faster recovery for firmware fatal error.
  2021-02-14 23:04 [PATCH net-next 0/7] bnxt_en: Error recovery optimizations Michael Chan
  2021-02-14 23:04 ` [PATCH net-next 1/7] bnxt_en: Update firmware interface spec to 1.10.2.16 Michael Chan
  2021-02-14 23:04 ` [PATCH net-next 2/7] bnxt_en: selectively allocate context memories Michael Chan
@ 2021-02-14 23:04 ` Michael Chan
  2021-02-14 23:04 ` [PATCH net-next 4/7] bnxt_en: Add context memory initialization infrastructure Michael Chan
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Michael Chan @ 2021-02-14 23:04 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo

During some fatal firmware error conditions, PCI config space
register 0x2e, which normally contains the subsystem ID, will read
0xffff.  The register reverts to its normal value after the chip has
completed core reset.  If we detect this condition, we can immediately
poll this config register, waiting for the value to revert.  Because
we use config read cycles to poll the register, there is no
possibility of a Master Abort if we happen to read it during core
reset.  This speeds up recovery significantly because we don't have to
wait for the conservative minimum wait time (fw_reset_min_dsecs)
before polling MMIO to see whether the firmware has come out of reset.
As soon as the register changes value, we can proceed to re-initialize
the device.
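
For illustration, the detection amounts to the following check (a
sketch only; the helper name is made up, but pci_read_config_word()
and PCI_SUBSYSTEM_ID are the standard kernel API used in the hunks
below):

/* Returns true once the subsystem ID register reads back something
 * other than 0xffff, i.e. the chip has completed core reset.  Config
 * read cycles cannot Master Abort, so this is safe to call even while
 * the chip is still in reset.
 */
static bool bnxt_pci_cfg_restored(struct bnxt *bp)
{
	u16 val = 0;

	pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
	return val != 0xffff;
}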

Reviewed-by: Edwin Peer <edwin.peer@broadcom.com>
Reviewed-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
Reviewed-by: Andy Gospodarek <gospo@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7512879a551a..f2cf89d61eb2 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -10935,6 +10935,11 @@ static void bnxt_fw_reset_close(struct bnxt *bp)
 	 * kernel memory.
 	 */
 	if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
+		u16 val = 0;
+
+		pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID, &val);
+		if (val == 0xffff)
+			bp->fw_reset_min_dsecs = 0;
 		bnxt_tx_disable(bp);
 		bnxt_disable_napi(bp);
 		bnxt_disable_int_sync(bp);
@@ -11620,6 +11625,20 @@ static void bnxt_fw_reset_task(struct work_struct *work)
 		if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) {
 			u32 val;
 
+			if (!bp->fw_reset_min_dsecs) {
+				u16 val;
+
+				pci_read_config_word(bp->pdev, PCI_SUBSYSTEM_ID,
+						     &val);
+				if (val == 0xffff) {
+					if (bnxt_fw_reset_timeout(bp)) {
+						netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n");
+						goto fw_reset_abort;
+					}
+					bnxt_queue_fw_reset_work(bp, HZ / 1000);
+					return;
+				}
+			}
 			val = bnxt_fw_health_readl(bp,
 						   BNXT_FW_RESET_INPROG_REG);
 			if (val)
-- 
2.18.1


* [PATCH net-next 4/7] bnxt_en: Add context memory initialization infrastructure.
  2021-02-14 23:04 [PATCH net-next 0/7] bnxt_en: Error recovery optimizations Michael Chan
                   ` (2 preceding siblings ...)
  2021-02-14 23:04 ` [PATCH net-next 3/7] bnxt_en: Implement faster recovery for firmware fatal error Michael Chan
@ 2021-02-14 23:04 ` Michael Chan
  2021-02-14 23:04 ` [PATCH net-next 5/7] bnxt_en: Initialize "context kind" field for context memory blocks Michael Chan
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Michael Chan @ 2021-02-14 23:04 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo

Currently, the driver calls memset() to set all relevant context memory
used by the chip to the initial value.  Given the potentially large
number of context pages allocated for the chip, this can take many
milliseconds.

To make this faster, we only need to initialize the "context kind" field
of each block of context memory.  This patch sets up the infrastructure
to do that with the new bnxt_mem_init structure.  The next patch will
add the logic to obtain the offset of the "context kind" field from the
firmware.  This patch does not change the current behavior of calling
memset() to initialize all relevant context memory.

Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Reviewed-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 52 ++++++++++++++++-------
 drivers/net/ethernet/broadcom/bnxt/bnxt.h | 19 ++++++++-
 2 files changed, 53 insertions(+), 18 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index f2cf89d61eb2..888466c3ed78 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2747,8 +2747,8 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 		if (!rmem->pg_arr[i])
 			return -ENOMEM;
 
-		if (rmem->init_val)
-			memset(rmem->pg_arr[i], rmem->init_val,
+		if (rmem->mem_init && rmem->mem_init->init_val)
+			memset(rmem->pg_arr[i], rmem->mem_init->init_val,
 			       rmem->page_size);
 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
 			if (i == rmem->nr_pages - 2 &&
@@ -6750,6 +6750,19 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp)
 	return rc;
 }
 
+static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
+			struct hwrm_func_backing_store_qcaps_output *resp)
+{
+	struct bnxt_mem_init *mem_init;
+	u8 init_val;
+	int i;
+
+	init_val = resp->ctx_kind_initializer;
+	mem_init = &ctx->mem_init[0];
+	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++)
+		mem_init->init_val = init_val;
+}
+
 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 {
 	struct hwrm_func_backing_store_qcaps_input req = {0};
@@ -6804,7 +6817,9 @@ static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
 			le16_to_cpu(resp->mrav_num_entries_units);
 		ctx->tim_entry_size = le16_to_cpu(resp->tim_entry_size);
 		ctx->tim_max_entries = le32_to_cpu(resp->tim_max_entries);
-		ctx->ctx_kind_initializer = resp->ctx_kind_initializer;
+
+		bnxt_init_ctx_initializer(ctx, resp);
+
 		ctx->tqm_fp_rings_count = resp->tqm_fp_rings_count;
 		if (!ctx->tqm_fp_rings_count)
 			ctx->tqm_fp_rings_count = bp->max_q;
@@ -6981,7 +6996,7 @@ static int bnxt_alloc_ctx_mem_blk(struct bnxt *bp,
 
 static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 				  struct bnxt_ctx_pg_info *ctx_pg, u32 mem_size,
-				  u8 depth, bool use_init_val)
+				  u8 depth, struct bnxt_mem_init *mem_init)
 {
 	struct bnxt_ring_mem_info *rmem = &ctx_pg->ring_mem;
 	int rc;
@@ -7019,8 +7034,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 			rmem->pg_tbl_map = ctx_pg->ctx_dma_arr[i];
 			rmem->depth = 1;
 			rmem->nr_pages = MAX_CTX_PAGES;
-			if (use_init_val)
-				rmem->init_val = bp->ctx->ctx_kind_initializer;
+			rmem->mem_init = mem_init;
 			if (i == (nr_tbls - 1)) {
 				int rem = ctx_pg->nr_pages % MAX_CTX_PAGES;
 
@@ -7035,8 +7049,7 @@ static int bnxt_alloc_ctx_pg_tbls(struct bnxt *bp,
 		rmem->nr_pages = DIV_ROUND_UP(mem_size, BNXT_PAGE_SIZE);
 		if (rmem->nr_pages > 1 || depth)
 			rmem->depth = 1;
-		if (use_init_val)
-			rmem->init_val = bp->ctx->ctx_kind_initializer;
+		rmem->mem_init = mem_init;
 		rc = bnxt_alloc_ctx_mem_blk(bp, ctx_pg);
 	}
 	return rc;
@@ -7100,6 +7113,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 {
 	struct bnxt_ctx_pg_info *ctx_pg;
 	struct bnxt_ctx_mem_info *ctx;
+	struct bnxt_mem_init *init;
 	u32 mem_size, ena, entries;
 	u32 entries_sp, min;
 	u32 num_mr, num_ah;
@@ -7129,7 +7143,8 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 			  extra_qps;
 	if (ctx->qp_entry_size) {
 		mem_size = ctx->qp_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
+		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
 		if (rc)
 			return rc;
 	}
@@ -7138,7 +7153,8 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ctx_pg->entries = ctx->srq_max_l2_entries + extra_srqs;
 	if (ctx->srq_entry_size) {
 		mem_size = ctx->srq_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
+		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ];
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
 		if (rc)
 			return rc;
 	}
@@ -7147,7 +7163,8 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ctx_pg->entries = ctx->cq_max_l2_entries + extra_qps * 2;
 	if (ctx->cq_entry_size) {
 		mem_size = ctx->cq_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, true);
+		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_CQ];
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, pg_lvl, init);
 		if (rc)
 			return rc;
 	}
@@ -7157,7 +7174,8 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 			  ctx->vnic_max_ring_table_entries;
 	if (ctx->vnic_entry_size) {
 		mem_size = ctx->vnic_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
+		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC];
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
 		if (rc)
 			return rc;
 	}
@@ -7166,7 +7184,8 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ctx_pg->entries = ctx->stat_max_entries;
 	if (ctx->stat_entry_size) {
 		mem_size = ctx->stat_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, true);
+		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_STAT];
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, init);
 		if (rc)
 			return rc;
 	}
@@ -7184,7 +7203,8 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ctx_pg->entries = num_mr + num_ah;
 	if (ctx->mrav_entry_size) {
 		mem_size = ctx->mrav_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, true);
+		init = &ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV];
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 2, init);
 		if (rc)
 			return rc;
 	}
@@ -7198,7 +7218,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 	ctx_pg->entries = ctx->qp_mem.entries;
 	if (ctx->tim_entry_size) {
 		mem_size = ctx->tim_entry_size * ctx_pg->entries;
-		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, false);
+		rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1, NULL);
 		if (rc)
 			return rc;
 	}
@@ -7218,7 +7238,7 @@ static int bnxt_alloc_ctx_mem(struct bnxt *bp)
 		if (ctx->tqm_entry_size) {
 			mem_size = ctx->tqm_entry_size * ctx_pg->entries;
 			rc = bnxt_alloc_ctx_pg_tbls(bp, ctx_pg, mem_size, 1,
-						    false);
+						    NULL);
 			if (rc)
 				return rc;
 		}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 36007b0d1177..f5c45ea52b44 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -714,6 +714,13 @@ struct bnxt_sw_rx_agg_bd {
 	dma_addr_t		mapping;
 };
 
+struct bnxt_mem_init {
+	u8	init_val;
+	u16	offset;
+#define	BNXT_MEM_INVALID_OFFSET	0xffff
+	u16	size;
+};
+
 struct bnxt_ring_mem_info {
 	int			nr_pages;
 	int			page_size;
@@ -723,7 +730,7 @@ struct bnxt_ring_mem_info {
 #define BNXT_RMEM_USE_FULL_PAGE_FLAG	4
 
 	u16			depth;
-	u8			init_val;
+	struct bnxt_mem_init	*mem_init;
 
 	void			**pg_arr;
 	dma_addr_t		*dma_arr;
@@ -1474,7 +1481,6 @@ struct bnxt_ctx_mem_info {
 	u32	tim_max_entries;
 	u16	mrav_num_entries_units;
 	u8	tqm_entries_multiple;
-	u8	ctx_kind_initializer;
 	u8	tqm_fp_rings_count;
 
 	u32	flags;
@@ -1488,6 +1494,15 @@ struct bnxt_ctx_mem_info {
 	struct bnxt_ctx_pg_info mrav_mem;
 	struct bnxt_ctx_pg_info tim_mem;
 	struct bnxt_ctx_pg_info *tqm_mem[BNXT_MAX_TQM_RINGS];
+
+#define BNXT_CTX_MEM_INIT_QP	0
+#define BNXT_CTX_MEM_INIT_SRQ	1
+#define BNXT_CTX_MEM_INIT_CQ	2
+#define BNXT_CTX_MEM_INIT_VNIC	3
+#define BNXT_CTX_MEM_INIT_STAT	4
+#define BNXT_CTX_MEM_INIT_MRAV	5
+#define BNXT_CTX_MEM_INIT_MAX	6
+	struct bnxt_mem_init	mem_init[BNXT_CTX_MEM_INIT_MAX];
 };
 
 struct bnxt_fw_health {
-- 
2.18.1


* [PATCH net-next 5/7] bnxt_en: Initialize "context kind" field for context memory blocks.
  2021-02-14 23:04 [PATCH net-next 0/7] bnxt_en: Error recovery optimizations Michael Chan
                   ` (3 preceding siblings ...)
  2021-02-14 23:04 ` [PATCH net-next 4/7] bnxt_en: Add context memory initialization infrastructure Michael Chan
@ 2021-02-14 23:04 ` Michael Chan
  2021-02-14 23:05 ` [PATCH net-next 6/7] bnxt_en: Reply to firmware's echo request async message Michael Chan
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Michael Chan @ 2021-02-14 23:04 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo

If firmware provides the offset of the "context kind" field within the
relevant context memory entries, we'll initialize just that field in
each entry instead of initializing all of context memory.

Populate the bnxt_mem_init structure with the proper offset returned
by firmware.  If the firmware is older and the information is not
available, we set the offset to an invalid value and fall back to the
old behavior of initializing every byte.  Otherwise, we initialize
only the "context kind" byte at that offset.
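
The effect can be illustrated with a small standalone (userspace)
sketch; it is not driver code, and the names and sentinel value are
made up here, but the logic mirrors bnxt_init_ctx_mem() in the diff
below.  It touches only one byte per entry instead of every byte of
every page:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define INVALID_OFFSET	((size_t)-1)	/* sentinel for this sketch only */

/* Initialize one page of context memory: write init_val only to the
 * "context kind" byte of each entry when its offset is known,
 * otherwise fall back to initializing every byte of the page.
 */
static void init_ctx_page(uint8_t *page, size_t page_size,
			  size_t entry_size, size_t kind_offset,
			  uint8_t init_val)
{
	size_t i;

	if (!init_val)
		return;
	if (kind_offset == INVALID_OFFSET) {
		memset(page, init_val, page_size);	/* old behavior */
		return;
	}
	for (i = 0; i + entry_size <= page_size; i += entry_size)
		page[i + kind_offset] = init_val;	/* one byte per entry */
}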

Reviewed-by: Edwin Peer <edwin.peer@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 47 ++++++++++++++++++++---
 1 file changed, 42 insertions(+), 5 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 888466c3ed78..2bd9358c11e0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2688,6 +2688,23 @@ static void bnxt_free_skbs(struct bnxt *bp)
 	bnxt_free_rx_skbs(bp);
 }
 
+static void bnxt_init_ctx_mem(struct bnxt_mem_init *mem_init, void *p, int len)
+{
+	u8 init_val = mem_init->init_val;
+	u16 offset = mem_init->offset;
+	u8 *p2 = p;
+	int i;
+
+	if (!init_val)
+		return;
+	if (offset == BNXT_MEM_INVALID_OFFSET) {
+		memset(p, init_val, len);
+		return;
+	}
+	for (i = 0; i < len; i += mem_init->size)
+		*(p2 + i + offset) = init_val;
+}
+
 static void bnxt_free_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 {
 	struct pci_dev *pdev = bp->pdev;
@@ -2747,9 +2764,9 @@ static int bnxt_alloc_ring(struct bnxt *bp, struct bnxt_ring_mem_info *rmem)
 		if (!rmem->pg_arr[i])
 			return -ENOMEM;
 
-		if (rmem->mem_init && rmem->mem_init->init_val)
-			memset(rmem->pg_arr[i], rmem->mem_init->init_val,
-			       rmem->page_size);
+		if (rmem->mem_init)
+			bnxt_init_ctx_mem(rmem->mem_init, rmem->pg_arr[i],
+					  rmem->page_size);
 		if (rmem->nr_pages > 1 || rmem->depth > 0) {
 			if (i == rmem->nr_pages - 2 &&
 			    (rmem->flags & BNXT_RMEM_RING_PTE_FLAG))
@@ -6754,13 +6771,33 @@ static void bnxt_init_ctx_initializer(struct bnxt_ctx_mem_info *ctx,
 			struct hwrm_func_backing_store_qcaps_output *resp)
 {
 	struct bnxt_mem_init *mem_init;
+	u16 init_mask;
 	u8 init_val;
+	u8 *offset;
 	int i;
 
 	init_val = resp->ctx_kind_initializer;
-	mem_init = &ctx->mem_init[0];
-	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++)
+	init_mask = le16_to_cpu(resp->ctx_init_mask);
+	offset = &resp->qp_init_offset;
+	mem_init = &ctx->mem_init[BNXT_CTX_MEM_INIT_QP];
+	for (i = 0; i < BNXT_CTX_MEM_INIT_MAX; i++, mem_init++, offset++) {
 		mem_init->init_val = init_val;
+		mem_init->offset = BNXT_MEM_INVALID_OFFSET;
+		if (!init_mask)
+			continue;
+		if (i == BNXT_CTX_MEM_INIT_STAT)
+			offset = &resp->stat_init_offset;
+		if (init_mask & (1 << i))
+			mem_init->offset = *offset * 4;
+		else
+			mem_init->init_val = 0;
+	}
+	ctx->mem_init[BNXT_CTX_MEM_INIT_QP].size = ctx->qp_entry_size;
+	ctx->mem_init[BNXT_CTX_MEM_INIT_SRQ].size = ctx->srq_entry_size;
+	ctx->mem_init[BNXT_CTX_MEM_INIT_CQ].size = ctx->cq_entry_size;
+	ctx->mem_init[BNXT_CTX_MEM_INIT_VNIC].size = ctx->vnic_entry_size;
+	ctx->mem_init[BNXT_CTX_MEM_INIT_STAT].size = ctx->stat_entry_size;
+	ctx->mem_init[BNXT_CTX_MEM_INIT_MRAV].size = ctx->mrav_entry_size;
 }
 
 static int bnxt_hwrm_func_backing_store_qcaps(struct bnxt *bp)
-- 
2.18.1


* [PATCH net-next 6/7] bnxt_en: Reply to firmware's echo request async message.
  2021-02-14 23:04 [PATCH net-next 0/7] bnxt_en: Error recovery optimizations Michael Chan
                   ` (4 preceding siblings ...)
  2021-02-14 23:04 ` [PATCH net-next 5/7] bnxt_en: Initialize "context kind" field for context memory blocks Michael Chan
@ 2021-02-14 23:05 ` Michael Chan
  2021-02-14 23:05 ` [PATCH net-next 7/7] bnxt_en: Improve logging of error recovery settings information Michael Chan
  2021-02-15  1:40 ` [PATCH net-next 0/7] bnxt_en: Error recovery optimizations patchwork-bot+netdevbpf
  7 siblings, 0 replies; 9+ messages in thread
From: Michael Chan @ 2021-02-14 23:05 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo

This is a new async message that the firmware can send to check
whether it can communicate with the driver.  It is an additional
error detection scheme that the firmware can use when it suspects
errors on the PCIe interface.  When the driver receives this async
message, it replies by echoing back some data from the message.  If
the firmware does not receive the reply with the proper data after
some retries, error recovery will kick in.
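
A condensed sketch of the reply (the signature here is simplified for
illustration; the actual patch below stashes data1/data2 in
bp->fw_health from the async event handler and sends the reply later
from bnxt_sp_task()):

static void bnxt_fw_echo_reply(struct bnxt *bp, u32 data1, u32 data2)
{
	struct hwrm_func_echo_response_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
	req.event_data1 = cpu_to_le32(data1);	/* echoed back verbatim */
	req.event_data2 = cpu_to_le32(data2);
	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}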

Reviewed-by: Andy Gospodarek <gospo@broadcom.com>
Reviewed-by: Edwin Peer <edwin.peer@broadcom.com>
Reviewed-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 29 +++++++++++++++++++++++
 drivers/net/ethernet/broadcom/bnxt/bnxt.h |  3 +++
 2 files changed, 32 insertions(+)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 2bd9358c11e0..65771be802af 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -257,6 +257,7 @@ static const u16 bnxt_async_events_arr[] = {
 	ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY,
 	ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION,
 	ASYNC_EVENT_CMPL_EVENT_ID_RING_MONITOR_MSG,
+	ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST,
 };
 
 static struct workqueue_struct *bnxt_pf_wq;
@@ -2099,6 +2100,20 @@ static int bnxt_async_event_process(struct bnxt *bp,
 		bnxt_sched_reset(bp, rxr);
 		goto async_event_process_exit;
 	}
+	case ASYNC_EVENT_CMPL_EVENT_ID_ECHO_REQUEST: {
+		struct bnxt_fw_health *fw_health = bp->fw_health;
+
+		netif_notice(bp, hw, bp->dev,
+			     "Received firmware echo request, data1: 0x%x, data2: 0x%x\n",
+			     data1, data2);
+		if (fw_health) {
+			fw_health->echo_req_data1 = data1;
+			fw_health->echo_req_data2 = data2;
+			set_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event);
+			break;
+		}
+		goto async_event_process_exit;
+	}
 	default:
 		goto async_event_process_exit;
 	}
@@ -11208,6 +11223,17 @@ static void bnxt_init_ethtool_link_settings(struct bnxt *bp)
 		link_info->req_flow_ctrl = link_info->force_pause_setting;
 }
 
+static void bnxt_fw_echo_reply(struct bnxt *bp)
+{
+	struct bnxt_fw_health *fw_health = bp->fw_health;
+	struct hwrm_func_echo_response_input req = {0};
+
+	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_ECHO_RESPONSE, -1, -1);
+	req.event_data1 = cpu_to_le32(fw_health->echo_req_data1);
+	req.event_data2 = cpu_to_le32(fw_health->echo_req_data2);
+	hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+}
+
 static void bnxt_sp_task(struct work_struct *work)
 {
 	struct bnxt *bp = container_of(work, struct bnxt, sp_task);
@@ -11275,6 +11301,9 @@ static void bnxt_sp_task(struct work_struct *work)
 	if (test_and_clear_bit(BNXT_RING_COAL_NOW_SP_EVENT, &bp->sp_event))
 		bnxt_chk_missed_irq(bp);
 
+	if (test_and_clear_bit(BNXT_FW_ECHO_REQUEST_SP_EVENT, &bp->sp_event))
+		bnxt_fw_echo_reply(bp);
+
 	/* These functions below will clear BNXT_STATE_IN_SP_TASK.  They
 	 * must be the last functions to be called before exiting.
 	 */
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f5c45ea52b44..1259e68cba2a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1531,6 +1531,8 @@ struct bnxt_fw_health {
 	u32 fw_reset_seq_regs[16];
 	u32 fw_reset_seq_vals[16];
 	u32 fw_reset_seq_delay_msec[16];
+	u32 echo_req_data1;
+	u32 echo_req_data2;
 	struct devlink_health_reporter	*fw_reporter;
 	struct devlink_health_reporter *fw_reset_reporter;
 	struct devlink_health_reporter *fw_fatal_reporter;
@@ -1940,6 +1942,7 @@ struct bnxt {
 #define BNXT_FW_RESET_NOTIFY_SP_EVENT	18
 #define BNXT_FW_EXCEPTION_SP_EVENT	19
 #define BNXT_LINK_CFG_CHANGE_SP_EVENT	21
+#define BNXT_FW_ECHO_REQUEST_SP_EVENT	23
 
 	struct delayed_work	fw_reset_task;
 	int			fw_reset_state;
-- 
2.18.1


* [PATCH net-next 7/7] bnxt_en: Improve logging of error recovery settings information.
  2021-02-14 23:04 [PATCH net-next 0/7] bnxt_en: Error recovery optimizations Michael Chan
                   ` (5 preceding siblings ...)
  2021-02-14 23:05 ` [PATCH net-next 6/7] bnxt_en: Reply to firmware's echo request async message Michael Chan
@ 2021-02-14 23:05 ` Michael Chan
  2021-02-15  1:40 ` [PATCH net-next 0/7] bnxt_en: Error recovery optimizations patchwork-bot+netdevbpf
  7 siblings, 0 replies; 9+ messages in thread
From: Michael Chan @ 2021-02-14 23:05 UTC (permalink / raw)
  To: davem; +Cc: netdev, kuba, gospo

We currently log the error recovery settings only when error recovery
is enabled.  In some cases, the firmware disables error recovery after
it was initially enabled.  If nothing is logged, the user will not be
aware of this change in settings.

Log the settings when error recovery is disabled as well.  Also, print
the reset count in decimal instead of hexadecimal.

Reviewed-by: Edwin Peer <edwin.peer@broadcom.com>
Reviewed-by: Pavan Chebbi <pavan.chebbi@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 65771be802af..d0f3f68faa91 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -2055,14 +2055,11 @@ static int bnxt_async_event_process(struct bnxt *bp,
 
 		fw_health->enabled = EVENT_DATA1_RECOVERY_ENABLED(data1);
 		fw_health->master = EVENT_DATA1_RECOVERY_MASTER_FUNC(data1);
-		if (!fw_health->enabled)
+		if (!fw_health->enabled) {
+			netif_info(bp, drv, bp->dev,
+				   "Error recovery info: error recovery[0]\n");
 			break;
-
-		netif_info(bp, drv, bp->dev,
-			   "Error recovery info: error recovery[%d], master[%d], reset count[0x%x], health status: 0x%x\n",
-			   fw_health->enabled, fw_health->master,
-			   bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG),
-			   bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
+		}
 		fw_health->tmr_multiplier =
 			DIV_ROUND_UP(fw_health->polling_dsecs * HZ,
 				     bp->current_interval * 10);
@@ -2071,6 +2068,10 @@ static int bnxt_async_event_process(struct bnxt *bp,
 			bnxt_fw_health_readl(bp, BNXT_FW_HEARTBEAT_REG);
 		fw_health->last_fw_reset_cnt =
 			bnxt_fw_health_readl(bp, BNXT_FW_RESET_CNT_REG);
+		netif_info(bp, drv, bp->dev,
+			   "Error recovery info: error recovery[1], master[%d], reset count[%u], health status: 0x%x\n",
+			   fw_health->master, fw_health->last_fw_reset_cnt,
+			   bnxt_fw_health_readl(bp, BNXT_FW_HEALTH_REG));
 		goto async_event_process_exit;
 	}
 	case ASYNC_EVENT_CMPL_EVENT_ID_DEBUG_NOTIFICATION:
-- 
2.18.1


* Re: [PATCH net-next 0/7] bnxt_en: Error recovery optimizations.
  2021-02-14 23:04 [PATCH net-next 0/7] bnxt_en: Error recovery optimizations Michael Chan
                   ` (6 preceding siblings ...)
  2021-02-14 23:05 ` [PATCH net-next 7/7] bnxt_en: Improve logging of error recovery settings information Michael Chan
@ 2021-02-15  1:40 ` patchwork-bot+netdevbpf
  7 siblings, 0 replies; 9+ messages in thread
From: patchwork-bot+netdevbpf @ 2021-02-15  1:40 UTC (permalink / raw)
  To: Michael Chan; +Cc: davem, netdev, kuba, gospo

Hello:

This series was applied to netdev/net-next.git (refs/heads/master):

On Sun, 14 Feb 2021 18:04:54 -0500 you wrote:
> This series implements some optimizations to error recovery.  One
> patch adds an echo/reply mechanism with firmware to enhance error
> detection.  The other patches speed up the recovery process by
> polling config space earlier and by selectively initializing
> context memory during re-initialization.
> 
> Edwin Peer (1):
>   bnxt_en: selectively allocate context memories
> 
> [...]

Here is the summary with links:
  - [net-next,1/7] bnxt_en: Update firmware interface spec to 1.10.2.16.
    https://git.kernel.org/netdev/net-next/c/31f67c2ee055
  - [net-next,2/7] bnxt_en: selectively allocate context memories
    https://git.kernel.org/netdev/net-next/c/be6d755f3d0c
  - [net-next,3/7] bnxt_en: Implement faster recovery for firmware fatal error.
    https://git.kernel.org/netdev/net-next/c/dab62e7c2de7
  - [net-next,4/7] bnxt_en: Add context memory initialization infrastructure.
    https://git.kernel.org/netdev/net-next/c/e9696ff33c79
  - [net-next,5/7] bnxt_en: Initialize "context kind" field for context memory blocks.
    https://git.kernel.org/netdev/net-next/c/41435c394000
  - [net-next,6/7] bnxt_en: Reply to firmware's echo request async message.
    https://git.kernel.org/netdev/net-next/c/df97b34d3ace
  - [net-next,7/7] bnxt_en: Improve logging of error recovery settings information.
    https://git.kernel.org/netdev/net-next/c/f4d95c3c194d

You are awesome, thank you!
--
Deet-doot-dot, I am a bot.
https://korg.docs.kernel.org/patchwork/pwbot.html


