From: Rasesh Mody
Subject: [PATCH v4 41/62] net/qede/base: add support for previous driver unload
Date: Mon, 27 Mar 2017 23:52:11 -0700
Message-ID: <1490683952-24919-42-git-send-email-rasesh.mody@cavium.com>
In-Reply-To: <798af029-9a26-9065-350b-48781c1d3c55@intel.com>
References: <798af029-9a26-9065-350b-48781c1d3c55@intel.com>
Cc: Rasesh Mody

Add a new driver/management FW load request sequence that handles a
previous driver unload.

Signed-off-by: Rasesh Mody
---
 drivers/net/qede/base/ecore.h         |  13 ++
 drivers/net/qede/base/ecore_dev.c     |  43 ++--
 drivers/net/qede/base/ecore_dev_api.h |  30 ++-
 drivers/net/qede/base/ecore_mcp.c     | 369 ++++++++++++++++++++++++++++++---
 drivers/net/qede/base/ecore_mcp.h     |  40 ++--
 drivers/net/qede/base/mcp_public.h    |  56 ++++-
 drivers/net/qede/qede_main.c          |   2 +
 7 files changed, 482 insertions(+), 71 deletions(-)

diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h
index acf2244..60a8a6b 100644
--- a/drivers/net/qede/base/ecore.h
+++ b/drivers/net/qede/base/ecore.h
@@ -28,6 +28,19 @@
 #include "ecore_proto_if.h"
 #include "mcp_public.h"
 
+#define ECORE_MAJOR_VERSION 8
+#define ECORE_MINOR_VERSION 18
+#define ECORE_REVISION_VERSION 7
+#define ECORE_ENGINEERING_VERSION 0
+
+#define ECORE_VERSION \
+    ((ECORE_MAJOR_VERSION << 24) | (ECORE_MINOR_VERSION << 16) | \
+     (ECORE_REVISION_VERSION << 8) | ECORE_ENGINEERING_VERSION)
+
+#define STORM_FW_VERSION \
+    ((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
+     (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
+
 #define MAX_HWFNS_PER_DEVICE 2
 #define NAME_SIZE 128 /* @DPDK */
 #define ECORE_WFQ_UNIT 100
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 6d75e60..29dd292 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1901,10 +1901,11 @@ enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                                    struct ecore_hw_init_params *p_params)
 {
-    enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
+    struct ecore_load_req_params load_req_params;
     u32 load_code, param, drv_mb_param;
-    bool b_default_mtu = true;
     struct ecore_hwfn *p_hwfn;
+    bool b_default_mtu = true;
+    enum _ecore_status_t rc = ECORE_SUCCESS, mfw_rc;
     int i;
 
     if ((p_params->int_mode == ECORE_INT_MODE_MSI) &&
@@ -1943,17 +1944,25 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
         if (rc != ECORE_SUCCESS)
             return rc;
 
-        /* @@@TBD need to add here:
-         * Check for fan failure
-         * Prev_unload
-         */
-        rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code);
-        if (rc) {
+        OSAL_MEM_ZERO(&load_req_params, sizeof(load_req_params));
+        load_req_params.drv_role = p_params->is_crash_kernel ?
+                                   ECORE_DRV_ROLE_KDUMP :
+                                   ECORE_DRV_ROLE_OS;
+        load_req_params.timeout_val = p_params->mfw_timeout_val;
+        load_req_params.avoid_eng_reset = p_params->avoid_eng_reset;
+        rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt,
+                                &load_req_params);
+        if (rc != ECORE_SUCCESS) {
             DP_NOTICE(p_hwfn, true,
-                      "Failed sending LOAD_REQ command\n");
+                      "Failed sending a LOAD_REQ command\n");
             return rc;
         }
 
+        load_code = load_req_params.load_code;
+        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                   "Load request was sent. Load code: 0x%x\n",
+                   load_code);
+
         /* CQ75580:
          * When coming back from hiberbate state, the registers from
          * which shadow is read initially are not initialized. It turns
@@ -1966,10 +1975,6 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
          */
         ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt);
 
-        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
-                   "Load request was sent. Resp:0x%x, Load code: 0x%x\n",
-                   rc, load_code);
-
         /* Only relevant for recovery:
          * Clear the indication after the LOAD_REQ command is responded
          * by the MFW.
@@ -1988,13 +1993,13 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
         case FW_MSG_CODE_DRV_LOAD_ENGINE:
             rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt,
                                       p_hwfn->hw_info.hw_mode);
-            if (rc)
+            if (rc != ECORE_SUCCESS)
                 break;
             /* Fall into */
         case FW_MSG_CODE_DRV_LOAD_PORT:
             rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt,
                                     p_hwfn->hw_info.hw_mode);
-            if (rc)
+            if (rc != ECORE_SUCCESS)
                 break;
             /* Fall into */
         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
@@ -2006,6 +2011,8 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                                       p_params->allow_npar_tx_switch);
             break;
         default:
+            DP_NOTICE(p_hwfn, false,
+                      "Unexpected load code [0x%08x]", load_code);
             rc = ECORE_NOTIMPL;
             break;
         }
@@ -2021,6 +2028,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
                            0, &load_code, &param);
         if (rc != ECORE_SUCCESS)
             return rc;
+
         if (mfw_rc != ECORE_SUCCESS) {
             DP_NOTICE(p_hwfn, true,
                       "Failed sending LOAD_DONE command\n");
@@ -2045,10 +2053,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
 
     if (IS_PF(p_dev)) {
         p_hwfn = ECORE_LEADING_HWFN(p_dev);
-        drv_mb_param = (FW_MAJOR_VERSION << 24) |
-                       (FW_MINOR_VERSION << 16) |
-                       (FW_REVISION_VERSION << 8) |
-                       (FW_ENGINEERING_VERSION);
+        drv_mb_param = STORM_FW_VERSION;
         rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt,
                            DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER,
                            drv_mb_param, &load_code, &param);
diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h
index 356c5e4..7e90778 100644
--- a/drivers/net/qede/base/ecore_dev_api.h
+++ b/drivers/net/qede/base/ecore_dev_api.h
@@ -58,16 +58,38 @@ enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev);
 void ecore_resc_setup(struct ecore_dev *p_dev);
 
 struct ecore_hw_init_params {
-    /* tunnelling parameters */
+    /* Tunnelling parameters */
     struct ecore_tunnel_info *p_tunn;
+
     bool b_hw_start;
-    /* interrupt mode [msix, inta, etc.] to use */
+
+    /* Interrupt mode [msix, inta, etc.] to use */
     enum ecore_int_mode int_mode;
-/* npar tx switching to be used for vports configured for tx-switching */
+
+    /* NPAR tx switching to be used for vports configured for tx-switching
+     */
     bool allow_npar_tx_switch;
-    /* binary fw data pointer in binary fw file */
+
+    /* Binary fw data pointer in binary fw file */
     const u8 *bin_fw_data;
+
+    /* Indicates whether the driver is running over a crash kernel.
+     * As part of the load request, this will be used for providing the
+     * driver role to the MFW.
+     * In case of a crash kernel over PDA - this should be set to false.
+     */
+    bool is_crash_kernel;
+
+    /* The timeout value that the MFW should use when locking the engine for
+     * the driver load process.
+     * A value of '0' means the default value, and '255' means no timeout.
+     */
+    u8 mfw_timeout_val;
+#define ECORE_LOAD_REQ_LOCK_TO_DEFAULT 0
+#define ECORE_LOAD_REQ_LOCK_TO_NONE 255
+
+    /* Avoid engine reset when first PF loads on it */
+    bool avoid_eng_reset;
 };
 
 /**
diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c
index 30cb76e..6c5b5db 100644
--- a/drivers/net/qede/base/ecore_mcp.c
+++ b/drivers/net/qede/base/ecore_mcp.c
@@ -518,51 +518,368 @@ static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn,
 }
 #endif
 
+static bool ecore_mcp_can_force_load(u8 drv_role, u8 exist_drv_role)
+{
+    return (drv_role == DRV_ROLE_OS &&
+            exist_drv_role == DRV_ROLE_PREBOOT) ||
+           (drv_role == DRV_ROLE_KDUMP && exist_drv_role == DRV_ROLE_OS);
+}
+
+static enum _ecore_status_t ecore_mcp_cancel_load_req(struct ecore_hwfn *p_hwfn,
+                                                      struct ecore_ptt *p_ptt)
+{
+    u32 resp = 0, param = 0;
+    enum _ecore_status_t rc;
+
+    rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
+                       &resp, &param);
+    if (rc != ECORE_SUCCESS)
+        DP_NOTICE(p_hwfn, false,
+                  "Failed to send cancel load request, rc = %d\n", rc);
+
+    return rc;
+}
+
+#define CONFIG_ECORE_L2_BITMAP_IDX (0x1 << 0)
+#define CONFIG_ECORE_SRIOV_BITMAP_IDX (0x1 << 1)
+#define CONFIG_ECORE_ROCE_BITMAP_IDX (0x1 << 2)
+#define CONFIG_ECORE_IWARP_BITMAP_IDX (0x1 << 3)
+#define CONFIG_ECORE_FCOE_BITMAP_IDX (0x1 << 4)
+#define CONFIG_ECORE_ISCSI_BITMAP_IDX (0x1 << 5)
+#define CONFIG_ECORE_LL2_BITMAP_IDX (0x1 << 6)
+
+static u32 ecore_get_config_bitmap(void)
+{
+    u32 config_bitmap = 0x0;
+
+#ifdef CONFIG_ECORE_L2
+    config_bitmap |= CONFIG_ECORE_L2_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_SRIOV
+    config_bitmap |= CONFIG_ECORE_SRIOV_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ROCE
+    config_bitmap |= CONFIG_ECORE_ROCE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_IWARP
+    config_bitmap |= CONFIG_ECORE_IWARP_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_FCOE
+    config_bitmap |= CONFIG_ECORE_FCOE_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_ISCSI
+    config_bitmap |= CONFIG_ECORE_ISCSI_BITMAP_IDX;
+#endif
+#ifdef CONFIG_ECORE_LL2
+    config_bitmap |= CONFIG_ECORE_LL2_BITMAP_IDX;
+#endif
+
+    return config_bitmap;
+}
+
+struct ecore_load_req_in_params {
+    u8 hsi_ver;
+#define ECORE_LOAD_REQ_HSI_VER_DEFAULT 0
+#define ECORE_LOAD_REQ_HSI_VER_1 1
+    u32 drv_ver_0;
+    u32 drv_ver_1;
+    u32 fw_ver;
+    u8 drv_role;
+    u8 timeout_val;
+    u8 force_cmd;
+    bool avoid_eng_reset;
+};
+
+struct ecore_load_req_out_params {
+    u32 load_code;
+    u32 exist_drv_ver_0;
+    u32 exist_drv_ver_1;
+    u32 exist_fw_ver;
+    u8 exist_drv_role;
+    u8 mfw_hsi_ver;
+    bool drv_exists;
+};
+
+static enum _ecore_status_t
+__ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+                     struct ecore_load_req_in_params *p_in_params,
+                     struct ecore_load_req_out_params *p_out_params)
+{
+    union drv_union_data union_data_src, union_data_dst;
+    struct ecore_mcp_mb_params mb_params;
+    struct load_req_stc *p_load_req;
+    struct load_rsp_stc *p_load_rsp;
+    u32 hsi_ver;
+    enum _ecore_status_t rc;
+
+    p_load_req = &union_data_src.load_req;
+    OSAL_MEM_ZERO(p_load_req, sizeof(*p_load_req));
+    p_load_req->drv_ver_0 = p_in_params->drv_ver_0;
+    p_load_req->drv_ver_1 = p_in_params->drv_ver_1;
+    p_load_req->fw_ver = p_in_params->fw_ver;
+    ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_ROLE,
+                        p_in_params->drv_role);
+    ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_LOCK_TO,
+                        p_in_params->timeout_val);
+    ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FORCE,
+                        p_in_params->force_cmd);
+    ECORE_MFW_SET_FIELD(p_load_req->misc0, LOAD_REQ_FLAGS0,
+                        p_in_params->avoid_eng_reset);
+
+    hsi_ver = (p_in_params->hsi_ver == ECORE_LOAD_REQ_HSI_VER_DEFAULT) ?
+              DRV_ID_MCP_HSI_VER_CURRENT :
+              (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);
+
+    OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
+    mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
+    mb_params.param = PDA_COMP | hsi_ver | p_hwfn->p_dev->drv_type;
+    mb_params.p_data_src = &union_data_src;
+    mb_params.p_data_dst = &union_data_dst;
+
+    DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+               "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
+               mb_params.param,
+               ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
+               ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
+               ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
+               ECORE_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));
+
+    if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1)
+        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
+                   p_load_req->drv_ver_0, p_load_req->drv_ver_1,
+                   p_load_req->fw_ver, p_load_req->misc0,
+                   ECORE_MFW_GET_FIELD(p_load_req->misc0,
+                                       LOAD_REQ_ROLE),
+                   ECORE_MFW_GET_FIELD(p_load_req->misc0,
+                                       LOAD_REQ_LOCK_TO),
+                   ECORE_MFW_GET_FIELD(p_load_req->misc0,
+                                       LOAD_REQ_FORCE),
+                   ECORE_MFW_GET_FIELD(p_load_req->misc0,
+                                       LOAD_REQ_FLAGS0));
+
+    rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+    if (rc != ECORE_SUCCESS) {
+        DP_NOTICE(p_hwfn, false,
+                  "Failed to send load request, rc = %d\n", rc);
+        return rc;
+    }
+
+    DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+               "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
+    p_out_params->load_code = mb_params.mcp_resp;
+
+    if (p_in_params->hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+        p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+        p_load_rsp = &union_data_dst.load_rsp;
+        DP_VERBOSE(p_hwfn, ECORE_MSG_SP,
+                   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
+                   p_load_rsp->drv_ver_0, p_load_rsp->drv_ver_1,
+                   p_load_rsp->fw_ver, p_load_rsp->misc0,
+                   ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
+                                       LOAD_RSP_ROLE),
+                   ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
+                                       LOAD_RSP_HSI),
+                   ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
+                                       LOAD_RSP_FLAGS0));
+
+        p_out_params->exist_drv_ver_0 = p_load_rsp->drv_ver_0;
+        p_out_params->exist_drv_ver_1 = p_load_rsp->drv_ver_1;
+        p_out_params->exist_fw_ver = p_load_rsp->fw_ver;
+        p_out_params->exist_drv_role =
+            ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_ROLE);
+        p_out_params->mfw_hsi_ver =
+            ECORE_MFW_GET_FIELD(p_load_rsp->misc0, LOAD_RSP_HSI);
+        p_out_params->drv_exists =
+            ECORE_MFW_GET_FIELD(p_load_rsp->misc0,
+                                LOAD_RSP_FLAGS0) &
+            LOAD_RSP_FLAGS0_DRV_EXISTS;
+    }
+
+    return ECORE_SUCCESS;
+}
+
+static enum _ecore_status_t eocre_get_mfw_drv_role(struct ecore_hwfn *p_hwfn,
+                                                   enum ecore_drv_role drv_role,
+                                                   u8 *p_mfw_drv_role)
+{
+    switch (drv_role) {
+    case ECORE_DRV_ROLE_OS:
+        *p_mfw_drv_role = DRV_ROLE_OS;
+        break;
+    case ECORE_DRV_ROLE_KDUMP:
+        *p_mfw_drv_role = DRV_ROLE_KDUMP;
+        break;
+    default:
+        DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
+        return ECORE_INVAL;
+    }
+
+    return ECORE_SUCCESS;
+}
+
+enum ecore_load_req_force {
+    ECORE_LOAD_REQ_FORCE_NONE,
+    ECORE_LOAD_REQ_FORCE_PF,
+    ECORE_LOAD_REQ_FORCE_ALL,
+};
+
+static enum _ecore_status_t
+ecore_get_mfw_force_cmd(struct ecore_hwfn *p_hwfn,
+                        enum ecore_load_req_force force_cmd,
+                        u8 *p_mfw_force_cmd)
+{
+    switch (force_cmd) {
+    case ECORE_LOAD_REQ_FORCE_NONE:
+        *p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
+        break;
+    case ECORE_LOAD_REQ_FORCE_PF:
+        *p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
+        break;
+    case ECORE_LOAD_REQ_FORCE_ALL:
+        *p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
+        break;
+    default:
+        DP_ERR(p_hwfn, "Unexpected force value %d\n", force_cmd);
+        return ECORE_INVAL;
+    }
+
+    return ECORE_SUCCESS;
+}
+
 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
-                                        u32 *p_load_code)
+                                        struct ecore_load_req_params *p_params)
 {
-    struct ecore_dev *p_dev = p_hwfn->p_dev;
-    struct ecore_mcp_mb_params mb_params;
+    struct ecore_load_req_out_params out_params;
+    struct ecore_load_req_in_params in_params;
+    u8 mfw_drv_role, mfw_force_cmd;
     enum _ecore_status_t rc;
 
 #ifndef ASIC_ONLY
     if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) {
-        ecore_mcp_mf_workaround(p_hwfn, p_load_code);
+        ecore_mcp_mf_workaround(p_hwfn, &p_params->load_code);
         return ECORE_SUCCESS;
     }
 #endif
 
-    OSAL_MEM_ZERO(&mb_params, sizeof(mb_params));
-    mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
-    mb_params.param = PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
-                      p_dev->drv_type;
-    rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
+    OSAL_MEM_ZERO(&in_params, sizeof(in_params));
+    in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_DEFAULT;
+    in_params.drv_ver_0 = ECORE_VERSION;
+    in_params.drv_ver_1 = ecore_get_config_bitmap();
+    in_params.fw_ver = STORM_FW_VERSION;
+    rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
+    if (rc != ECORE_SUCCESS)
+        return rc;
 
-    /* if mcp fails to respond we must abort */
-    if (rc != ECORE_SUCCESS) {
-        DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+    in_params.drv_role = mfw_drv_role;
+    in_params.timeout_val = p_params->timeout_val;
+    rc = ecore_get_mfw_force_cmd(p_hwfn, ECORE_LOAD_REQ_FORCE_NONE,
+                                 &mfw_force_cmd);
+    if (rc != ECORE_SUCCESS)
         return rc;
-    }
 
-    *p_load_code = mb_params.mcp_resp;
+    in_params.force_cmd = mfw_force_cmd;
+    in_params.avoid_eng_reset = p_params->avoid_eng_reset;
 
-    /* If MFW refused (e.g. other port is in diagnostic mode) we
-     * must abort. This can happen in the following cases:
-     * - Other port is in diagnostic mode
-     * - Previously loaded function on the engine is not compliant with
-     *   the requester.
-     * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
-     * -
+    OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+    rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
+    if (rc != ECORE_SUCCESS)
+        return rc;
+
+    /* First handle cases where another load request should/might be sent:
+     * - MFW expects the old interface [HSI version = 1]
+     * - MFW responds that a force load request is required
      */
-    if (!(*p_load_code) ||
-        ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
-        ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
-        ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
-        DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
+    if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
+        DP_INFO(p_hwfn,
+                "MFW refused a load request due to HSI > 1. Resending with HSI = 1.\n");
+
+        /* The previous load request set the mailbox blocking */
+        p_hwfn->mcp_info->block_mb_sending = false;
+
+        in_params.hsi_ver = ECORE_LOAD_REQ_HSI_VER_1;
+        OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+        rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+                                  &out_params);
+        if (rc != ECORE_SUCCESS)
+            return rc;
+    } else if (out_params.load_code ==
+               FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
+        /* The previous load request set the mailbox blocking */
+        p_hwfn->mcp_info->block_mb_sending = false;
+
+        if (ecore_mcp_can_force_load(in_params.drv_role,
+                                     out_params.exist_drv_role)) {
+            DP_INFO(p_hwfn,
+                    "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Sending a force load request.\n",
+                    out_params.exist_drv_role,
+                    out_params.exist_fw_ver,
+                    out_params.exist_drv_ver_0,
+                    out_params.exist_drv_ver_1);
+
+            rc = ecore_get_mfw_force_cmd(p_hwfn,
+                                         ECORE_LOAD_REQ_FORCE_ALL,
+                                         &mfw_force_cmd);
+            if (rc != ECORE_SUCCESS)
+                return rc;
+
+            in_params.force_cmd = mfw_force_cmd;
+            OSAL_MEM_ZERO(&out_params, sizeof(out_params));
+            rc = __ecore_mcp_load_req(p_hwfn, p_ptt, &in_params,
+                                      &out_params);
+            if (rc != ECORE_SUCCESS)
+                return rc;
+        } else {
+            DP_NOTICE(p_hwfn, false,
+                      "A force load is required [existing: role %d, fw_ver 0x%08x, drv_ver 0x%08x_0x%08x]. Avoiding to prevent disruption of active PFs.\n",
+                      out_params.exist_drv_role,
+                      out_params.exist_fw_ver,
+                      out_params.exist_drv_ver_0,
+                      out_params.exist_drv_ver_1);
+
+            ecore_mcp_cancel_load_req(p_hwfn, p_ptt);
+            return ECORE_BUSY;
+        }
+    }
+
+    /* Now handle the other types of responses.
+     * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
+     * expected here after the additional revised load requests were sent.
+     */
+    switch (out_params.load_code) {
+    case FW_MSG_CODE_DRV_LOAD_ENGINE:
+    case FW_MSG_CODE_DRV_LOAD_PORT:
+    case FW_MSG_CODE_DRV_LOAD_FUNCTION:
+        if (out_params.mfw_hsi_ver != ECORE_LOAD_REQ_HSI_VER_1 &&
+            out_params.drv_exists) {
+            /* The role and fw/driver version match, but the PF is
+             * already loaded and has not been unloaded gracefully.
+             * This is unexpected since a quasi-FLR request was
+             * previously sent as part of ecore_hw_prepare().
+             */
+            DP_NOTICE(p_hwfn, false,
+                      "PF is already loaded - shouldn't have got here since a quasi-FLR request was previously sent!\n");
+            return ECORE_INVAL;
+        }
+        break;
+    case FW_MSG_CODE_DRV_LOAD_REFUSED_PDA:
+    case FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG:
+    case FW_MSG_CODE_DRV_LOAD_REFUSED_HSI:
+    case FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT:
+        DP_NOTICE(p_hwfn, false,
+                  "MFW refused a load request [resp 0x%08x]. Aborting.\n",
+                  out_params.load_code);
         return ECORE_BUSY;
+    default:
+        DP_NOTICE(p_hwfn, false,
+                  "Unexpected response to load request [resp 0x%08x]. Aborting.\n",
+                  out_params.load_code);
+        break;
     }
 
+    p_params->load_code = out_params.load_code;
+
     return ECORE_SUCCESS;
 }
diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h
index 7a81516..4138a12 100644
--- a/drivers/net/qede/base/ecore_mcp.h
+++ b/drivers/net/qede/base/ecore_mcp.h
@@ -136,32 +136,36 @@ enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
  * @param p_hwfn - hw function
  * @param p_ptt - PTT required for register access
  * @return enum _ecore_status_t - ECORE_SUCCESS - operation
- * was successul.
+ * was successful.
  */
 enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
                                        struct ecore_ptt *p_ptt);
 
+enum ecore_drv_role {
+    ECORE_DRV_ROLE_OS,
+    ECORE_DRV_ROLE_KDUMP,
+};
+
+struct ecore_load_req_params {
+    enum ecore_drv_role drv_role;
+    u8 timeout_val; /* 1..254, '0' - default value, '255' - no timeout */
+    bool avoid_eng_reset;
+    u32 load_code;
+};
+
 /**
- * @brief Sends a LOAD_REQ to the MFW, and in case operation
- *        succeed, returns whether this PF is the first on the
- *        chip/engine/port or function. This function should be
- *        called when driver is ready to accept MFW events after
- *        Storms initializations are done.
- *
- * @param p_hwfn - hw function
- * @param p_ptt - PTT required for register access
- * @param p_load_code - The MCP response param containing one
- *        of the following:
- *        FW_MSG_CODE_DRV_LOAD_ENGINE
- *        FW_MSG_CODE_DRV_LOAD_PORT
- *        FW_MSG_CODE_DRV_LOAD_FUNCTION
- * @return enum _ecore_status_t -
- *        ECORE_SUCCESS - Operation was successul.
- *        ECORE_BUSY - Operation failed
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds,
+ *        returns whether this PF is the first on the engine/port or function.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_params
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - Operation was successful.
  */
 enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn,
                                         struct ecore_ptt *p_ptt,
-                                        u32 *p_load_code);
+                                        struct ecore_load_req_params *p_params);
 
 /**
  * @brief Read the MFW mailbox into Current buffer.
diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h
index d3cbc96..145f5ca 100644
--- a/drivers/net/qede/base/mcp_public.h
+++ b/drivers/net/qede/base/mcp_public.h
@@ -878,9 +878,11 @@ struct public_func {
 #define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff
 #define DRV_ID_PDA_COMP_VER_SHIFT 0
 
+#define LOAD_REQ_HSI_VERSION 2
 #define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000
 #define DRV_ID_MCP_HSI_VER_SHIFT 16
-#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT)
+#define DRV_ID_MCP_HSI_VER_CURRENT (LOAD_REQ_HSI_VERSION << \
+                                    DRV_ID_MCP_HSI_VER_SHIFT)
 
 #define DRV_ID_DRV_TYPE_MASK 0x7f000000
 #define DRV_ID_DRV_TYPE_SHIFT 24
@@ -1040,8 +1042,47 @@ struct resource_info {
 #define RESOURCE_ELEMENT_STRICT (1 << 0)
 };
 
+#define DRV_ROLE_NONE 0
+#define DRV_ROLE_PREBOOT 1
+#define DRV_ROLE_OS 2
+#define DRV_ROLE_KDUMP 3
+
+struct load_req_stc {
+    u32 drv_ver_0;
+    u32 drv_ver_1;
+    u32 fw_ver;
+    u32 misc0;
+#define LOAD_REQ_ROLE_MASK 0x000000FF
+#define LOAD_REQ_ROLE_SHIFT 0
+#define LOAD_REQ_LOCK_TO_MASK 0x0000FF00
+#define LOAD_REQ_LOCK_TO_SHIFT 0 /* @DPDK */
+#define LOAD_REQ_LOCK_TO_DEFAULT 0
+#define LOAD_REQ_LOCK_TO_NONE 255
+#define LOAD_REQ_FORCE_MASK 0x000F0000
+#define LOAD_REQ_FORCE_SHIFT 0 /* @DPDK */
+#define LOAD_REQ_FORCE_NONE 0
+#define LOAD_REQ_FORCE_PF 1
+#define LOAD_REQ_FORCE_ALL 2
+#define LOAD_REQ_FLAGS0_MASK 0x00F00000
+#define LOAD_REQ_FLAGS0_SHIFT 0 /* @DPDK */
+#define LOAD_REQ_FLAGS0_AVOID_RESET (0x1 << 0)
+};
+
+struct load_rsp_stc {
+    u32 drv_ver_0;
+    u32 drv_ver_1;
+    u32 fw_ver;
+    u32 misc0;
+#define LOAD_RSP_ROLE_MASK 0x000000FF
+#define LOAD_RSP_ROLE_SHIFT 0
+#define LOAD_RSP_HSI_MASK 0x0000FF00
+#define LOAD_RSP_HSI_SHIFT 8
+#define LOAD_RSP_FLAGS0_MASK 0x000F0000
+#define LOAD_RSP_FLAGS0_SHIFT 16
+#define LOAD_RSP_FLAGS0_DRV_EXISTS (0x1 << 0)
+};
+
 union drv_union_data {
-    u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; /* LOAD_REQ */
     struct mcp_mac wol_mac; /* UNLOAD_DONE */
 
 /* This configuration should be set by the driver for the LINK_SET command.
  */
@@ -1068,6 +1109,9 @@ union drv_union_data {
     struct bist_nvm_image_att nvm_image_att;
     struct mdump_config_stc mdump_config;
     u32 dword;
+
+    struct load_req_stc load_req;
+    struct load_rsp_stc load_rsp;
 
     /* ... */
 };
@@ -1077,6 +1121,7 @@ struct public_drv_mb {
 #define DRV_MSG_CODE_LOAD_REQ 0x10000000
 #define DRV_MSG_CODE_LOAD_DONE 0x11000000
 #define DRV_MSG_CODE_INIT_HW 0x12000000
+#define DRV_MSG_CODE_CANCEL_LOAD_REQ 0x13000000
 #define DRV_MSG_CODE_UNLOAD_REQ 0x20000000
 #define DRV_MSG_CODE_UNLOAD_DONE 0x21000000
 #define DRV_MSG_CODE_INIT_PHY 0x22000000
@@ -1448,8 +1493,11 @@ struct public_drv_mb {
 #define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000
 #define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000
-#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1 0x10210000
 #define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10230000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE 0x10300000
+#define FW_MSG_CODE_DRV_LOAD_REFUSED_REJECT 0x10310000
 #define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000
 #define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000
 #define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000
@@ -1547,7 +1595,7 @@ struct public_drv_mb {
     u32 fw_mb_param;
 
- /* Resource Allocation params - MFW version support*/
+/* Resource Allocation params - MFW version support */
 #define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_MASK 0xFFFF0000
 #define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT 16
 #define FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_MASK 0x0000FFFF
diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c
index 5c79055..326e56f 100644
--- a/drivers/net/qede/qede_main.c
+++ b/drivers/net/qede/qede_main.c
@@ -276,6 +276,8 @@ static int qed_slowpath_start(struct ecore_dev *edev,
     hw_init_params.int_mode = ECORE_INT_MODE_MSIX;
     hw_init_params.allow_npar_tx_switch = allow_npar_tx_switching;
     hw_init_params.bin_fw_data = data;
+    hw_init_params.mfw_timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
+    hw_init_params.avoid_eng_reset = false;
     rc = ecore_hw_init(edev, &hw_init_params);
     if (rc) {
         DP_ERR(edev, "ecore_hw_init failed\n");
-- 
1.7.10.3
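
For readers of the archive, here is a minimal caller-side sketch of the load
request flow added by this patch. It is illustrative only and not part of the
diff above: the wrapper function example_load_flow() and its p_hwfn/p_ptt
arguments are assumed to come from the caller's context, while every other
identifier is introduced or already used by this patch.

/* Illustrative sketch only -- not part of the patch. Mirrors how
 * ecore_hw_init() is expected to drive the new load request API.
 */
static enum _ecore_status_t example_load_flow(struct ecore_hwfn *p_hwfn,
                                              struct ecore_ptt *p_ptt)
{
    struct ecore_load_req_params params;
    enum _ecore_status_t rc;

    /* Describe this driver instance to the MFW */
    OSAL_MEM_ZERO(&params, sizeof(params));
    params.drv_role = ECORE_DRV_ROLE_OS;    /* ECORE_DRV_ROLE_KDUMP for a crash kernel */
    params.timeout_val = ECORE_LOAD_REQ_LOCK_TO_DEFAULT;
    params.avoid_eng_reset = false;

    /* Sends LOAD_REQ, handling HSI fallback and force-load internally */
    rc = ecore_mcp_load_req(p_hwfn, p_ptt, &params);
    if (rc != ECORE_SUCCESS)
        return rc;  /* e.g. ECORE_BUSY when a required force load is not allowed */

    /* params.load_code now tells this PF its place in the init sequence */
    switch (params.load_code) {
    case FW_MSG_CODE_DRV_LOAD_ENGINE:    /* first PF on the engine */
    case FW_MSG_CODE_DRV_LOAD_PORT:      /* first PF on the port */
    case FW_MSG_CODE_DRV_LOAD_FUNCTION:  /* engine and port already initialized */
        break;
    default:
        return ECORE_NOTIMPL;
    }

    return ECORE_SUCCESS;
}

With the macros added to ecore.h, the 8.18.7.0 base-driver version packs into
drv_ver_0 as ECORE_VERSION = (8 << 24) | (18 << 16) | (7 << 8) | 0 = 0x08120700,
which is the value the MFW echoes back as exist_drv_ver_0 to a later loader.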