From: Rasesh Mody <rasesh.mody@cavium.com>
To: <dev@dpdk.org>
Cc: Rasesh Mody <rasesh.mody@cavium.com>
Subject: [PATCH v3 32/61] net/qede/base: add tunnelling support for VFs
Date: Fri, 24 Mar 2017 00:28:22 -0700
Message-ID: <1490340531-11403-33-git-send-email-rasesh.mody@cavium.com>

Add tunnelling support for VFs: a VF now requests tunnel configuration
changes from the PF over the new CHANNEL_TLV_UPDATE_TUNN_PARAM mailbox
message, and the PF applies the update and returns the resulting tunnel
configuration in its response.

Signed-off-by: Rasesh Mody <rasesh.mody@cavium.com>
---
 drivers/net/qede/base/bcm_osal.h          |   3 +-
 drivers/net/qede/base/ecore_dev.c         |  15 ++-
 drivers/net/qede/base/ecore_sp_commands.c |  15 ++-
 drivers/net/qede/base/ecore_sriov.c       | 144 +++++++++++++++++++++++++++
 drivers/net/qede/base/ecore_vf.c          | 154 +++++++++++++++++++++++++++++
 drivers/net/qede/base/ecore_vf.h          |   5 +
 drivers/net/qede/base/ecore_vfpf_if.h     |  40 ++++++++
 drivers/net/qede/qede_ethdev.c            |  49 +++++----
 8 files changed, 390 insertions(+), 35 deletions(-)

diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h
index 513d542..4c91dc0 100644
--- a/drivers/net/qede/base/bcm_osal.h
+++ b/drivers/net/qede/base/bcm_osal.h
@@ -422,6 +422,5 @@ void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type,
 #define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0)
 #define OSAL_MFW_TLV_REQ(p_hwfn) (0)
 #define OSAL_MFW_FILL_TLV_DATA(type, buf, data) (0)
-
-
+#define OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, mask, b_update, tunn) 0
 #endif /* __BCM_OSAL_H */
diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c
index 0d3971c..21fec58 100644
--- a/drivers/net/qede/base/ecore_dev.c
+++ b/drivers/net/qede/base/ecore_dev.c
@@ -1876,6 +1876,19 @@ static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn,
		    p_hwfn->mcp_info->mfw_mb_length);
 }
 
+enum _ecore_status_t ecore_vf_start(struct ecore_hwfn *p_hwfn,
+				    struct ecore_hw_init_params *p_params)
+{
+	if (p_params->p_tunn) {
+		ecore_vf_set_vf_start_tunn_update_param(p_params->p_tunn);
+		ecore_vf_pf_tunnel_param_update(p_hwfn, p_params->p_tunn);
+	}
+
+	p_hwfn->b_int_enabled = 1;
+
+	return ECORE_SUCCESS;
+}
+
 enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
				   struct ecore_hw_init_params *p_params)
 {
@@ -1908,7 +1921,7 @@ enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev,
	}
 
	if (IS_VF(p_dev)) {
-		p_hwfn->b_int_enabled = 1;
+		ecore_vf_start(p_hwfn, p_params);
		continue;
	}
diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c
index 4cacce8..8fd64d7 100644
--- a/drivers/net/qede/base/ecore_sp_commands.c
+++ b/drivers/net/qede/base/ecore_sp_commands.c
@@ -22,6 +22,7 @@
 #include "ecore_hw.h"
 #include "ecore_dcbx.h"
 #include "ecore_sriov.h"
+#include "ecore_vf.h"
 
 enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn,
					   struct ecore_spq_entry **pp_ent,
@@ -137,16 +138,17 @@ static void ecore_set_tunn_cls_info(struct ecore_tunnel_info *p_tun,
	p_tun->b_update_rx_cls = p_src->b_update_rx_cls;
	p_tun->b_update_tx_cls = p_src->b_update_tx_cls;
 
+	/* @DPDK - typecast tunnel class */
	type = ecore_tunn_clss_to_fw_clss(p_src->vxlan.tun_cls);
-	p_tun->vxlan.tun_cls = type;
+	p_tun->vxlan.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->l2_gre.tun_cls);
-	p_tun->l2_gre.tun_cls = type;
+	p_tun->l2_gre.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->ip_gre.tun_cls);
-	p_tun->ip_gre.tun_cls = type;
+	p_tun->ip_gre.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->l2_geneve.tun_cls);
-	p_tun->l2_geneve.tun_cls = type;
+	p_tun->l2_geneve.tun_cls = (enum ecore_tunn_clss)type;
	type = ecore_tunn_clss_to_fw_clss(p_src->ip_geneve.tun_cls);
-	p_tun->ip_geneve.tun_cls = type;
+	p_tun->ip_geneve.tun_cls = (enum ecore_tunn_clss)type;
 }
 
 static void ecore_set_tunn_ports(struct ecore_tunnel_info *p_tun,
@@ -486,6 +488,9 @@ ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn,
	struct ecore_sp_init_data init_data;
	enum _ecore_status_t rc = ECORE_NOTIMPL;
 
+	if (IS_VF(p_hwfn->p_dev))
+		return ecore_vf_pf_tunnel_param_update(p_hwfn, p_tunn);
+
	if (ECORE_IS_BB_A0(p_hwfn->p_dev)) {
		DP_NOTICE(p_hwfn, true,
			  "A0 chip: tunnel pf update config is not supported\n");
diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c
index 7378420..6cec7b2 100644
--- a/drivers/net/qede/base/ecore_sriov.c
+++ b/drivers/net/qede/base/ecore_sriov.c
@@ -51,6 +51,7 @@ const char *ecore_channel_tlvs_string[] = {
	"CHANNEL_TLV_VPORT_UPDATE_RSS",
	"CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN",
	"CHANNEL_TLV_VPORT_UPDATE_SGE_TPA",
+	"CHANNEL_TLV_UPDATE_TUNN_PARAM",
	"CHANNEL_TLV_MAX"
 };
 
@@ -2137,6 +2138,146 @@ out:
					b_legacy_vf);
 }
 
+static void
+ecore_iov_pf_update_tun_response(struct pfvf_update_tunn_param_tlv *p_resp,
+				 struct ecore_tunnel_info *p_tun,
+				 u16 tunn_feature_mask)
+{
+	p_resp->tunn_feature_mask = tunn_feature_mask;
+	p_resp->vxlan_mode = p_tun->vxlan.b_mode_enabled;
+	p_resp->l2geneve_mode = p_tun->l2_geneve.b_mode_enabled;
+	p_resp->ipgeneve_mode = p_tun->ip_geneve.b_mode_enabled;
+	p_resp->l2gre_mode = p_tun->l2_gre.b_mode_enabled;
+	p_resp->ipgre_mode = p_tun->ip_gre.b_mode_enabled;
+	p_resp->vxlan_clss = p_tun->vxlan.tun_cls;
+	p_resp->l2gre_clss = p_tun->l2_gre.tun_cls;
+	p_resp->ipgre_clss = p_tun->ip_gre.tun_cls;
+	p_resp->l2geneve_clss = p_tun->l2_geneve.tun_cls;
+	p_resp->ipgeneve_clss = p_tun->ip_geneve.tun_cls;
+	p_resp->geneve_udp_port = p_tun->geneve_port.port;
+	p_resp->vxlan_udp_port = p_tun->vxlan_port.port;
+}
+
+static void
+__ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+				struct ecore_tunn_update_type *p_tun,
+				enum ecore_tunn_mode mask, u8 tun_cls)
+{
+	if (p_req->tun_mode_update_mask & (1 << mask)) {
+		p_tun->b_update_mode = true;
+
+		if (p_req->tunn_mode & (1 << mask))
+			p_tun->b_mode_enabled = true;
+	}
+
+	p_tun->tun_cls = tun_cls;
+}
+
+static void
+ecore_iov_pf_update_tun_param(struct vfpf_update_tunn_param_tlv *p_req,
+			      struct ecore_tunn_update_type *p_tun,
+			      struct ecore_tunn_update_udp_port *p_port,
+			      enum ecore_tunn_mode mask,
+			      u8 tun_cls, u8 update_port, u16 port)
+{
+	if (update_port) {
+		p_port->b_update_port = true;
+		p_port->port = port;
+	}
+
+	__ecore_iov_pf_update_tun_param(p_req, p_tun, mask, tun_cls);
+}
+
+static bool
+ecore_iov_pf_validate_tunn_param(struct vfpf_update_tunn_param_tlv *p_req)
+{
+	bool b_update_requested = false;
+
+	if (p_req->tun_mode_update_mask || p_req->update_tun_cls ||
+	    p_req->update_geneve_port || p_req->update_vxlan_port)
+		b_update_requested = true;
+
+	return b_update_requested;
+}
+
+static void ecore_iov_vf_mbx_update_tunn_param(struct ecore_hwfn *p_hwfn,
+					       struct ecore_ptt *p_ptt,
+					       struct ecore_vf_info *p_vf)
+{
+	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+	struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+	struct pfvf_update_tunn_param_tlv *p_resp;
+	struct vfpf_update_tunn_param_tlv *p_req;
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	u8 status = PFVF_STATUS_SUCCESS;
+	bool b_update_required = false;
+	struct ecore_tunnel_info tunn;
+	u16 tunn_feature_mask = 0;
+
+	mbx->offset = (u8 *)mbx->reply_virt;
+
+	OSAL_MEM_ZERO(&tunn, sizeof(tunn));
+	p_req = &mbx->req_virt->tunn_param_update;
+
+	if (!ecore_iov_pf_validate_tunn_param(p_req)) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "No tunnel update requested by VF\n");
+		status = PFVF_STATUS_FAILURE;
+		goto send_resp;
+	}
+
+	tunn.b_update_rx_cls = p_req->update_tun_cls;
+	tunn.b_update_tx_cls = p_req->update_tun_cls;
+
+	ecore_iov_pf_update_tun_param(p_req, &tunn.vxlan, &tunn.vxlan_port,
+				      ECORE_MODE_VXLAN_TUNN, p_req->vxlan_clss,
+				      p_req->update_vxlan_port,
+				      p_req->vxlan_port);
+	ecore_iov_pf_update_tun_param(p_req, &tunn.l2_geneve, &tunn.geneve_port,
+				      ECORE_MODE_L2GENEVE_TUNN,
+				      p_req->l2geneve_clss,
+				      p_req->update_geneve_port,
+				      p_req->geneve_port);
+	__ecore_iov_pf_update_tun_param(p_req, &tunn.ip_geneve,
+					ECORE_MODE_IPGENEVE_TUNN,
+					p_req->ipgeneve_clss);
+	__ecore_iov_pf_update_tun_param(p_req, &tunn.l2_gre,
+					ECORE_MODE_L2GRE_TUNN,
+					p_req->l2gre_clss);
+	__ecore_iov_pf_update_tun_param(p_req, &tunn.ip_gre,
+					ECORE_MODE_IPGRE_TUNN,
+					p_req->ipgre_clss);
+
+	/* If the PF modifies the VF's request, it should still return an
+	 * error for a partial or modified configuration, as opposed to
+	 * the requested one.
+	 */
+	rc = OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG(p_hwfn, &tunn_feature_mask,
+						 &b_update_required, &tunn);
+
+	if (rc != ECORE_SUCCESS)
+		status = PFVF_STATUS_FAILURE;
+
+	/* Does the ECORE client want to update anything? */
+	if (b_update_required) {
+		rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
+						 ECORE_SPQ_MODE_EBLOCK,
+						 OSAL_NULL);
+		if (rc != ECORE_SUCCESS)
+			status = PFVF_STATUS_FAILURE;
+	}
+
+send_resp:
+	p_resp = ecore_add_tlv(p_hwfn, &mbx->offset,
+			       CHANNEL_TLV_UPDATE_TUNN_PARAM, sizeof(*p_resp));
+
+	ecore_iov_pf_update_tun_response(p_resp, p_tun, tunn_feature_mask);
+	ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	ecore_iov_send_response(p_hwfn, p_ptt, p_vf, sizeof(*p_resp), status);
+}
+
 static void ecore_iov_vf_mbx_start_txq_resp(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     struct ecore_vf_info *p_vf,
@@ -3405,6 +3546,9 @@ void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn,
		case CHANNEL_TLV_RELEASE:
			ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf);
			break;
+		case CHANNEL_TLV_UPDATE_TUNN_PARAM:
+			ecore_iov_vf_mbx_update_tunn_param(p_hwfn, p_ptt, p_vf);
+			break;
		}
	} else if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) {
		/* If we've received a message from a VF we consider malicious
diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c
index 60ecd16..3182621 100644
--- a/drivers/net/qede/base/ecore_vf.c
+++ b/drivers/net/qede/base/ecore_vf.c
@@ -451,6 +451,160 @@ free_p_iov:
 #define MSTORM_QZONE_START(dev)   (TSTORM_QZONE_START + \
				   (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
 
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+__ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+			     struct ecore_tunn_update_type *p_src,
+			     enum ecore_tunn_mode mask, u8 *p_cls)
+{
+	if (p_src->b_update_mode) {
+		p_req->tun_mode_update_mask |= (1 << mask);
+
+		if (p_src->b_mode_enabled)
+			p_req->tunn_mode |= (1 << mask);
+	}
+
+	*p_cls = p_src->tun_cls;
+}
+
+/* @DPDK - changed enum ecore_tunn_clss to enum ecore_tunn_mode */
+static void
+ecore_vf_prep_tunn_req_tlv(struct vfpf_update_tunn_param_tlv *p_req,
+			   struct ecore_tunn_update_type *p_src,
+			   enum ecore_tunn_mode mask, u8 *p_cls,
+			   struct ecore_tunn_update_udp_port *p_port,
+			   u8 *p_update_port, u16 *p_udp_port)
+{
+	if (p_port->b_update_port) {
+		*p_update_port = 1;
+		*p_udp_port = p_port->port;
+	}
+
+	__ecore_vf_prep_tunn_req_tlv(p_req, p_src, mask, p_cls);
+}
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun)
+{
+	if (p_tun->vxlan.b_mode_enabled)
+		p_tun->vxlan.b_update_mode = true;
+	if (p_tun->l2_geneve.b_mode_enabled)
+		p_tun->l2_geneve.b_update_mode = true;
+	if (p_tun->ip_geneve.b_mode_enabled)
+		p_tun->ip_geneve.b_update_mode = true;
+	if (p_tun->l2_gre.b_mode_enabled)
+		p_tun->l2_gre.b_update_mode = true;
+	if (p_tun->ip_gre.b_mode_enabled)
+		p_tun->ip_gre.b_update_mode = true;
+
+	p_tun->b_update_rx_cls = true;
+	p_tun->b_update_tx_cls = true;
+}
+
+static void
+__ecore_vf_update_tunn_param(struct ecore_tunn_update_type *p_tun,
+			     u16 feature_mask, u8 tunn_mode, u8 tunn_cls,
+			     enum ecore_tunn_mode val)
+{
+	if (feature_mask & (1 << val)) {
+		p_tun->b_mode_enabled = tunn_mode;
+		p_tun->tun_cls = tunn_cls;
+	} else {
+		p_tun->b_mode_enabled = false;
+	}
+}
+
+static void
+ecore_vf_update_tunn_param(struct ecore_hwfn *p_hwfn,
+			   struct ecore_tunnel_info *p_tun,
+			   struct pfvf_update_tunn_param_tlv *p_resp)
+{
+	/* Update mode and classes provided by PF */
+	u16 feat_mask = p_resp->tunn_feature_mask;
+
+	__ecore_vf_update_tunn_param(&p_tun->vxlan, feat_mask,
+				     p_resp->vxlan_mode, p_resp->vxlan_clss,
+				     ECORE_MODE_VXLAN_TUNN);
+	__ecore_vf_update_tunn_param(&p_tun->l2_geneve, feat_mask,
+				     p_resp->l2geneve_mode,
+				     p_resp->l2geneve_clss,
+				     ECORE_MODE_L2GENEVE_TUNN);
+	__ecore_vf_update_tunn_param(&p_tun->ip_geneve, feat_mask,
+				     p_resp->ipgeneve_mode,
+				     p_resp->ipgeneve_clss,
+				     ECORE_MODE_IPGENEVE_TUNN);
+	__ecore_vf_update_tunn_param(&p_tun->l2_gre, feat_mask,
+				     p_resp->l2gre_mode, p_resp->l2gre_clss,
+				     ECORE_MODE_L2GRE_TUNN);
+	__ecore_vf_update_tunn_param(&p_tun->ip_gre, feat_mask,
+				     p_resp->ipgre_mode, p_resp->ipgre_clss,
+				     ECORE_MODE_IPGRE_TUNN);
+	p_tun->geneve_port.port = p_resp->geneve_udp_port;
+	p_tun->vxlan_port.port = p_resp->vxlan_udp_port;
+
+	DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+		   "tunn mode: vxlan=0x%x, l2geneve=0x%x, ipgeneve=0x%x, l2gre=0x%x, ipgre=0x%x",
+		   p_tun->vxlan.b_mode_enabled, p_tun->l2_geneve.b_mode_enabled,
+		   p_tun->ip_geneve.b_mode_enabled,
+		   p_tun->l2_gre.b_mode_enabled,
+		   p_tun->ip_gre.b_mode_enabled);
+}
+
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+				struct ecore_tunnel_info *p_src)
+{
+	struct ecore_tunnel_info *p_tun = &p_hwfn->p_dev->tunnel;
+	struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info;
+	struct pfvf_update_tunn_param_tlv *p_resp;
+	struct vfpf_update_tunn_param_tlv *p_req;
+	enum _ecore_status_t rc;
+
+	p_req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_TUNN_PARAM,
+				 sizeof(*p_req));
+
+	if (p_src->b_update_rx_cls && p_src->b_update_tx_cls)
+		p_req->update_tun_cls = 1;
+
+	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->vxlan, ECORE_MODE_VXLAN_TUNN,
+				   &p_req->vxlan_clss, &p_src->vxlan_port,
+				   &p_req->update_vxlan_port,
+				   &p_req->vxlan_port);
+	ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_geneve,
+				   ECORE_MODE_L2GENEVE_TUNN,
+				   &p_req->l2geneve_clss, &p_src->geneve_port,
+				   &p_req->update_geneve_port,
+				   &p_req->geneve_port);
+	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_geneve,
+				     ECORE_MODE_IPGENEVE_TUNN,
+				     &p_req->ipgeneve_clss);
+	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->l2_gre,
+				     ECORE_MODE_L2GRE_TUNN, &p_req->l2gre_clss);
+	__ecore_vf_prep_tunn_req_tlv(p_req, &p_src->ip_gre,
+				     ECORE_MODE_IPGRE_TUNN, &p_req->ipgre_clss);
+
+	/* add list termination tlv */
+	ecore_add_tlv(p_hwfn, &p_iov->offset,
+		      CHANNEL_TLV_LIST_END,
+		      sizeof(struct channel_list_end_tlv));
+
+	p_resp = &p_iov->pf2vf_reply->tunn_param_resp;
+	rc = ecore_send_msg2pf(p_hwfn, &p_resp->hdr.status, sizeof(*p_resp));
+
+	if (rc)
+		goto exit;
+
+	if (p_resp->hdr.status != PFVF_STATUS_SUCCESS) {
+		DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+			   "Failed to update tunnel parameters\n");
+		rc = ECORE_INVAL;
+	}
+
+	ecore_vf_update_tunn_param(p_hwfn, p_tun, p_resp);
+exit:
+	ecore_vf_pf_req_end(p_hwfn, rc);
+	return rc;
+}
+
 enum _ecore_status_t
 ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn,
		       struct ecore_queue_cid *p_cid,
diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h
index 1afd667..0d67054 100644
--- a/drivers/net/qede/base/ecore_vf.h
+++ b/drivers/net/qede/base/ecore_vf.h
@@ -258,5 +258,10 @@ void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn,
			      struct ecore_mcp_link_capabilities *p_link_caps,
			      struct ecore_bulletin_content *p_bulletin);
 
+enum _ecore_status_t
+ecore_vf_pf_tunnel_param_update(struct ecore_hwfn *p_hwfn,
+				struct ecore_tunnel_info *p_tunn);
+
+void ecore_vf_set_vf_start_tunn_update_param(struct ecore_tunnel_info *p_tun);
 #endif
 #endif /* __ECORE_VF_H__ */
diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h
index 149d092..82ed4f5 100644
--- a/drivers/net/qede/base/ecore_vfpf_if.h
+++ b/drivers/net/qede/base/ecore_vfpf_if.h
@@ -416,6 +416,43 @@ struct vfpf_ucast_filter_tlv {
	u16 padding[3];
 };
 
+/* tunnel update param tlv */
+struct vfpf_update_tunn_param_tlv {
+	struct vfpf_first_tlv first_tlv;
+
+	u8 tun_mode_update_mask;
+	u8 tunn_mode;
+	u8 update_tun_cls;
+	u8 vxlan_clss;
+	u8 l2gre_clss;
+	u8 ipgre_clss;
+	u8 l2geneve_clss;
+	u8 ipgeneve_clss;
+	u8 update_geneve_port;
+	u8 update_vxlan_port;
+	u16 geneve_port;
+	u16 vxlan_port;
+	u8 padding[2];
+};
+
+struct pfvf_update_tunn_param_tlv {
+	struct pfvf_tlv hdr;
+
+	u16 tunn_feature_mask;
+	u8 vxlan_mode;
+	u8 l2geneve_mode;
+	u8 ipgeneve_mode;
+	u8 l2gre_mode;
+	u8 ipgre_mode;
+	u8 vxlan_clss;
+	u8 l2gre_clss;
+	u8 ipgre_clss;
+	u8 l2geneve_clss;
+	u8 ipgeneve_clss;
+	u16 vxlan_udp_port;
+	u16 geneve_udp_port;
+};
+
 struct tlv_buffer_size {
	u8 tlv_buffer[TLV_BUFFER_SIZE];
 };
@@ -431,6 +468,7 @@ union vfpf_tlvs {
	struct vfpf_vport_start_tlv start_vport;
	struct vfpf_vport_update_tlv vport_update;
	struct vfpf_ucast_filter_tlv ucast_filter;
+	struct vfpf_update_tunn_param_tlv tunn_param_update;
	struct tlv_buffer_size tlv_buf_size;
 };
 
@@ -439,6 +477,7 @@ union pfvf_tlvs {
	struct pfvf_acquire_resp_tlv acquire_resp;
	struct tlv_buffer_size tlv_buf_size;
	struct pfvf_start_queue_resp_tlv queue_start;
+	struct pfvf_update_tunn_param_tlv tunn_param_resp;
 };
 
 /* This is a structure which is allocated in the VF, which the PF may update
@@ -552,6 +591,7 @@ enum {
	CHANNEL_TLV_VPORT_UPDATE_RSS,
	CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN,
	CHANNEL_TLV_VPORT_UPDATE_SGE_TPA,
+	CHANNEL_TLV_UPDATE_TUNN_PARAM,
	CHANNEL_TLV_MAX,
 
	/* Required for iterating over vport-update tlvs.
diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c
index 4ef93d4..257e5b2 100644
--- a/drivers/net/qede/qede_ethdev.c
+++ b/drivers/net/qede/qede_ethdev.c
@@ -335,15 +335,15 @@ static void qede_set_ucast_cmn_params(struct ecore_filter_ucast *ucast)
	/* ucast->assert_on_error = true; - For debug */
 }
 
-static void qede_set_cmn_tunn_param(struct qed_tunn_update_params *params,
-				    uint8_t clss, uint64_t mode, uint64_t mask)
+static void qede_set_cmn_tunn_param(struct ecore_tunnel_info *p_tunn,
+				    uint8_t clss, bool mode, bool mask)
 {
-	memset(params, 0, sizeof(struct qed_tunn_update_params));
-	params->tunn_mode = mode;
-	params->tunn_mode_update_mask = mask;
-	params->update_tx_pf_clss = 1;
-	params->update_rx_pf_clss = 1;
-	params->tunn_clss_vxlan = clss;
+	memset(p_tunn, 0, sizeof(struct ecore_tunnel_info));
+	p_tunn->vxlan.b_update_mode = mode;
+	p_tunn->vxlan.b_mode_enabled = mask;
+	p_tunn->b_update_rx_cls = true;
+	p_tunn->b_update_tx_cls = true;
+	p_tunn->vxlan.tun_cls = clss;
 }
 
 static int
@@ -1707,25 +1707,24 @@ qede_conf_udp_dst_port(struct rte_eth_dev *eth_dev,
 {
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct qed_tunn_update_params params;
-	struct ecore_tunnel_info *p_tunn;
+	struct ecore_tunnel_info tunn; /* @DPDK */
	struct ecore_hwfn *p_hwfn;
	int rc, i;
 
	PMD_INIT_FUNC_TRACE(edev);
 
-	memset(&params, 0, sizeof(params));
+	memset(&tunn, 0, sizeof(tunn));
	if (tunnel_udp->prot_type == RTE_TUNNEL_TYPE_VXLAN) {
-		params.update_vxlan_udp_port = 1;
-		params.vxlan_udp_port = (add) ? tunnel_udp->udp_port :
-					QEDE_VXLAN_DEF_PORT;
+		tunn.vxlan_port.b_update_port = true;
+		tunn.vxlan_port.port = (add) ? tunnel_udp->udp_port :
+					       QEDE_VXLAN_DEF_PORT;
		for_each_hwfn(edev, i) {
			p_hwfn = &edev->hwfns[i];
-			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_tunn,
+			rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
						ECORE_SPQ_MODE_CB, NULL);
			if (rc != ECORE_SUCCESS) {
				DP_ERR(edev, "Unable to config UDP port %u\n",
-				       params.vxlan_udp_port);
+				       tunn.vxlan_port.port);
				return rc;
			}
		}
@@ -1818,8 +1817,7 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
 {
	struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev);
	struct ecore_dev *edev = QEDE_INIT_EDEV(qdev);
-	struct qed_tunn_update_params params;
-	struct ecore_tunnel_info *p_tunn;
+	struct ecore_tunnel_info tunn;
	struct ecore_hwfn *p_hwfn;
	enum ecore_filter_ucast_type type;
	enum ecore_tunn_clss clss;
@@ -1868,16 +1866,14 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
			qdev->vxlan_filter_type = filter_type;
 
			DP_INFO(edev, "Enabling VXLAN tunneling\n");
-			qede_set_cmn_tunn_param(&params, clss,
-						(1 << ECORE_MODE_VXLAN_TUNN),
-						(1 << ECORE_MODE_VXLAN_TUNN));
+			qede_set_cmn_tunn_param(&tunn, clss, true, true);
			for_each_hwfn(edev, i) {
				p_hwfn = &edev->hwfns[i];
				rc = ecore_sp_pf_update_tunn_cfg(p_hwfn,
-					p_tunn, ECORE_SPQ_MODE_CB, NULL);
+					&tunn, ECORE_SPQ_MODE_CB, NULL);
				if (rc != ECORE_SUCCESS) {
					DP_ERR(edev, "Failed to update tunn_clss %u\n",
-						params.tunn_clss_vxlan);
+						tunn.vxlan.tun_cls);
				}
			}
			qdev->num_tunn_filters++; /* Filter added successfully */
@@ -1904,16 +1900,15 @@ static int qede_vxlan_tunn_config(struct rte_eth_dev *eth_dev,
			DP_INFO(edev, "Disabling VXLAN tunneling\n");
 
			/* Use 0 as tunnel mode */
-			qede_set_cmn_tunn_param(&params, clss, 0,
-						(1 << ECORE_MODE_VXLAN_TUNN));
+			qede_set_cmn_tunn_param(&tunn, clss, false, true);
			for_each_hwfn(edev, i) {
				p_hwfn = &edev->hwfns[i];
-				rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, p_tunn,
+				rc = ecore_sp_pf_update_tunn_cfg(p_hwfn, &tunn,
					ECORE_SPQ_MODE_CB, NULL);
				if (rc != ECORE_SUCCESS) {
					DP_ERR(edev, "Failed to update tunn_clss %u\n",
-						params.tunn_clss_vxlan);
+						tunn.vxlan.tun_cls);
					break;
				}
			}
-- 
1.7.10.3
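
For context, the VF-side entry point added above can be driven as in the
following minimal sketch, which enables VXLAN classification and sets its
UDP port from a VF. This is illustrative only: example_vf_enable_vxlan is
a hypothetical helper, it assumes an initialized VF hwfn and the existing
ECORE_TUNN_CLSS_MAC_VLAN classification value from ecore.h; everything
else comes from structures and functions in this patch.

/* Hypothetical usage sketch - assumes the ecore headers are included. */
static enum _ecore_status_t
example_vf_enable_vxlan(struct ecore_hwfn *p_hwfn, u16 udp_port)
{
	struct ecore_tunnel_info tunn;

	OSAL_MEM_ZERO(&tunn, sizeof(tunn));

	/* Request VXLAN mode on; the PF may still modify or reject this,
	 * in which case the PF's response overwrites the local state.
	 */
	tunn.vxlan.b_update_mode = true;
	tunn.vxlan.b_mode_enabled = true;
	tunn.vxlan.tun_cls = ECORE_TUNN_CLSS_MAC_VLAN;
	tunn.b_update_rx_cls = true;
	tunn.b_update_tx_cls = true;

	/* Also ask the PF to switch the VXLAN UDP destination port. */
	tunn.vxlan_port.b_update_port = true;
	tunn.vxlan_port.port = udp_port;

	/* Sends CHANNEL_TLV_UPDATE_TUNN_PARAM and adopts the PF's reply. */
	return ecore_vf_pf_tunnel_param_update(p_hwfn, &tunn);
}

On the PF side the same request arrives in ecore_iov_vf_mbx_update_tunn_param(),
which validates it, lets the OSAL client veto or modify it via
OSAL_PF_VALIDATE_MODIFY_TUNN_CONFIG, and answers with the tunnel state that
was actually programmed.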