From mboxrd@z Thu Jan 1 00:00:00 1970 From: Xueming Li Subject: [PATCH v2 11/15] net/mlx5: support MPLS-in-GRE and MPLS-in-UDP Date: Tue, 10 Apr 2018 21:34:11 +0800 Message-ID: <20180410133415.189905-12-xuemingl@mellanox.com> References: <20180410133415.189905-1-xuemingl@mellanox.com> Cc: Xueming Li , dev@dpdk.org To: Nelio Laranjeiro , Shahaf Shuler Return-path: Received: from mellanox.co.il (mail-il-dmz.mellanox.com [193.47.165.129]) by dpdk.org (Postfix) with ESMTP id A75B41BA9E for ; Tue, 10 Apr 2018 15:34:41 +0200 (CEST) In-Reply-To: <20180410133415.189905-1-xuemingl@mellanox.com> List-Id: DPDK patches and discussions List-Unsubscribe: , List-Archive: List-Post: List-Help: List-Subscribe: , Errors-To: dev-bounces@dpdk.org Sender: "dev" This patch supports new tunnel type MPLS-in-GRE and MPLS-in-UDP. Flow pattern example: ipv4 proto is 47 / gre proto is 0x8847 / mpls ipv4 / udp dst is 6635 / mpls / end Signed-off-by: Xueming Li --- drivers/net/mlx5/Makefile | 5 ++ drivers/net/mlx5/mlx5.c | 15 +++++ drivers/net/mlx5/mlx5.h | 1 + drivers/net/mlx5/mlx5_flow.c | 148 ++++++++++++++++++++++++++++++++++++++++++- 4 files changed, 166 insertions(+), 3 deletions(-) diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index f9a6c460b..33553483e 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -131,6 +131,11 @@ mlx5_autoconf.h.new: $(RTE_SDK)/buildtools/auto-config-h.sh enum MLX5DV_CONTEXT_MASK_TUNNEL_OFFLOADS \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ + HAVE_IBV_DEVICE_MPLS_SUPPORT \ + infiniband/verbs.h \ + enum IBV_FLOW_SPEC_MPLS \ + $(AUTOCONF_OUTPUT) + $Q sh -- '$<' '$@' \ HAVE_IBV_WQ_FLAG_RX_END_PADDING \ infiniband/verbs.h \ enum IBV_WQ_FLAG_RX_END_PADDING \ diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index a1f2799e5..2124439b3 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -613,6 +613,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, unsigned int mps; unsigned int 
cqe_comp; unsigned int tunnel_en = 0; + unsigned int mpls_en = 0; int idx; int i; struct mlx5dv_context attrs_out = {0}; @@ -719,12 +720,25 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN) && (attrs_out.tunnel_offloads_caps & MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE)); +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + mpls_en = ((attrs_out.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_MPLS_GRE) && + (attrs_out.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_MPLS_UDP) && + (attrs_out.tunnel_offloads_caps & + MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_CTRL_DW_MPLS)); +#endif } DRV_LOG(DEBUG, "tunnel offloading is %ssupported", tunnel_en ? "" : "not "); + DRV_LOG(DEBUG, "MPLS over GRE/UDP offloading is %ssupported", + mpls_en ? "" : "not "); #else DRV_LOG(WARNING, "tunnel offloading disabled due to old OFED/rdma-core version"); + DRV_LOG(WARNING, + "MPLS over GRE/UDP offloading disabled due to old" + " OFED/rdma-core version or firmware configuration"); #endif if (mlx5_glue->query_device_ex(attr_ctx, NULL, &device_attr)) { err = errno; @@ -748,6 +762,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv __rte_unused, .cqe_comp = cqe_comp, .mps = mps, .tunnel_en = tunnel_en, + .mpls_en = mpls_en, .tx_vec_en = 1, .rx_vec_en = 1, .mpw_hdr_dseg = 0, diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 708272f6d..1868abd8d 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -81,6 +81,7 @@ struct mlx5_dev_config { unsigned int vf:1; /* This is a VF. */ unsigned int mps:2; /* Multi-packet send supported mode. */ unsigned int tunnel_en:1; + unsigned int mpls_en:1; /* MPLS over GRE/UDP is enabled. */ /* Whether tunnel stateless offloads are supported. */ unsigned int flow_counter_en:1; /* Whether flow counter is supported. */ unsigned int cqe_comp:1; /* CQE compression is enabled. 
*/ diff --git a/drivers/net/mlx5/mlx5_flow.c b/drivers/net/mlx5/mlx5_flow.c index 58d437308..5784f2ee0 100644 --- a/drivers/net/mlx5/mlx5_flow.c +++ b/drivers/net/mlx5/mlx5_flow.c @@ -97,6 +97,11 @@ mlx5_flow_create_gre(const struct rte_flow_item *item, const void *default_mask, struct mlx5_flow_data *data); +static int +mlx5_flow_create_mpls(const struct rte_flow_item *item, + const void *default_mask, + struct mlx5_flow_data *data); + struct mlx5_flow_parse; static void @@ -244,12 +249,14 @@ struct rte_flow { #define IS_TUNNEL(type) ( \ (type) == RTE_FLOW_ITEM_TYPE_VXLAN || \ (type) == RTE_FLOW_ITEM_TYPE_VXLAN_GPE || \ + (type) == RTE_FLOW_ITEM_TYPE_MPLS || \ (type) == RTE_FLOW_ITEM_TYPE_GRE) const uint32_t flow_ptype[] = { [RTE_FLOW_ITEM_TYPE_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN, [RTE_FLOW_ITEM_TYPE_VXLAN_GPE] = RTE_PTYPE_TUNNEL_VXLAN_GPE, [RTE_FLOW_ITEM_TYPE_GRE] = RTE_PTYPE_TUNNEL_GRE, + [RTE_FLOW_ITEM_TYPE_MPLS] = RTE_PTYPE_TUNNEL_MPLS_IN_GRE, }; #define PTYPE_IDX(t) ((RTE_PTYPE_TUNNEL_MASK & (t)) >> 12) @@ -260,6 +267,10 @@ const uint32_t ptype_ext[] = { [PTYPE_IDX(RTE_PTYPE_TUNNEL_VXLAN_GPE)] = RTE_PTYPE_TUNNEL_VXLAN_GPE | RTE_PTYPE_L4_UDP, [PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)] = RTE_PTYPE_TUNNEL_GRE, + [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)] = + RTE_PTYPE_TUNNEL_MPLS_IN_GRE, + [PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)] = + RTE_PTYPE_TUNNEL_MPLS_IN_GRE | RTE_PTYPE_L4_UDP, }; /** Structure to generate a simple graph of layers supported by the NIC. 
*/ @@ -396,7 +407,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { }, [RTE_FLOW_ITEM_TYPE_UDP] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_VXLAN, - RTE_FLOW_ITEM_TYPE_VXLAN_GPE), + RTE_FLOW_ITEM_TYPE_VXLAN_GPE, + RTE_FLOW_ITEM_TYPE_MPLS), .actions = valid_actions, .mask = &(const struct rte_flow_item_udp){ .hdr = { @@ -425,7 +437,8 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { [RTE_FLOW_ITEM_TYPE_GRE] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, RTE_FLOW_ITEM_TYPE_IPV4, - RTE_FLOW_ITEM_TYPE_IPV6), + RTE_FLOW_ITEM_TYPE_IPV6, + RTE_FLOW_ITEM_TYPE_MPLS), .actions = valid_actions, .mask = &(const struct rte_flow_item_gre){ .protocol = -1, @@ -433,7 +446,11 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { .default_mask = &rte_flow_item_gre_mask, .mask_sz = sizeof(struct rte_flow_item_gre), .convert = mlx5_flow_create_gre, +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + .dst_sz = sizeof(struct ibv_flow_spec_gre), +#else .dst_sz = sizeof(struct ibv_flow_spec_tunnel), +#endif }, [RTE_FLOW_ITEM_TYPE_VXLAN] = { .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, @@ -461,6 +478,21 @@ static const struct mlx5_flow_items mlx5_flow_items[] = { .convert = mlx5_flow_create_vxlan_gpe, .dst_sz = sizeof(struct ibv_flow_spec_tunnel), }, + [RTE_FLOW_ITEM_TYPE_MPLS] = { + .items = ITEMS(RTE_FLOW_ITEM_TYPE_ETH, + RTE_FLOW_ITEM_TYPE_IPV4, + RTE_FLOW_ITEM_TYPE_IPV6), + .actions = valid_actions, + .mask = &(const struct rte_flow_item_mpls){ + .label_tc_s = "\xff\xff\xf0", + }, + .default_mask = &rte_flow_item_mpls_mask, + .mask_sz = sizeof(struct rte_flow_item_mpls), + .convert = mlx5_flow_create_mpls, +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + .dst_sz = sizeof(struct ibv_flow_spec_mpls), +#endif + }, }; /** Structure to pass to the conversion function. 
*/ @@ -916,7 +948,9 @@ mlx5_flow_convert_items_validate(struct rte_eth_dev *dev, if (ret) goto exit_item_not_supported; if (IS_TUNNEL(items->type)) { - if (parser->tunnel) { + if (parser->tunnel && + !(parser->tunnel == RTE_PTYPE_TUNNEL_GRE && + items->type == RTE_FLOW_ITEM_TYPE_MPLS)) { rte_flow_error_set(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, items, @@ -924,6 +958,16 @@ mlx5_flow_convert_items_validate(struct rte_eth_dev *dev, " tunnel encapsulations."); return -rte_errno; } + if (items->type == RTE_FLOW_ITEM_TYPE_MPLS && + !priv->config.mpls_en) { + rte_flow_error_set(error, ENOTSUP, + RTE_FLOW_ERROR_TYPE_ITEM, + items, + "MPLS not supported or" + " disabled in firmware" + " configuration."); + return -rte_errno; + } if (!priv->config.tunnel_en && parser->rss_conf.level) { rte_flow_error_set(error, ENOTSUP, @@ -1883,6 +1927,80 @@ mlx5_flow_create_vxlan_gpe(const struct rte_flow_item *item, } /** + * Convert MPLS item to Verbs specification. + * Tunnel types currently supported are MPLS-in-GRE and MPLS-in-UDP. + * + * @param item[in] + * Item specification. + * @param default_mask[in] + * Default bit-masks to use when item->mask is not provided. + * @param data[in, out] + * User structure. + * + * @return + * 0 on success, a negative errno value otherwise and rte_errno is set. 
+ */ +static int +mlx5_flow_create_mpls(const struct rte_flow_item *item __rte_unused, + const void *default_mask __rte_unused, + struct mlx5_flow_data *data __rte_unused) +{ +#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT + return rte_flow_error_set(data->error, EINVAL, + RTE_FLOW_ERROR_TYPE_ITEM, + item, + "MPLS not supported by driver"); +#else + unsigned int i; + const struct rte_flow_item_mpls *spec = item->spec; + const struct rte_flow_item_mpls *mask = item->mask; + struct mlx5_flow_parse *parser = data->parser; + unsigned int size = sizeof(struct ibv_flow_spec_mpls); + struct ibv_flow_spec_mpls mpls = { + .type = IBV_FLOW_SPEC_MPLS, + .size = size, + }; + union tag { + uint32_t tag; + uint8_t label[4]; + } id; + + id.tag = 0; + parser->inner = IBV_FLOW_SPEC_INNER; + if (parser->layer == HASH_RXQ_UDPV4 || + parser->layer == HASH_RXQ_UDPV6) { + parser->tunnel = + ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_UDP)]; + parser->out_layer = parser->layer; + } else { + parser->tunnel = + ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_MPLS_IN_GRE)]; + } + parser->layer = HASH_RXQ_TUNNEL; + if (spec) { + if (!mask) + mask = default_mask; + memcpy(&id.label[1], spec->label_tc_s, 3); + id.label[0] = spec->ttl; + mpls.val.tag = id.tag; + memcpy(&id.label[1], mask->label_tc_s, 3); + id.label[0] = mask->ttl; + mpls.mask.tag = id.tag; + /* Remove unwanted bits from values. */ + mpls.val.tag &= mpls.mask.tag; + } + mlx5_flow_create_copy(parser, &mpls, size); + for (i = 0; i != hash_rxq_init_n; ++i) { + if (!parser->queue[i].ibv_attr) + continue; + parser->queue[i].ibv_attr->flags |= + IBV_FLOW_ATTR_FLAGS_ORDERED_SPEC_LIST; + } + return 0; +#endif +} + +/** * Convert GRE item to Verbs specification. 
 * * @param item[in] @@ -1901,16 +2019,40 @@ mlx5_flow_create_gre(const struct rte_flow_item *item __rte_unused, struct mlx5_flow_data *data) { struct mlx5_flow_parse *parser = data->parser; +#ifndef HAVE_IBV_DEVICE_MPLS_SUPPORT unsigned int size = sizeof(struct ibv_flow_spec_tunnel); struct ibv_flow_spec_tunnel tunnel = { .type = parser->inner | IBV_FLOW_SPEC_VXLAN_TUNNEL, .size = size, }; +#else + const struct rte_flow_item_gre *spec = item->spec; + const struct rte_flow_item_gre *mask = item->mask; + unsigned int size = sizeof(struct ibv_flow_spec_gre); + struct ibv_flow_spec_gre tunnel = { + .type = parser->inner | IBV_FLOW_SPEC_GRE, + .size = size, + }; +#endif parser->inner = IBV_FLOW_SPEC_INNER; parser->tunnel = ptype_ext[PTYPE_IDX(RTE_PTYPE_TUNNEL_GRE)]; parser->out_layer = parser->layer; parser->layer = HASH_RXQ_TUNNEL; +#ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT + if (spec) { + if (!mask) + mask = default_mask; + tunnel.val.c_ks_res0_ver = spec->c_rsvd0_ver; + tunnel.val.protocol = spec->protocol; + tunnel.mask.c_ks_res0_ver = mask->c_rsvd0_ver; + tunnel.mask.protocol = mask->protocol; + /* Remove unwanted bits from values. */ + tunnel.val.c_ks_res0_ver &= tunnel.mask.c_ks_res0_ver; + tunnel.val.protocol &= tunnel.mask.protocol; + tunnel.val.key &= tunnel.mask.key; + } +#endif mlx5_flow_create_copy(parser, &tunnel, size); return 0; } -- 2.13.3