From: John Fastabend <john.fastabend@gmail.com>
To: tgraf@suug.ch, sfeldma@gmail.com, jiri@resnulli.us,
	jhs@mojatatu.com, simon.horman@netronome.com
Cc: netdev@vger.kernel.org, davem@davemloft.net, andy@greyhouse.net
Subject: [net-next PATCH v1 05/11] net: rocker: add set flow rules
Date: Wed, 31 Dec 2014 11:47:37 -0800
Message-ID: <20141231194735.31070.55480.stgit@nitbit.x32>
In-Reply-To: <20141231194057.31070.5244.stgit@nitbit.x32>

Implement the set flow rule operations for the existing rocker tables. Rules
can be programmed into the ingress port, VLAN, termination MAC, bridging and
ACL tables; the unicast and multicast routing tables are not yet supported and
return -EOPNOTSUPP, as does the delete operation, which is implemented later
in the series.

Signed-off-by: John Fastabend <john.r.fastabend@intel.com>
---
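For reference, below is a minimal sketch of how a kernel-side caller might
exercise the new ndo_flow_set_flows hook against the VLAN table. The
struct net_flow_flow / net_flow_field_ref / net_flow_action layouts and the
HEADER_*/ACTION_* uids come from patches 01-02/11 and rocker_pipeline.h; any
names not visible in this patch (HEADER_METADATA, HEADER_METADATA_IN_LPORT,
HEADER_VLAN and NET_FLOW_ACTION_ARG_TYPE_U16) are assumptions used purely for
illustration, as is the assumption that .args is a pointer member.

/*
 * Sketch only: program a rule into the rocker VLAN table that matches
 * in_lport 1 and outer VID 10, rewrites the VID to 100 and jumps to
 * the termination MAC table.  The match/action/arg arrays use the same
 * terminator convention the driver loops rely on (.instance/.uid/.type
 * equal to zero).  Requires <linux/netdevice.h> plus the flow API
 * header introduced in patch 01/11.
 */
static int example_set_vlan_flow(struct net_device *dev)
{
	struct net_flow_field_ref matches[] = {
		{ .instance = HEADER_INSTANCE_IN_LPORT,
		  .header = HEADER_METADATA,		/* assumed uid */
		  .field = HEADER_METADATA_IN_LPORT,	/* assumed uid */
		  .value_u32 = 1, .mask_u32 = 0xffffffff },
		{ .instance = HEADER_INSTANCE_VLAN_OUTER,
		  .header = HEADER_VLAN,		/* assumed uid */
		  .field = HEADER_VLAN_VID,
		  .value_u16 = 10, .mask_u16 = 0xfff },
		{ .instance = 0 },			/* terminator */
	};
	struct net_flow_action_arg set_vid_args[] = {
		{ .type = NET_FLOW_ACTION_ARG_TYPE_U16,	/* assumed type */
		  .value_u16 = 100 },
		{ .type = NET_FLOW_ACTION_ARG_TYPE_NULL },
	};
	struct net_flow_action_arg goto_args[] = {
		{ .type = NET_FLOW_ACTION_ARG_TYPE_U16,	/* assumed type */
		  .value_u16 = ROCKER_FLOW_TABLE_ID_TERMINATION_MAC },
		{ .type = NET_FLOW_ACTION_ARG_TYPE_NULL },
	};
	struct net_flow_action actions[] = {
		{ .uid = ACTION_SET_VLAN_ID, .args = set_vid_args },
		{ .uid = ACTION_SET_GOTO_TABLE, .args = goto_args },
		{ .uid = 0 },				/* terminator */
	};
	struct net_flow_flow flow = {
		.table_id = ROCKER_FLOW_TABLE_ID_VLAN,
		.matches = matches,
		.actions = actions,
	};

	/* the hook is only present when the flow API support is built in */
	if (!dev->netdev_ops->ndo_flow_set_flows)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_flow_set_flows(dev, &flow);
}

In practice rules arrive through the netlink interface added earlier in the
series rather than being built inside the kernel like this; the sketch only
shows the shape of the data the rocker_flow_set_*() helpers below expect.
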
 drivers/net/ethernet/rocker/rocker.c          |  518 +++++++++++++++++++++++++
 drivers/net/ethernet/rocker/rocker_pipeline.h |    3 
 2 files changed, 520 insertions(+), 1 deletion(-)

diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 4c6787a..c40c58d 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -3806,6 +3806,521 @@ static struct net_flow_hdr_node **rocker_get_hgraph(struct net_device *d)
 {
 	return rocker_header_nodes;
 }
+
+static int is_valid_net_flow_action_arg(struct net_flow_action *a, int id)
+{
+	struct net_flow_action_arg *args = a->args;
+	int i;
+
+	for (i = 0; args[i].type != NET_FLOW_ACTION_ARG_TYPE_NULL; i++) {
+		if (a->args[i].type == NET_FLOW_ACTION_ARG_TYPE_NULL ||
+		    args[i].type != a->args[i].type)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int is_valid_net_flow_action(struct net_flow_action *a, int *actions)
+{
+	int i;
+
+	for (i = 0; actions[i]; i++) {
+		if (actions[i] == a->uid)
+			return is_valid_net_flow_action_arg(a, a->uid);
+	}
+	return -EINVAL;
+}
+
+static int is_valid_net_flow_match(struct net_flow_field_ref *f,
+				   struct net_flow_field_ref *fields)
+{
+	int i;
+
+	for (i = 0; fields[i].header; i++) {
+		if (f->header == fields[i].header &&
+		    f->field == fields[i].field)
+			return 0;
+	}
+
+	return -EINVAL;
+}
+
+int is_valid_net_flow(struct net_flow_table *table, struct net_flow_flow *flow)
+{
+	struct net_flow_field_ref *fields = table->matches;
+	int *actions = table->actions;
+	int i, err;
+
+	for (i = 0; flow->actions[i].uid; i++) {
+		err = is_valid_net_flow_action(&flow->actions[i], actions);
+		if (err)
+			return -EINVAL;
+	}
+
+	for (i = 0; flow->matches[i].header; i++) {
+		err = is_valid_net_flow_match(&flow->matches[i], fields);
+		if (err)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+static u32 rocker_goto_value(u32 id)
+{
+	switch (id) {
+	case ROCKER_FLOW_TABLE_ID_INGRESS_PORT:
+		return ROCKER_OF_DPA_TABLE_ID_INGRESS_PORT;
+	case ROCKER_FLOW_TABLE_ID_VLAN:
+		return ROCKER_OF_DPA_TABLE_ID_VLAN;
+	case ROCKER_FLOW_TABLE_ID_TERMINATION_MAC:
+		return ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+	case ROCKER_FLOW_TABLE_ID_UNICAST_ROUTING:
+		return ROCKER_OF_DPA_TABLE_ID_UNICAST_ROUTING;
+	case ROCKER_FLOW_TABLE_ID_MULTICAST_ROUTING:
+		return ROCKER_OF_DPA_TABLE_ID_MULTICAST_ROUTING;
+	case ROCKER_FLOW_TABLE_ID_BRIDGING:
+		return ROCKER_OF_DPA_TABLE_ID_BRIDGING;
+	case ROCKER_FLOW_TABLE_ID_ACL_POLICY:
+		return ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+	default:
+		return 0;
+	}
+}
+
+static int rocker_flow_set_ig_port(struct net_device *dev,
+				   struct net_flow_flow *flow)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	enum rocker_of_dpa_table_id goto_tbl;
+	u32 in_lport_mask = 0xffff0000;
+	u32 in_lport = 0;
+	int err, flags = 0;
+
+	err = is_valid_net_flow(&ingress_port_table, flow);
+	if (err)
+		return err;
+
+	/* The ingress port table only supports one field/mask/action, which
+	 * simplifies key construction; the validity check above ensures the
+	 * values have the correct types/mask/action. The user could pass the
+	 * same field multiple times in one message; the validity test does
+	 * not currently catch this, so only the first match/action specified
+	 * is used.
+	 */
+	in_lport = flow->matches[0].value_u32;
+	in_lport_mask = flow->matches[0].mask_u32;
+	goto_tbl = rocker_goto_value(flow->actions[0].args[0].value_u16);
+
+	err = rocker_flow_tbl_ig_port(rocker_port, flags,
+				      in_lport, in_lport_mask,
+				      goto_tbl);
+	return err;
+}
+
+static int rocker_flow_set_vlan(struct net_device *dev,
+				struct net_flow_flow *flow)
+{
+	enum rocker_of_dpa_table_id goto_tbl;
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	int i, err = 0, flags = 0;
+	u32 in_lport;
+	__be16 vlan_id, vlan_id_mask, new_vlan_id;
+	bool untagged, have_in_lport = false;
+
+	err = is_valid_net_flow(&vlan_table, flow);
+	if (err)
+		return err;
+
+	goto_tbl = ROCKER_OF_DPA_TABLE_ID_TERMINATION_MAC;
+
+	/* If the user does not specify a VID match, default to any */
+	vlan_id = 1;
+	vlan_id_mask = 0;
+
+	for (i = 0; flow->matches && flow->matches[i].instance; i++) {
+		switch (flow->matches[i].instance) {
+		case HEADER_INSTANCE_IN_LPORT:
+			in_lport = flow->matches[i].value_u32;
+			have_in_lport = true;
+			break;
+		case HEADER_INSTANCE_VLAN_OUTER:
+			if (flow->matches[i].field != HEADER_VLAN_VID)
+				break;
+
+			vlan_id = htons(flow->matches[i].value_u16);
+			vlan_id_mask = htons(flow->matches[i].mask_u16);
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* If the user does not specify a new VLAN ID, use the default */
+	new_vlan_id = rocker_port_vid_to_vlan(rocker_port, vlan_id, &untagged);
+
+	for (i = 0; flow->actions && flow->actions[i].uid; i++) {
+		struct net_flow_action_arg *arg = &flow->actions[i].args[0];
+
+		switch (flow->actions[i].uid) {
+		case ACTION_SET_GOTO_TABLE:
+			goto_tbl = rocker_goto_value(arg->value_u16);
+			break;
+		case ACTION_SET_VLAN_ID:
+			new_vlan_id = htons(arg->value_u16);
+			if (new_vlan_id)
+				untagged = false;
+			break;
+		}
+	}
+
+	if (!have_in_lport)
+		return -EINVAL;
+
+	err = rocker_flow_tbl_vlan(rocker_port, flags, in_lport,
+				   vlan_id, vlan_id_mask, goto_tbl,
+				   untagged, new_vlan_id);
+	return err;
+}
+
+static int rocker_flow_set_term_mac(struct net_device *dev,
+				    struct net_flow_flow *flow)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	__be16 vlan_id, vlan_id_mask, ethtype = 0;
+	const u8 *eth_dst, *eth_dst_mask;
+	u32 in_lport, in_lport_mask;
+	int i, err = 0, flags = 0;
+	bool copy_to_cpu;
+
+	eth_dst = NULL;
+	eth_dst_mask = NULL;
+
+	err = is_valid_net_flow(&term_mac_table, flow);
+	if (err)
+		return err;
+
+	/* If the user does not specify a VID match, default to any */
+	vlan_id = rocker_port->internal_vlan_id;
+	vlan_id_mask = 0;
+
+	/* If the user does not specify an in_lport match, default to any */
+	in_lport = rocker_port->lport;
+	in_lport_mask = 0;
+
+	/* If the user does not specify a MAC address, match any */
+	eth_dst = rocker_port->dev->dev_addr;
+	eth_dst_mask = zero_mac;
+
+	for (i = 0; flow->matches && flow->matches[i].instance; i++) {
+		switch (flow->matches[i].instance) {
+		case HEADER_INSTANCE_IN_LPORT:
+			in_lport = flow->matches[i].value_u32;
+			in_lport_mask = flow->matches[i].mask_u32;
+			break;
+		case HEADER_INSTANCE_VLAN_OUTER:
+			if (flow->matches[i].field != HEADER_VLAN_VID)
+				break;
+
+			vlan_id = htons(flow->matches[i].value_u16);
+			vlan_id_mask = htons(flow->matches[i].mask_u16);
+			break;
+		case HEADER_INSTANCE_ETHERNET:
+			switch (flow->matches[i].field) {
+			case HEADER_ETHERNET_DST_MAC:
+				eth_dst = (u8 *)&flow->matches[i].value_u64;
+				eth_dst_mask = (u8 *)&flow->matches[i].mask_u64;
+				break;
+			case HEADER_ETHERNET_ETHERTYPE:
+				ethtype = htons(flow->matches[i].value_u16);
+				break;
+			default:
+				return -EINVAL;
+			}
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	if (!ethtype)
+		return -EINVAL;
+
+	/* By default do not copy to cpu */
+	copy_to_cpu = false;
+
+	for (i = 0; flow->actions && flow->actions[i].uid; i++) {
+		switch (flow->actions[i].uid) {
+		case ACTION_COPY_TO_CPU:
+			copy_to_cpu = true;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	err = rocker_flow_tbl_term_mac(rocker_port, in_lport, in_lport_mask,
+				       ethtype, eth_dst, eth_dst_mask,
+				       vlan_id, vlan_id_mask,
+				       copy_to_cpu, flags);
+	return err;
+}
+
+static int rocker_flow_set_ucast_routing(struct net_device *dev,
+					 struct net_flow_flow *flow)
+{
+	return -EOPNOTSUPP;
+}
+
+static int rocker_flow_set_mcast_routing(struct net_device *dev,
+					 struct net_flow_flow *flow)
+{
+	return -EOPNOTSUPP;
+}
+
+static int rocker_flow_set_bridge(struct net_device *dev,
+				  struct net_flow_flow *flow)
+{
+	enum rocker_of_dpa_table_id goto_tbl;
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	u32 in_lport, in_lport_mask, group_id, tunnel_id;
+	__be16 vlan_id, vlan_id_mask;
+	const u8 *eth_dst, *eth_dst_mask;
+	int i, err = 0, flags = 0;
+	bool copy_to_cpu;
+
+	err = is_valid_net_flow(&bridge_table, flow);
+	if (err)
+		return err;
+
+	goto_tbl = ROCKER_OF_DPA_TABLE_ID_ACL_POLICY;
+
+	/* If the user does not specify a VID match, default to any */
+	vlan_id = rocker_port->internal_vlan_id;
+	vlan_id_mask = 0;
+
+	/* If the user does not specify an in_lport match, default to any */
+	in_lport = rocker_port->lport;
+	in_lport_mask = 0;
+
+	/* If the user does not specify a MAC address, match any */
+	eth_dst = rocker_port->dev->dev_addr;
+	eth_dst_mask = NULL;
+
+	/* tunnel_id is not supported yet. */
+	tunnel_id = 0;
+
+	for (i = 0; flow->matches && flow->matches[i].instance; i++) {
+		switch (flow->matches[i].instance) {
+		case HEADER_INSTANCE_IN_LPORT:
+			in_lport = flow->matches[i].value_u32;
+			in_lport_mask = flow->matches[i].mask_u32;
+			break;
+		case HEADER_INSTANCE_VLAN_OUTER:
+			if (flow->matches[i].field != HEADER_VLAN_VID)
+				break;
+
+			vlan_id = htons(flow->matches[i].value_u16);
+			vlan_id_mask = htons(flow->matches[i].mask_u16);
+			break;
+		case HEADER_INSTANCE_ETHERNET:
+			switch (flow->matches[i].field) {
+			case HEADER_ETHERNET_DST_MAC:
+				eth_dst = (u8 *)&flow->matches[i].value_u64;
+				eth_dst_mask = (u8 *)&flow->matches[i].mask_u64;
+				break;
+			default:
+				return -EINVAL;
+			}
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* By default do not copy to cpu and skip group assignment */
+	copy_to_cpu = false;
+	group_id = ROCKER_GROUP_NONE;
+
+	for (i = 0; flow->actions && flow->actions[i].uid; i++) {
+		struct net_flow_action_arg *arg = &flow->actions[i].args[0];
+
+		switch (flow->actions[i].uid) {
+		case ACTION_SET_GOTO_TABLE:
+			goto_tbl = rocker_goto_value(arg->value_u16);
+			break;
+		case ACTION_COPY_TO_CPU:
+			copy_to_cpu = true;
+			break;
+		case ACTION_SET_GROUP_ID:
+			group_id = arg->value_u32;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* Ignore eth_dst_mask; it seems to cause an -EINVAL return code */
+	err = rocker_flow_tbl_bridge(rocker_port, flags,
+				     eth_dst, eth_dst_mask,
+				     vlan_id, tunnel_id,
+				     goto_tbl, group_id, copy_to_cpu);
+	return err;
+}
+
+static int rocker_flow_set_acl(struct net_device *dev,
+			       struct net_flow_flow *flow)
+{
+	struct rocker_port *rocker_port = netdev_priv(dev);
+	u32 in_lport, in_lport_mask, group_id, tunnel_id;
+	__be16 vlan_id, vlan_id_mask, ethtype = 0;
+	const u8 *eth_dst, *eth_src, *eth_dst_mask, *eth_src_mask;
+	u8 protocol, protocol_mask, dscp, dscp_mask;
+	int i, err = 0, flags = 0;
+
+	err = is_valid_net_flow(&acl_table, flow);
+	if (err)
+		return err;
+
+	/* If the user does not specify a VID match, default to any */
+	vlan_id = rocker_port->internal_vlan_id;
+	vlan_id_mask = 0;
+
+	/* If the user does not specify an in_lport match, default to any */
+	in_lport = rocker_port->lport;
+	in_lport_mask = 0;
+
+	/* If the user does not specify a MAC address, match any */
+	eth_dst = rocker_port->dev->dev_addr;
+	eth_src = zero_mac;
+	eth_dst_mask = NULL;
+	eth_src_mask = NULL;
+
+	/* If the user does not set protocol/dscp, mask them out */
+	protocol = 0;
+	dscp = 0;
+	protocol_mask = 0;
+	dscp_mask = 0;
+
+	/* tunnel_id is not supported yet. */
+	tunnel_id = 0;
+
+	for (i = 0; flow->matches && flow->matches[i].instance; i++) {
+		switch (flow->matches[i].instance) {
+		case HEADER_INSTANCE_IN_LPORT:
+			in_lport = flow->matches[i].value_u32;
+			in_lport_mask = flow->matches[i].mask_u32;
+			break;
+		case HEADER_INSTANCE_VLAN_OUTER:
+			if (flow->matches[i].field != HEADER_VLAN_VID)
+				break;
+
+			vlan_id = htons(flow->matches[i].value_u16);
+			vlan_id_mask = htons(flow->matches[i].mask_u16);
+			break;
+		case HEADER_INSTANCE_ETHERNET:
+			switch (flow->matches[i].field) {
+			case HEADER_ETHERNET_SRC_MAC:
+				eth_src = (u8 *)&flow->matches[i].value_u64;
+				eth_src_mask = (u8 *)&flow->matches[i].mask_u64;
+				break;
+			case HEADER_ETHERNET_DST_MAC:
+				eth_dst = (u8 *)&flow->matches[i].value_u64;
+				eth_dst_mask = (u8 *)&flow->matches[i].mask_u64;
+				break;
+			case HEADER_ETHERNET_ETHERTYPE:
+				ethtype = htons(flow->matches[i].value_u16);
+				break;
+			default:
+				return -EINVAL;
+			}
+			break;
+		case HEADER_INSTANCE_IPV4:
+			switch (flow->matches[i].field) {
+			case HEADER_IPV4_PROTOCOL:
+				protocol = flow->matches[i].value_u8;
+				protocol_mask = flow->matches[i].mask_u8;
+				break;
+			case HEADER_IPV4_DSCP:
+				dscp = flow->matches[i].value_u8;
+				dscp_mask = flow->matches[i].mask_u8;
+				break;
+			default:
+				return -EINVAL;
+			}
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	/* By default skip group assignment */
+	group_id = ROCKER_GROUP_NONE;
+
+	for (i = 0; flow->actions && flow->actions[i].uid; i++) {
+		switch (flow->actions[i].uid) {
+		case ACTION_SET_GROUP_ID:
+			group_id = flow->actions[i].args[0].value_u32;
+			break;
+		default:
+			return -EINVAL;
+		}
+	}
+
+	err = rocker_flow_tbl_acl(rocker_port, flags,
+				  in_lport, in_lport_mask,
+				  eth_src, eth_src_mask,
+				  eth_dst, eth_dst_mask, ethtype,
+				  vlan_id, vlan_id_mask,
+				  protocol, protocol_mask,
+				  dscp, dscp_mask,
+				  group_id);
+	return err;
+}
+
+static int rocker_set_flows(struct net_device *dev,
+			    struct net_flow_flow *flow)
+{
+	int err = -EINVAL;
+
+	if (!flow->matches || !flow->actions)
+		return -EINVAL;
+
+	switch (flow->table_id) {
+	case ROCKER_FLOW_TABLE_ID_INGRESS_PORT:
+		err = rocker_flow_set_ig_port(dev, flow);
+		break;
+	case ROCKER_FLOW_TABLE_ID_VLAN:
+		err = rocker_flow_set_vlan(dev, flow);
+		break;
+	case ROCKER_FLOW_TABLE_ID_TERMINATION_MAC:
+		err = rocker_flow_set_term_mac(dev, flow);
+		break;
+	case ROCKER_FLOW_TABLE_ID_UNICAST_ROUTING:
+		err = rocker_flow_set_ucast_routing(dev, flow);
+		break;
+	case ROCKER_FLOW_TABLE_ID_MULTICAST_ROUTING:
+		err = rocker_flow_set_mcast_routing(dev, flow);
+		break;
+	case ROCKER_FLOW_TABLE_ID_BRIDGING:
+		err = rocker_flow_set_bridge(dev, flow);
+		break;
+	case ROCKER_FLOW_TABLE_ID_ACL_POLICY:
+		err = rocker_flow_set_acl(dev, flow);
+		break;
+	default:
+		break;
+	}
+
+	return err;
+}
+
+static int rocker_del_flows(struct net_device *dev,
+			    struct net_flow_flow *flow)
+{
+	return -EOPNOTSUPP;
+}
 #endif
 
 static const struct net_device_ops rocker_port_netdev_ops = {
@@ -3828,6 +4342,9 @@ static const struct net_device_ops rocker_port_netdev_ops = {
 	.ndo_flow_get_actions		= rocker_get_actions,
 	.ndo_flow_get_tbl_graph		= rocker_get_tgraph,
 	.ndo_flow_get_hdr_graph		= rocker_get_hgraph,
+
+	.ndo_flow_set_flows		= rocker_set_flows,
+	.ndo_flow_del_flows		= rocker_del_flows,
 #endif
 };
 
diff --git a/drivers/net/ethernet/rocker/rocker_pipeline.h b/drivers/net/ethernet/rocker/rocker_pipeline.h
index 9544339..701e139 100644
--- a/drivers/net/ethernet/rocker/rocker_pipeline.h
+++ b/drivers/net/ethernet/rocker/rocker_pipeline.h
@@ -527,6 +527,7 @@ enum rocker_flow_table_id_space {
 	ROCKER_FLOW_TABLE_ID_VLAN,
 	ROCKER_FLOW_TABLE_ID_TERMINATION_MAC,
 	ROCKER_FLOW_TABLE_ID_UNICAST_ROUTING,
+	ROCKER_FLOW_TABLE_ID_MULTICAST_ROUTING,
 	ROCKER_FLOW_TABLE_ID_BRIDGING,
 	ROCKER_FLOW_TABLE_ID_ACL_POLICY,
 	ROCKER_FLOW_TABLE_NULL = 0,
@@ -588,7 +589,7 @@ struct net_flow_table acl_table = {
 
 struct net_flow_table null_table = {
 	.name = "",
-	.uid = 0,
+	.uid = ROCKER_FLOW_TABLE_NULL,
 	.source = 0,
 	.size = 0,
 	.matches = NULL,
