* [PATCH net-next 0/2] net/sched: act_skbedit: lockless data path
From: Davide Caratti @ 2018-07-06 10:40 UTC
  To: Cong Wang, David S. Miller; +Cc: netdev

the data path of act_skbedit can be made faster by avoiding spinlocks:
 - patch 1 converts act_skbedit statistics to per-CPU counters
 - patch 2 lets act_skbedit use RCU to read and update its configuration
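
In short (a simplified before/after sketch assembled from the two diffs
below, not a literal excerpt; variable names match the patched code):

    /* before: every packet serializes on the per-action lock */
    spin_lock(&d->tcf_lock);
    /* update stats, read d->flags / d->priority / ..., edit the skb */
    spin_unlock(&d->tcf_lock);

    /* after: per-CPU stats plus an RCU-protected parameter block */
    bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
    rcu_read_lock();
    params = rcu_dereference(d->params);
    /* edit the skb according to params->flags, params->priority, ... */
    rcu_read_unlock();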

test procedure (using pktgen from https://github.com/netoptimizer; the
script sends -n packets of -s bytes from -t concurrent threads on the
interface given by -i):

 # ip link add name eth1 type dummy
 # ip link set dev eth1 up
 # tc qdisc add dev eth1 clsact
 # tc filter add dev eth1 egress matchall action skbedit priority c1a0:c1a0
 # for c in 1 2 4 ; do
 > ./pktgen_bench_xmit_mode_queue_xmit.sh -v -s 64 -t $c -n 5000000 -i eth1
 > done

test results (avg. pps/thread)

  $c | before patch |  after patch | improvement
 ----+--------------+--------------+------------
   1 | 3917464 ± 3% | 4000458 ± 3% |  negligible
   2 | 3455367 ± 4% | 3953076 ± 1% |        +14%
   4 | 2496594 ± 2% | 3801123 ± 3% |        +52%


Davide Caratti (2):
  net/sched: skbedit: use per-cpu counters
  net/sched: act_skbedit: don't use spinlock in the data path

 include/net/tc_act/tc_skbedit.h |  37 ++++++++---
 net/sched/act_skbedit.c         | 113 ++++++++++++++++++++------------
 2 files changed, 98 insertions(+), 52 deletions(-)

-- 
2.17.1

* [PATCH net-next 1/2] net/sched: skbedit: use per-cpu counters
From: Davide Caratti @ 2018-07-06 10:40 UTC
  To: Cong Wang, David S. Miller; +Cc: netdev

use per-CPU counters instead of sharing a single set of stats with all
cores: this removes the need for spinlocks when stats are read or updated.
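
One detail worth calling out in the hunk below: the last argument of
tcf_idr_create() flips from false to true. As far as I can tell from
net/act_api.c (hedged, please check the tree this applies to), that
argument is the 'cpustats' flag, which makes the action core allocate
the cpu_bstats/cpu_qstats pointers that the bstats_cpu_update() and
qstats_drop_inc() calls in this patch dereference:

    /* prototype at the time of this patch (include/net/act_api.h);
     * 'cpustats' selects per-CPU allocation of bstats/qstats
     */
    int tcf_idr_create(struct tc_action_net *tn, u32 index,
                       struct nlattr *est, struct tc_action **a,
                       const struct tc_action_ops *ops, int bind,
                       bool cpustats);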

Signed-off-by: Davide Caratti <dcaratti@redhat.com>
---
 net/sched/act_skbedit.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index dfaf5d8028dd..c1bfa28ba477 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -38,10 +38,10 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
 {
 	struct tcf_skbedit *d = to_skbedit(a);
 
-	spin_lock(&d->tcf_lock);
 	tcf_lastuse_update(&d->tcf_tm);
-	bstats_update(&d->tcf_bstats, skb);
+	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
 
+	spin_lock(&d->tcf_lock);
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		skb->priority = d->priority;
 	if (d->flags & SKBEDIT_F_INHERITDSFIELD) {
@@ -77,8 +77,8 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
 	return d->tcf_action;
 
 err:
-	d->tcf_qstats.drops++;
 	spin_unlock(&d->tcf_lock);
+	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
 	return TC_ACT_SHOT;
 }
 
@@ -163,7 +163,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 
 	if (!exists) {
 		ret = tcf_idr_create(tn, parm->index, est, a,
-				     &act_skbedit_ops, bind, false);
+				     &act_skbedit_ops, bind, true);
 		if (ret)
 			return ret;
 
-- 
2.17.1

* [PATCH net-next 2/2] net/sched: act_skbedit: don't use spinlock in the data path
From: Davide Caratti @ 2018-07-06 10:40 UTC
  To: Cong Wang, David S. Miller; +Cc: netdev

use RCU instead of spin_{,un}lock_bh to protect concurrent reads and
writes of the act_skbedit configuration. This reduces contention in the
data path when multiple readers are present.
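
The pattern, sketched from the diff below: readers pick up the parameter
block inside an RCU read-side critical section, while the writer (running
under RTNL, as the ASSERT_RTNL() documents) publishes a fully-initialized
replacement and lets kfree_rcu() free the old block only after a grace
period, so in-flight readers never touch freed memory:

    /* reader, in the data path */
    rcu_read_lock();
    params = rcu_dereference(d->params);
    /* ... act on params->flags, params->priority, ... */
    rcu_read_unlock();

    /* writer, in the control path */
    params_old = rtnl_dereference(d->params);
    rcu_assign_pointer(d->params, params_new);
    if (params_old)
        kfree_rcu(params_old, rcu);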

Signed-off-by: Davide Caratti <dcaratti@redhat.com>
---
 include/net/tc_act/tc_skbedit.h |  37 ++++++++---
 net/sched/act_skbedit.c         | 107 ++++++++++++++++++++------------
 2 files changed, 95 insertions(+), 49 deletions(-)

diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 19cd3d345804..911bbac838a2 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -22,14 +22,19 @@
 #include <net/act_api.h>
 #include <linux/tc_act/tc_skbedit.h>
 
+struct tcf_skbedit_params {
+	u32 flags;
+	u32 priority;
+	u32 mark;
+	u32 mask;
+	u16 queue_mapping;
+	u16 ptype;
+	struct rcu_head rcu;
+};
+
 struct tcf_skbedit {
-	struct tc_action	common;
-	u32		flags;
-	u32		priority;
-	u32		mark;
-	u32		mask;
-	u16		queue_mapping;
-	u16		ptype;
+	struct tc_action common;
+	struct tcf_skbedit_params __rcu *params;
 };
 #define to_skbedit(a) ((struct tcf_skbedit *)a)
 
@@ -37,15 +42,27 @@ struct tcf_skbedit {
 static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
 {
 #ifdef CONFIG_NET_CLS_ACT
-	if (a->ops && a->ops->type == TCA_ACT_SKBEDIT)
-		return to_skbedit(a)->flags == SKBEDIT_F_MARK;
+	u32 flags;
+
+	if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) {
+		rcu_read_lock();
+		flags = rcu_dereference(to_skbedit(a)->params)->flags;
+		rcu_read_unlock();
+		return flags == SKBEDIT_F_MARK;
+	}
 #endif
 	return false;
 }
 
 static inline u32 tcf_skbedit_mark(const struct tc_action *a)
 {
-	return to_skbedit(a)->mark;
+	u32 mark;
+
+	rcu_read_lock();
+	mark = rcu_dereference(to_skbedit(a)->params)->mark;
+	rcu_read_unlock();
+
+	return mark;
 }
 
 #endif /* __NET_TC_SKBEDIT_H */
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index c1bfa28ba477..028eb679e532 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -37,14 +37,19 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
 		       struct tcf_result *res)
 {
 	struct tcf_skbedit *d = to_skbedit(a);
+	struct tcf_skbedit_params *params;
+	int action;
 
 	tcf_lastuse_update(&d->tcf_tm);
 	bstats_cpu_update(this_cpu_ptr(d->common.cpu_bstats), skb);
 
-	spin_lock(&d->tcf_lock);
-	if (d->flags & SKBEDIT_F_PRIORITY)
-		skb->priority = d->priority;
-	if (d->flags & SKBEDIT_F_INHERITDSFIELD) {
+	rcu_read_lock();
+	params = rcu_dereference(d->params);
+	action = READ_ONCE(d->tcf_action);
+
+	if (params->flags & SKBEDIT_F_PRIORITY)
+		skb->priority = params->priority;
+	if (params->flags & SKBEDIT_F_INHERITDSFIELD) {
 		int wlen = skb_network_offset(skb);
 
 		switch (tc_skb_protocol(skb)) {
@@ -63,23 +68,23 @@ static int tcf_skbedit(struct sk_buff *skb, const struct tc_action *a,
 			break;
 		}
 	}
-	if (d->flags & SKBEDIT_F_QUEUE_MAPPING &&
-	    skb->dev->real_num_tx_queues > d->queue_mapping)
-		skb_set_queue_mapping(skb, d->queue_mapping);
-	if (d->flags & SKBEDIT_F_MARK) {
-		skb->mark &= ~d->mask;
-		skb->mark |= d->mark & d->mask;
+	if (params->flags & SKBEDIT_F_QUEUE_MAPPING &&
+	    skb->dev->real_num_tx_queues > params->queue_mapping)
+		skb_set_queue_mapping(skb, params->queue_mapping);
+	if (params->flags & SKBEDIT_F_MARK) {
+		skb->mark &= ~params->mask;
+		skb->mark |= params->mark & params->mask;
 	}
-	if (d->flags & SKBEDIT_F_PTYPE)
-		skb->pkt_type = d->ptype;
-
-	spin_unlock(&d->tcf_lock);
-	return d->tcf_action;
+	if (params->flags & SKBEDIT_F_PTYPE)
+		skb->pkt_type = params->ptype;
 
+unlock:
+	rcu_read_unlock();
+	return action;
 err:
-	spin_unlock(&d->tcf_lock);
 	qstats_drop_inc(this_cpu_ptr(d->common.cpu_qstats));
-	return TC_ACT_SHOT;
+	action = TC_ACT_SHOT;
+	goto unlock;
 }
 
 static const struct nla_policy skbedit_policy[TCA_SKBEDIT_MAX + 1] = {
@@ -97,6 +102,7 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 			    int ovr, int bind, struct netlink_ext_ack *extack)
 {
 	struct tc_action_net *tn = net_generic(net, skbedit_net_id);
+	struct tcf_skbedit_params *params_old, *params_new;
 	struct nlattr *tb[TCA_SKBEDIT_MAX + 1];
 	struct tc_skbedit *parm;
 	struct tcf_skbedit *d;
@@ -176,25 +182,34 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
 			return -EEXIST;
 	}
 
-	spin_lock_bh(&d->tcf_lock);
+	ASSERT_RTNL();
 
-	d->flags = flags;
+	params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
+	if (unlikely(!params_new)) {
+		if (ret == ACT_P_CREATED)
+			tcf_idr_release(*a, bind);
+		return -ENOMEM;
+	}
+
+	params_new->flags = flags;
 	if (flags & SKBEDIT_F_PRIORITY)
-		d->priority = *priority;
+		params_new->priority = *priority;
 	if (flags & SKBEDIT_F_QUEUE_MAPPING)
-		d->queue_mapping = *queue_mapping;
+		params_new->queue_mapping = *queue_mapping;
 	if (flags & SKBEDIT_F_MARK)
-		d->mark = *mark;
+		params_new->mark = *mark;
 	if (flags & SKBEDIT_F_PTYPE)
-		d->ptype = *ptype;
+		params_new->ptype = *ptype;
 	/* default behaviour is to use all the bits */
-	d->mask = 0xffffffff;
+	params_new->mask = 0xffffffff;
 	if (flags & SKBEDIT_F_MASK)
-		d->mask = *mask;
+		params_new->mask = *mask;
 
 	d->tcf_action = parm->action;
-
-	spin_unlock_bh(&d->tcf_lock);
+	params_old = rtnl_dereference(d->params);
+	rcu_assign_pointer(d->params, params_new);
+	if (params_old)
+		kfree_rcu(params_old, rcu);
 
 	if (ret == ACT_P_CREATED)
 		tcf_idr_insert(tn, *a);
@@ -206,33 +221,36 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
 {
 	unsigned char *b = skb_tail_pointer(skb);
 	struct tcf_skbedit *d = to_skbedit(a);
+	struct tcf_skbedit_params *params;
 	struct tc_skbedit opt = {
 		.index   = d->tcf_index,
 		.refcnt  = d->tcf_refcnt - ref,
 		.bindcnt = d->tcf_bindcnt - bind,
 		.action  = d->tcf_action,
 	};
-	struct tcf_t t;
 	u64 pure_flags = 0;
+	struct tcf_t t;
+
+	params = rtnl_dereference(d->params);
 
 	if (nla_put(skb, TCA_SKBEDIT_PARMS, sizeof(opt), &opt))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_PRIORITY) &&
-	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, d->priority))
+	if ((params->flags & SKBEDIT_F_PRIORITY) &&
+	    nla_put_u32(skb, TCA_SKBEDIT_PRIORITY, params->priority))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_QUEUE_MAPPING) &&
-	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, d->queue_mapping))
+	if ((params->flags & SKBEDIT_F_QUEUE_MAPPING) &&
+	    nla_put_u16(skb, TCA_SKBEDIT_QUEUE_MAPPING, params->queue_mapping))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_MARK) &&
-	    nla_put_u32(skb, TCA_SKBEDIT_MARK, d->mark))
+	if ((params->flags & SKBEDIT_F_MARK) &&
+	    nla_put_u32(skb, TCA_SKBEDIT_MARK, params->mark))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_PTYPE) &&
-	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, d->ptype))
+	if ((params->flags & SKBEDIT_F_PTYPE) &&
+	    nla_put_u16(skb, TCA_SKBEDIT_PTYPE, params->ptype))
 		goto nla_put_failure;
-	if ((d->flags & SKBEDIT_F_MASK) &&
-	    nla_put_u32(skb, TCA_SKBEDIT_MASK, d->mask))
+	if ((params->flags & SKBEDIT_F_MASK) &&
+	    nla_put_u32(skb, TCA_SKBEDIT_MASK, params->mask))
 		goto nla_put_failure;
-	if (d->flags & SKBEDIT_F_INHERITDSFIELD)
+	if (params->flags & SKBEDIT_F_INHERITDSFIELD)
 		pure_flags |= SKBEDIT_F_INHERITDSFIELD;
 	if (pure_flags != 0 &&
 	    nla_put(skb, TCA_SKBEDIT_FLAGS, sizeof(pure_flags), &pure_flags))
@@ -248,6 +266,16 @@ static int tcf_skbedit_dump(struct sk_buff *skb, struct tc_action *a,
 	return -1;
 }
 
+static void tcf_skbedit_cleanup(struct tc_action *a)
+{
+	struct tcf_skbedit *d = to_skbedit(a);
+	struct tcf_skbedit_params *params;
+
+	params = rcu_dereference_protected(d->params, 1);
+	if (params)
+		kfree_rcu(params, rcu);
+}
+
 static int tcf_skbedit_walker(struct net *net, struct sk_buff *skb,
 			      struct netlink_callback *cb, int type,
 			      const struct tc_action_ops *ops,
@@ -273,6 +301,7 @@ static struct tc_action_ops act_skbedit_ops = {
 	.act		=	tcf_skbedit,
 	.dump		=	tcf_skbedit_dump,
 	.init		=	tcf_skbedit_init,
+	.cleanup	=	tcf_skbedit_cleanup,
 	.walk		=	tcf_skbedit_walker,
 	.lookup		=	tcf_skbedit_search,
 	.size		=	sizeof(struct tcf_skbedit),
-- 
2.17.1

* Re: [PATCH net-next 0/2] net/sched: act_skbedit: lockless data path
From: David Miller @ 2018-07-10 23:58 UTC
  To: dcaratti; +Cc: xiyou.wangcong, netdev

From: Davide Caratti <dcaratti@redhat.com>
Date: Fri,  6 Jul 2018 12:40:12 +0200

> the data path of act_skbedit can be made faster by avoiding spinlocks:
>  - patch 1 converts act_skbedit statistics to per-CPU counters
>  - patch 2 lets act_skbedit use RCU to read and update its configuration

This doesn't apply cleanly to net-next, please respin.

Thank you.
