* [PATCH net-next 1/1] net: sched: ensure tc flower reoffload takes filter ref
From: John Hurley @ 2019-04-02 22:53 UTC
  To: jiri, davem, xiyou.wangcong; +Cc: netdev, vladbu, oss-drivers, John Hurley

Recent changes to TC flower remove the requirement for the rtnl lock when
accessing and modifying filters. Refcounts now ensure that access and
deletion do not happen concurrently. However, the reoffload function,
which cycles through all filters and replays them to the registered hw
drivers, is not protected.

Use fl_get_next_filter() to iterate over the filters for reoffload, and
ensure that the reference taken by this function is put when each filter
has been processed.
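
The loop relies on fl_get_next_filter() returning the next filter with a
reference already taken. That helper is not part of this diff; as a rough,
illustrative sketch only (details assumed here, not authoritative), it looks
up the next handle in the head's IDR and returns a filter only once a
reference has been successfully taken:

static struct cls_fl_filter *fl_get_next_filter(struct tcf_proto *tp,
						unsigned long *handle)
{
	struct cls_fl_head *head = fl_head_dereference(tp);
	struct cls_fl_filter *f;

	rcu_read_lock();
	while ((f = idr_get_next_ul(&head->handle_idr, handle))) {
		/* Skip filters whose refcount already hit zero -- they
		 * are concurrently being deleted.
		 */
		if (refcount_inc_not_zero(&f->refcnt))
			break;
		++(*handle);
	}
	rcu_read_unlock();
	return f;
}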

Signed-off-by: John Hurley <john.hurley@netronome.com>
Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>
---
 net/sched/cls_flower.c | 88 ++++++++++++++++++++++++++------------------------
 1 file changed, 46 insertions(+), 42 deletions(-)

diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 0638f17..6050e3c 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1683,59 +1683,63 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
 static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 			void *cb_priv, struct netlink_ext_ack *extack)
 {
-	struct cls_fl_head *head = fl_head_dereference(tp);
 	struct tc_cls_flower_offload cls_flower = {};
 	struct tcf_block *block = tp->chain->block;
-	struct fl_flow_mask *mask;
+	unsigned long handle = 0;
 	struct cls_fl_filter *f;
 	int err;
 
-	list_for_each_entry(mask, &head->masks, list) {
-		list_for_each_entry(f, &mask->filters, list) {
-			if (tc_skip_hw(f->flags))
-				continue;
-
-			cls_flower.rule =
-				flow_rule_alloc(tcf_exts_num_actions(&f->exts));
-			if (!cls_flower.rule)
-				return -ENOMEM;
-
-			tc_cls_common_offload_init(&cls_flower.common, tp,
-						   f->flags, extack);
-			cls_flower.command = add ?
-				TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
-			cls_flower.cookie = (unsigned long)f;
-			cls_flower.rule->match.dissector = &mask->dissector;
-			cls_flower.rule->match.mask = &mask->key;
-			cls_flower.rule->match.key = &f->mkey;
-
-			err = tc_setup_flow_action(&cls_flower.rule->action,
-						   &f->exts);
-			if (err) {
-				kfree(cls_flower.rule);
-				if (tc_skip_sw(f->flags)) {
-					NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
-					return err;
-				}
-				continue;
-			}
+	while ((f = fl_get_next_filter(tp, &handle))) {
+		if (tc_skip_hw(f->flags))
+			goto next_flow;
 
-			cls_flower.classid = f->res.classid;
+		cls_flower.rule =
+			flow_rule_alloc(tcf_exts_num_actions(&f->exts));
+		if (!cls_flower.rule) {
+			__fl_put(f);
+			return -ENOMEM;
+		}
 
-			err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+		tc_cls_common_offload_init(&cls_flower.common, tp, f->flags,
+					   extack);
+		cls_flower.command = add ?
+			TC_CLSFLOWER_REPLACE : TC_CLSFLOWER_DESTROY;
+		cls_flower.cookie = (unsigned long)f;
+		cls_flower.rule->match.dissector = &f->mask->dissector;
+		cls_flower.rule->match.mask = &f->mask->key;
+		cls_flower.rule->match.key = &f->mkey;
+
+		err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts);
+		if (err) {
 			kfree(cls_flower.rule);
-
-			if (err) {
-				if (add && tc_skip_sw(f->flags))
-					return err;
-				continue;
+			if (tc_skip_sw(f->flags)) {
+				NL_SET_ERR_MSG_MOD(extack, "Failed to setup flow action");
+				__fl_put(f);
+				return err;
 			}
+			goto next_flow;
+		}
 
-			spin_lock(&tp->lock);
-			tc_cls_offload_cnt_update(block, &f->in_hw_count,
-						  &f->flags, add);
-			spin_unlock(&tp->lock);
+		cls_flower.classid = f->res.classid;
+
+		err = cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+		kfree(cls_flower.rule);
+
+		if (err) {
+			if (add && tc_skip_sw(f->flags)) {
+				__fl_put(f);
+				return err;
+			}
+			goto next_flow;
 		}
+
+		spin_lock(&tp->lock);
+		tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags,
+					  add);
+		spin_unlock(&tp->lock);
+next_flow:
+		handle++;
+		__fl_put(f);
 	}
 
 	return 0;
-- 
2.7.4



* Re: [PATCH net-next 1/1] net: sched: ensure tc flower reoffload takes filter ref
From: Vlad Buslov @ 2019-04-03 15:53 UTC
  To: John Hurley
  Cc: Jiri Pirko, davem, xiyou.wangcong, netdev, Vlad Buslov, oss-drivers

On Wed 03 Apr 2019 at 01:53, John Hurley <john.hurley@netronome.com> wrote:
> Recent changes to TC flower remove the requirement for the rtnl lock when
> accessing and modifying filters. Refcounts now ensure that access and
> deletion do not happen concurrently. However, the reoffload function,
> which cycles through all filters and replays them to the registered hw
> drivers, is not protected.
>
> Use fl_get_next_filter() to iterate over the filters for reoffload, and
> ensure that the reference taken by this function is put when each filter
> has been processed.
>
> Signed-off-by: John Hurley <john.hurley@netronome.com>
> Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>

Hi John,

I have a very similar implementation in my next patch set, which
implements an unlocked hw offloads API, though I implemented the helpers
fl_get_next_mask() and fl_get_next_hw_filter_on_mask() to traverse
filters via their linked lists instead of performing an idr lookup on
each iteration. However, I'm not sure that optimization is necessary,
because offloading to hardware is presumably much more costly than an
idr lookup anyway.
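
For illustration only, the shape of those helpers is roughly as below.
This is a hypothetical, untested sketch: the real versions also have to
take and put references on the masks themselves, which is elided here.

static struct fl_flow_mask *fl_get_next_mask(struct cls_fl_head *head,
					     struct fl_flow_mask *mask)
{
	/* NULL mask means start from the beginning of the mask list */
	struct list_head *next = mask ? mask->list.next : head->masks.next;

	if (next == &head->masks)
		return NULL;
	return list_entry(next, struct fl_flow_mask, list);
}

static struct cls_fl_filter *
fl_get_next_hw_filter_on_mask(struct fl_flow_mask *mask,
			      struct cls_fl_filter *f)
{
	/* NULL f means start from the first filter on this mask */
	struct list_head *next = f ? f->list.next : mask->filters.next;

	while (next != &mask->filters) {
		f = list_entry(next, struct cls_fl_filter, list);
		/* Only hand back filters that want hw offload and that
		 * we can still take a reference on.
		 */
		if (!tc_skip_hw(f->flags) &&
		    refcount_inc_not_zero(&f->refcnt))
			return f;
		next = next->next;
	}
	return NULL;
}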

Thanks for doing this and FWIW:

Reviewed-by: Vlad Buslov <vladbu@mellanox.com>


* Re: [PATCH net-next 1/1] net: sched: ensure tc flower reoffload takes filter ref
From: David Miller @ 2019-04-05  0:20 UTC
  To: john.hurley; +Cc: jiri, xiyou.wangcong, netdev, vladbu, oss-drivers

From: John Hurley <john.hurley@netronome.com>
Date: Tue,  2 Apr 2019 23:53:20 +0100

> Recent changes to TC flower remove the requirement for the rtnl lock when
> accessing and modifying filters. Refcounts now ensure that access and
> deletion do not happen concurrently. However, the reoffload function,
> which cycles through all filters and replays them to the registered hw
> drivers, is not protected.
> 
> Use fl_get_next_filter() to iterate over the filters for reoffload, and
> ensure that the reference taken by this function is put when each filter
> has been processed.
> 
> Signed-off-by: John Hurley <john.hurley@netronome.com>
> Reviewed-by: Jakub Kicinski <jakub.kicinski@netronome.com>

Applied, thank you.
