From: Vlad Buslov <vladbu@mellanox.com>
To: netdev@vger.kernel.org
Cc: jhs@mojatatu.com, xiyou.wangcong@gmail.com, jiri@resnulli.us,
davem@davemloft.net, Vlad Buslov <vladbu@mellanox.com>
Subject: [PATCH net-next 10/12] net: sched: flower: protect flower classifier state with spinlock
Date: Thu, 14 Feb 2019 09:47:10 +0200 [thread overview]
Message-ID: <20190214074712.17846-11-vladbu@mellanox.com> (raw)
In-Reply-To: <20190214074712.17846-1-vladbu@mellanox.com>
struct tcf_proto was extended with a spinlock to be used by classifiers
instead of the global rtnl lock. Use it to protect shared flower classifier
data structures (handle_idr, mask hashtable and list) and fields of
individual filters that can be accessed concurrently. This patch set uses
tcf_proto->lock as a per-instance lock that protects all filters on a
tcf_proto.
Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko <jiri@mellanox.com>
---
net/sched/cls_flower.c | 36 ++++++++++++++++++++++++++++++------
1 file changed, 30 insertions(+), 6 deletions(-)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index bfef7d6c597d..556f7a1c694a 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -384,7 +384,9 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
cls_flower.cookie = (unsigned long) f;
tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
+ spin_lock(&tp->lock);
tcf_block_offload_dec(block, &f->flags);
+ spin_unlock(&tp->lock);
}
static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -422,7 +424,9 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
return err;
} else if (err > 0) {
f->in_hw_count = err;
+ spin_lock(&tp->lock);
tcf_block_offload_inc(block, &f->flags);
+ spin_unlock(&tp->lock);
}
if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
@@ -510,18 +514,22 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
(*last) = false;
+ spin_lock(&tp->lock);
if (!f->deleted) {
f->deleted = true;
rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
f->mask->filter_ht_params);
idr_remove(&head->handle_idr, f->handle);
list_del_rcu(&f->list);
+ spin_unlock(&tp->lock);
+
(*last) = fl_mask_put(head, f->mask, async);
if (!tc_skip_hw(f->flags))
fl_hw_destroy_filter(tp, f, extack);
tcf_unbind_filter(tp, &f->res);
__fl_put(f);
} else {
+ spin_unlock(&tp->lock);
err = -ENOENT;
}
@@ -1497,6 +1505,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!tc_in_hw(fnew->flags))
fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
+ spin_lock(&tp->lock);
+
/* tp was deleted concurrently. EAGAIN will cause caller to lookup proto
* again or create new one, if necessary.
*/
@@ -1527,6 +1537,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
list_replace_rcu(&fold->list, &fnew->list);
fold->deleted = true;
+ spin_unlock(&tp->lock);
+
fl_mask_put(head, fold->mask, true);
if (!tc_skip_hw(fold->flags))
fl_hw_destroy_filter(tp, fold, NULL);
@@ -1571,6 +1583,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
goto errout_idr;
list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
+ spin_unlock(&tp->lock);
}
*arg = fnew;
@@ -1583,6 +1596,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
if (!fold)
idr_remove(&head->handle_idr, fnew->handle);
errout_hw:
+ spin_unlock(&tp->lock);
if (!tc_skip_hw(fnew->flags))
fl_hw_destroy_filter(tp, fnew, NULL);
errout_mask:
@@ -1681,8 +1695,10 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
continue;
}
+ spin_lock(&tp->lock);
tc_cls_offload_cnt_update(block, &f->in_hw_count,
&f->flags, add);
+ spin_unlock(&tp->lock);
}
}
@@ -2216,6 +2232,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
struct cls_fl_filter *f = fh;
struct nlattr *nest;
struct fl_flow_key *key, *mask;
+ bool skip_hw;
if (!f)
return skb->len;
@@ -2226,21 +2243,26 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
if (!nest)
goto nla_put_failure;
+ spin_lock(&tp->lock);
+
if (f->res.classid &&
nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
- goto nla_put_failure;
+ goto nla_put_failure_locked;
key = &f->key;
mask = &f->mask->key;
+ skip_hw = tc_skip_hw(f->flags);
if (fl_dump_key(skb, net, key, mask))
- goto nla_put_failure;
-
- if (!tc_skip_hw(f->flags))
- fl_hw_update_stats(tp, f);
+ goto nla_put_failure_locked;
if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
- goto nla_put_failure;
+ goto nla_put_failure_locked;
+
+ spin_unlock(&tp->lock);
+
+ if (!skip_hw)
+ fl_hw_update_stats(tp, f);
if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
goto nla_put_failure;
@@ -2255,6 +2277,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
return skb->len;
+nla_put_failure_locked:
+ spin_unlock(&tp->lock);
nla_put_failure:
nla_nest_cancel(skb, nest);
return -1;
--
2.13.6
next prev parent reply other threads:[~2019-02-14 7:47 UTC|newest]
Thread overview: 44+ messages / expand[flat|nested] mbox.gz Atom feed top
2019-02-14 7:47 [PATCH net-next 00/12] Refactor flower classifier to remove dependency on rtnl lock Vlad Buslov
2019-02-14 7:47 ` [PATCH net-next 01/12] net: sched: flower: don't check for rtnl on head dereference Vlad Buslov
2019-02-18 19:08 ` Cong Wang
2019-02-19 9:45 ` Vlad Buslov
2019-02-20 22:33 ` Cong Wang
2019-02-21 17:45 ` Vlad Buslov
2019-02-22 19:32 ` Cong Wang
2019-02-25 16:11 ` Vlad Buslov
2019-02-25 22:39 ` Cong Wang
2019-02-26 14:57 ` Vlad Buslov
2019-02-28 0:49 ` Cong Wang
2019-02-28 18:35 ` Vlad Buslov
2019-03-02 0:51 ` Cong Wang
2019-02-14 7:47 ` [PATCH net-next 02/12] net: sched: flower: refactor fl_change Vlad Buslov
2019-02-14 20:34 ` Stefano Brivio
2019-02-15 10:38 ` Vlad Buslov
2019-02-15 10:47 ` Stefano Brivio
2019-02-15 16:25 ` Vlad Buslov
2019-02-18 18:20 ` Stefano Brivio
2019-02-14 7:47 ` [PATCH net-next 03/12] net: sched: flower: introduce reference counting for filters Vlad Buslov
2019-02-14 20:34 ` Stefano Brivio
2019-02-15 11:22 ` Vlad Buslov
2019-02-15 12:32 ` Stefano Brivio
2019-02-14 7:47 ` [PATCH net-next 04/12] net: sched: flower: track filter deletion with flag Vlad Buslov
2019-02-14 20:49 ` Stefano Brivio
2019-02-15 15:54 ` Vlad Buslov
2019-02-14 7:47 ` [PATCH net-next 05/12] net: sched: flower: add reference counter to flower mask Vlad Buslov
2019-02-14 7:47 ` [PATCH net-next 06/12] net: sched: flower: handle concurrent mask insertion Vlad Buslov
2019-02-15 22:46 ` Stefano Brivio
2019-02-14 7:47 ` [PATCH net-next 07/12] net: sched: flower: protect masks list with spinlock Vlad Buslov
2019-02-14 7:47 ` [PATCH net-next 08/12] net: sched: flower: handle concurrent filter insertion in fl_change Vlad Buslov
2019-02-14 7:47 ` [PATCH net-next 09/12] net: sched: flower: handle concurrent tcf proto deletion Vlad Buslov
2019-02-18 20:47 ` Cong Wang
2019-02-19 14:08 ` Vlad Buslov
2019-02-14 7:47 ` Vlad Buslov [this message]
2019-02-14 7:47 ` [PATCH net-next 11/12] net: sched: flower: track rtnl lock state Vlad Buslov
2019-02-15 22:46 ` Stefano Brivio
2019-02-18 9:35 ` Vlad Buslov
2019-02-14 7:47 ` [PATCH net-next 12/12] net: sched: flower: set unlocked flag for flower proto ops Vlad Buslov
2019-02-18 19:27 ` Cong Wang
2019-02-19 10:15 ` Vlad Buslov
2019-02-20 22:36 ` Cong Wang
2019-02-18 19:15 ` [PATCH net-next 00/12] Refactor flower classifier to remove dependency on rtnl lock Cong Wang
2019-02-19 10:00 ` Vlad Buslov
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20190214074712.17846-11-vladbu@mellanox.com \
--to=vladbu@mellanox.com \
--cc=davem@davemloft.net \
--cc=jhs@mojatatu.com \
--cc=jiri@resnulli.us \
--cc=netdev@vger.kernel.org \
--cc=xiyou.wangcong@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).