From: John Hurley <john.hurley@netronome.com>
To: vladbu@mellanox.com
Cc: jiri@mellanox.com, netdev@vger.kernel.org,
	simon.horman@netronome.com, jakub.kicinski@netronome.com,
	oss-drivers@netronome.com,
	John Hurley <john.hurley@netronome.com>
Subject: [RFC net-next 2/2] net: sched: fix tp destroy race conditions in flower
Date: Thu,  3 Oct 2019 00:14:32 +0100	[thread overview]
Message-ID: <1570058072-12004-3-git-send-email-john.hurley@netronome.com> (raw)
In-Reply-To: <1570058072-12004-1-git-send-email-john.hurley@netronome.com>

Flower provides rule HW offload callbacks that drivers can choose to
register for. In the deletion case, these are triggered after filters
have been removed from the lookup tables, both at the flower level and
at the higher cls_api level. With flower running without RTNL locking,
this can lead to races where HW offload messages get out of order.
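
As an illustration (not part of the patch), here is a simplified sketch
of the pre-patch __fl_delete() ordering that opens this window,
condensed from the lines the diff below removes (locking and error
handling trimmed):

/* The filter leaves the kernel lookup tables first; only afterwards is
 * the HW delete sent. Without RTNL serialising the callers, the HW
 * offload messages can get out of order relative to the kernel tables.
 */
static void old_delete_order(struct tcf_proto *tp, struct cls_fl_filter *f,
			     bool rtnl_held, struct netlink_ext_ack *extack)
{
	struct cls_fl_head *head = fl_head_dereference(tp);

	f->deleted = true;
	rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
			       f->mask->filter_ht_params);
	idr_remove(&head->handle_idr, f->handle);

	if (!tc_skip_hw(f->flags))
		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
}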

Ensure HW offloads stay in line with the kernel tables by sending the
offload messages before the kernel processing is completed. For
destroyed tcf_protos, do this in the new pre_destroy hook. Similarly,
when a filter is being added, check that it is not concurrently being
deleted before offloading it to HW, rather than the current approach of
offloading first, then checking and reversing the offload if required.
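
A minimal sketch of the intended destroy ordering at the cls_api level,
assuming the pre_destroy op added in patch 1/2 (not shown here) has the
same signature as fl_pre_destroy() below; the helper name and call site
are illustrative only, not the actual cls_api code:

static void example_tp_teardown(struct tcf_proto *tp, bool rtnl_held,
				struct netlink_ext_ack *extack)
{
	/* Issue the HW deletes while the filters are still present in
	 * the kernel lookup tables and the tp is still reachable...
	 */
	if (tp->ops->pre_destroy)
		tp->ops->pre_destroy(tp, rtnl_held, extack);

	/* ...then run the normal destroy path, which removes the
	 * filters from the kernel tables without re-sending HW deletes.
	 */
	tp->ops->destroy(tp, rtnl_held, extack);
}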

Fixes: 1d965c4def07 ("Refactor flower classifier to remove dependency on rtnl lock")
Fixes: 272ffaadeb3e ("net: sched: flower: handle concurrent tcf proto deletion")
Signed-off-by: John Hurley <john.hurley@netronome.com>
Reported-by: Louis Peens <louis.peens@netronome.com>
---
 net/sched/cls_flower.c | 55 +++++++++++++++++++++++++++-----------------------
 1 file changed, 30 insertions(+), 25 deletions(-)

diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 74221e3..3ac47b5 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -513,13 +513,16 @@ static struct cls_fl_filter *__fl_get(struct cls_fl_head *head, u32 handle)
 }
 
 static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
-		       bool *last, bool rtnl_held,
+		       bool *last, bool rtnl_held, bool do_hw,
 		       struct netlink_ext_ack *extack)
 {
 	struct cls_fl_head *head = fl_head_dereference(tp);
 
 	*last = false;
 
+	if (do_hw && !tc_skip_hw(f->flags))
+		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
+
 	spin_lock(&tp->lock);
 	if (f->deleted) {
 		spin_unlock(&tp->lock);
@@ -534,8 +537,6 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
 	spin_unlock(&tp->lock);
 
 	*last = fl_mask_put(head, f->mask);
-	if (!tc_skip_hw(f->flags))
-		fl_hw_destroy_filter(tp, f, rtnl_held, extack);
 	tcf_unbind_filter(tp, &f->res);
 	__fl_put(f);
 
@@ -563,7 +564,7 @@ static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
 
 	list_for_each_entry_safe(mask, next_mask, &head->masks, list) {
 		list_for_each_entry_safe(f, next, &mask->filters, list) {
-			__fl_delete(tp, f, &last, rtnl_held, extack);
+			__fl_delete(tp, f, &last, rtnl_held, false, extack);
 			if (last)
 				break;
 		}
@@ -574,6 +575,19 @@ static void fl_destroy(struct tcf_proto *tp, bool rtnl_held,
 	tcf_queue_work(&head->rwork, fl_destroy_sleepable);
 }
 
+static void fl_pre_destroy(struct tcf_proto *tp, bool rtnl_held,
+			   struct netlink_ext_ack *extack)
+{
+	struct cls_fl_head *head = fl_head_dereference(tp);
+	struct fl_flow_mask *mask, *next_mask;
+	struct cls_fl_filter *f, *next;
+
+	list_for_each_entry_safe(mask, next_mask, &head->masks, list)
+		list_for_each_entry_safe(f, next, &mask->filters, list)
+			if (!tc_skip_hw(f->flags))
+				fl_hw_destroy_filter(tp, f, rtnl_held, extack);
+}
+
 static void fl_put(struct tcf_proto *tp, void *arg)
 {
 	struct cls_fl_filter *f = arg;
@@ -1588,6 +1602,13 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	if (err)
 		goto errout_mask;
 
+	spin_lock(&tp->lock);
+	if (tp->deleting || (fold && fold->deleted)) {
+		err = -EAGAIN;
+		goto errout_lock;
+	}
+	spin_unlock(&tp->lock);
+
 	if (!tc_skip_hw(fnew->flags)) {
 		err = fl_hw_replace_filter(tp, fnew, rtnl_held, extack);
 		if (err)
@@ -1598,22 +1619,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 
 	spin_lock(&tp->lock);
-
-	/* tp was deleted concurrently. -EAGAIN will cause caller to lookup
-	 * proto again or create new one, if necessary.
-	 */
-	if (tp->deleting) {
-		err = -EAGAIN;
-		goto errout_hw;
-	}
-
 	if (fold) {
-		/* Fold filter was deleted concurrently. Retry lookup. */
-		if (fold->deleted) {
-			err = -EAGAIN;
-			goto errout_hw;
-		}
-
 		fnew->handle = handle;
 
 		if (!in_ht) {
@@ -1624,7 +1630,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 						     &fnew->ht_node,
 						     params);
 			if (err)
-				goto errout_hw;
+				goto errout_lock;
 			in_ht = true;
 		}
 
@@ -1667,7 +1673,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 					    INT_MAX, GFP_ATOMIC);
 		}
 		if (err)
-			goto errout_hw;
+			goto errout_lock;
 
 		refcount_inc(&fnew->refcnt);
 		fnew->handle = handle;
@@ -1683,11 +1689,9 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 
 errout_ht:
 	spin_lock(&tp->lock);
-errout_hw:
+errout_lock:
 	fnew->deleted = true;
 	spin_unlock(&tp->lock);
-	if (!tc_skip_hw(fnew->flags))
-		fl_hw_destroy_filter(tp, fnew, rtnl_held, NULL);
 	if (in_ht)
 		rhashtable_remove_fast(&fnew->mask->ht, &fnew->ht_node,
 				       fnew->mask->filter_ht_params);
@@ -1713,7 +1717,7 @@ static int fl_delete(struct tcf_proto *tp, void *arg, bool *last,
 	bool last_on_mask;
 	int err = 0;
 
-	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, extack);
+	err = __fl_delete(tp, f, &last_on_mask, rtnl_held, true, extack);
 	*last = list_empty(&head->masks);
 	__fl_put(f);
 
@@ -2509,6 +2513,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
 	.kind		= "flower",
 	.classify	= fl_classify,
 	.init		= fl_init,
+	.pre_destroy	= fl_pre_destroy,
 	.destroy	= fl_destroy,
 	.get		= fl_get,
 	.put		= fl_put,
-- 
2.7.4


Thread overview: 11+ messages
2019-10-02 23:14 [RFC net-next 0/2] prevent sync issues with hw offload of flower John Hurley
2019-10-02 23:14 ` [RFC net-next 1/2] net: sched: add tp_op for pre_destroy John Hurley
2019-10-02 23:14 ` John Hurley [this message]
2019-10-03 16:18   ` [RFC net-next 2/2] net: sched: fix tp destroy race conditions in flower Vlad Buslov
2019-10-03 16:39     ` John Hurley
2019-10-03 16:26 ` [RFC net-next 0/2] prevent sync issues with hw offload of flower Vlad Buslov
2019-10-03 16:59   ` John Hurley
2019-10-03 17:19     ` Vlad Buslov
2019-10-04 15:39       ` John Hurley
2019-10-04 15:58         ` Vlad Buslov
2019-10-04 16:06           ` John Hurley
