From: Vlad Buslov <vladbu@mellanox.com>
To: netdev@vger.kernel.org
Cc: jhs@mojatatu.com, xiyou.wangcong@gmail.com, jiri@resnulli.us,
    davem@davemloft.net, Vlad Buslov <vladbu@mellanox.com>
Subject: [PATCH net-next 10/12] net: sched: flower: protect flower classifier state with spinlock
Date: Thu, 14 Feb 2019 09:47:10 +0200
Message-Id: <20190214074712.17846-11-vladbu@mellanox.com>
X-Mailer: git-send-email 2.13.6
In-Reply-To: <20190214074712.17846-1-vladbu@mellanox.com>
References: <20190214074712.17846-1-vladbu@mellanox.com>

struct tcf_proto was extended with a spinlock that classifiers can use
instead of the global rtnl lock. Use it to protect the shared flower
classifier data structures (handle_idr, the mask hashtable and list) and
the fields of individual filters that can be accessed concurrently. This
patch set uses tcf_proto->lock as a per-instance lock that protects all
filters on the tcf_proto instance.

Signed-off-by: Vlad Buslov <vladbu@mellanox.com>
Acked-by: Jiri Pirko
---
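Note on the locking scheme (a sketch only, not part of the patch): the
pattern throughout is to take tp->lock around updates to shared classifier
state and drop it before slow work such as hardware offload calls. Below is
a minimal userspace analogue, where my_proto, my_filter and the pthread
mutex are illustrative stand-ins for tcf_proto, cls_fl_filter and
tcf_proto->lock:

/*
 * Illustrative sketch only -- not from the patch. "tp->lock" becomes a
 * pthread mutex; the filter's deleted flag and its list linkage are only
 * touched while that mutex is held.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct my_filter {
	bool deleted;			/* protected by my_proto.lock */
	struct my_filter *next;		/* list linkage, same lock */
};

struct my_proto {
	pthread_mutex_t lock;		/* stand-in for tcf_proto->lock */
	struct my_filter *filters;	/* shared filter list */
};

/* Mirrors the __fl_delete() flow: mark and unlink under the lock, fail if
 * a concurrent delete got there first (the -ENOENT path), and do the slow
 * teardown (hw offload removal, final put) with the lock dropped.
 */
static bool my_delete(struct my_proto *tp, struct my_filter *f)
{
	struct my_filter **pp;

	pthread_mutex_lock(&tp->lock);
	if (f->deleted) {
		pthread_mutex_unlock(&tp->lock);
		return false;		/* lost the race: already deleted */
	}
	f->deleted = true;
	for (pp = &tp->filters; *pp; pp = &(*pp)->next)
		if (*pp == f) {
			*pp = f->next;	/* unlink while still locked */
			break;
		}
	pthread_mutex_unlock(&tp->lock);

	/* slow work happens outside the lock, as in the patch */
	free(f);
	return true;
}

int main(void)
{
	struct my_proto tp = { .lock = PTHREAD_MUTEX_INITIALIZER };
	struct my_filter *f = calloc(1, sizeof(*f));

	tp.filters = f;
	printf("first delete: %d\n", my_delete(&tp, f));	/* prints 1 */
	return 0;
}

The same lock/unlock split is what lets __fl_delete() in the diff below
call fl_hw_destroy_filter() and __fl_put() without holding the spinlock.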
 net/sched/cls_flower.c | 36 ++++++++++++++++++++++++++++++------
 1 file changed, 30 insertions(+), 6 deletions(-)

diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index bfef7d6c597d..556f7a1c694a 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -384,7 +384,9 @@ static void fl_hw_destroy_filter(struct tcf_proto *tp, struct cls_fl_filter *f,
 	cls_flower.cookie = (unsigned long) f;
 
 	tc_setup_cb_call(block, TC_SETUP_CLSFLOWER, &cls_flower, false);
+	spin_lock(&tp->lock);
 	tcf_block_offload_dec(block, &f->flags);
+	spin_unlock(&tp->lock);
 }
 
 static int fl_hw_replace_filter(struct tcf_proto *tp,
@@ -422,7 +424,9 @@ static int fl_hw_replace_filter(struct tcf_proto *tp,
 		return err;
 	} else if (err > 0) {
 		f->in_hw_count = err;
+		spin_lock(&tp->lock);
 		tcf_block_offload_inc(block, &f->flags);
+		spin_unlock(&tp->lock);
 	}
 
 	if (skip_sw && !(f->flags & TCA_CLS_FLAGS_IN_HW))
@@ -510,18 +514,22 @@ static int __fl_delete(struct tcf_proto *tp, struct cls_fl_filter *f,
 
 	(*last) = false;
 
+	spin_lock(&tp->lock);
 	if (!f->deleted) {
 		f->deleted = true;
 		rhashtable_remove_fast(&f->mask->ht, &f->ht_node,
 				       f->mask->filter_ht_params);
 		idr_remove(&head->handle_idr, f->handle);
 		list_del_rcu(&f->list);
+		spin_unlock(&tp->lock);
+
 		(*last) = fl_mask_put(head, f->mask, async);
 		if (!tc_skip_hw(f->flags))
 			fl_hw_destroy_filter(tp, f, extack);
 		tcf_unbind_filter(tp, &f->res);
 		__fl_put(f);
 	} else {
+		spin_unlock(&tp->lock);
 		err = -ENOENT;
 	}
 
@@ -1497,6 +1505,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	if (!tc_in_hw(fnew->flags))
 		fnew->flags |= TCA_CLS_FLAGS_NOT_IN_HW;
 
+	spin_lock(&tp->lock);
+
 	/* tp was deleted concurrently. EAGAIN will cause caller to lookup proto
 	 * again or create new one, if necessary.
 	 */
@@ -1527,6 +1537,8 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 		list_replace_rcu(&fold->list, &fnew->list);
 		fold->deleted = true;
 
+		spin_unlock(&tp->lock);
+
 		fl_mask_put(head, fold->mask, true);
 		if (!tc_skip_hw(fold->flags))
 			fl_hw_destroy_filter(tp, fold, NULL);
@@ -1571,6 +1583,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 			goto errout_idr;
 
 		list_add_tail_rcu(&fnew->list, &fnew->mask->filters);
+		spin_unlock(&tp->lock);
 	}
 
 	*arg = fnew;
@@ -1583,6 +1596,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
 	if (!fold)
 		idr_remove(&head->handle_idr, fnew->handle);
errout_hw:
+	spin_unlock(&tp->lock);
 	if (!tc_skip_hw(fnew->flags))
 		fl_hw_destroy_filter(tp, fnew, NULL);
errout_mask:
@@ -1681,8 +1695,10 @@ static int fl_reoffload(struct tcf_proto *tp, bool add, tc_setup_cb_t *cb,
 				continue;
 			}
 
+			spin_lock(&tp->lock);
 			tc_cls_offload_cnt_update(block, &f->in_hw_count,
 						  &f->flags, add);
+			spin_unlock(&tp->lock);
 		}
 	}
 
@@ -2216,6 +2232,7 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
 	struct cls_fl_filter *f = fh;
 	struct nlattr *nest;
 	struct fl_flow_key *key, *mask;
+	bool skip_hw;
 
 	if (!f)
 		return skb->len;
@@ -2226,21 +2243,26 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
 	if (!nest)
 		goto nla_put_failure;
 
+	spin_lock(&tp->lock);
+
 	if (f->res.classid &&
 	    nla_put_u32(skb, TCA_FLOWER_CLASSID, f->res.classid))
-		goto nla_put_failure;
+		goto nla_put_failure_locked;
 
 	key = &f->key;
 	mask = &f->mask->key;
+	skip_hw = tc_skip_hw(f->flags);
 
 	if (fl_dump_key(skb, net, key, mask))
-		goto nla_put_failure;
-
-	if (!tc_skip_hw(f->flags))
-		fl_hw_update_stats(tp, f);
+		goto nla_put_failure_locked;
 
 	if (f->flags && nla_put_u32(skb, TCA_FLOWER_FLAGS, f->flags))
-		goto nla_put_failure;
+		goto nla_put_failure_locked;
+
+	spin_unlock(&tp->lock);
+
+	if (!skip_hw)
+		fl_hw_update_stats(tp, f);
 
 	if (nla_put_u32(skb, TCA_FLOWER_IN_HW_COUNT, f->in_hw_count))
 		goto nla_put_failure;
@@ -2255,6 +2277,8 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, void *fh,
 
 	return skb->len;
 
+nla_put_failure_locked:
+	spin_unlock(&tp->lock);
 nla_put_failure:
 	nla_nest_cancel(skb, nest);
 	return -1;
-- 
2.13.6
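
A note on the fl_dump() hunk above (a sketch only, not part of the patch):
tc_skip_hw(f->flags) is cached in skip_hw while tp->lock is held, the lock
is dropped, and only then is fl_hw_update_stats() called. A minimal
userspace analogue of that snapshot-then-unlock pattern, where my_state and
the pthread mutex are illustrative stand-ins:

/*
 * Illustrative sketch only -- not from the patch. Decide under the lock,
 * act on the snapshot after the lock is dropped.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define SKIP_HW 0x1u

struct my_state {
	pthread_mutex_t lock;	/* stand-in for tcf_proto->lock */
	unsigned int flags;	/* protected by lock */
};

static void slow_hw_stats_update(struct my_state *s)
{
	/* stands in for fl_hw_update_stats(): potentially slow driver
	 * work, so it must not run under the spinlock */
	(void)s;
}

static void dump(struct my_state *s)
{
	bool skip_hw;

	pthread_mutex_lock(&s->lock);
	skip_hw = s->flags & SKIP_HW;	/* snapshot under the lock */
	/* ... emit attributes that read other locked fields ... */
	pthread_mutex_unlock(&s->lock);

	if (!skip_hw)
		slow_hw_stats_update(s);	/* lock already dropped */
}

int main(void)
{
	struct my_state s = { .lock = PTHREAD_MUTEX_INITIALIZER, .flags = 0 };

	dump(&s);
	printf("dump done\n");
	return 0;
}

Snapshotting under the lock keeps the decision consistent with the flags
the dump emitted, while the potentially blocking stats update runs
lock-free.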