* [PATCH] net: openvswitch: introduce common code for flushing flows
@ 2020-08-11 1:10 xiangxia.m.yue
2020-08-11 17:23 ` David Miller
2020-08-12 0:47 ` Cong Wang
0 siblings, 2 replies; 3+ messages in thread
From: xiangxia.m.yue @ 2020-08-11 1:10 UTC (permalink / raw)
To: joel, jknoos, gvrose8192, urezki, paulmck; +Cc: dev, netdev, rcu, Tonghao Zhang
From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
To avoid some issues, such as an RCU usage warning, we should
flush the flows under ovs_lock. This patch refactors
table_instance_destroy and introduces table_instance_flow_flush,
which can be invoked by __dp_destroy or ovs_flow_tbl_flush.
Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
---
net/openvswitch/datapath.c | 11 ++++++++++-
net/openvswitch/flow_table.c | 37 ++++++++++++++++--------------------
net/openvswitch/flow_table.h | 3 +++
3 files changed, 29 insertions(+), 22 deletions(-)
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 42f8cc70bb2c..5fec47e62615 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1756,6 +1756,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
/* Called with ovs_mutex. */
static void __dp_destroy(struct datapath *dp)
{
+ struct flow_table *table = &dp->table;
+ struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
+ struct table_instance *ti = ovsl_dereference(table->ti);
int i;
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
@@ -1774,7 +1777,13 @@ static void __dp_destroy(struct datapath *dp)
*/
ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
- /* RCU destroy the flow table */
+ /* Flush sw_flow in the tables. RCU cb only releases resource
+ * such as dp, ports and tables. That may avoid some issue
+ * (e.g RCU usage warning).
+ */
+ table_instance_flow_flush(table, ti, ufid_ti);
+
+ /* RCU destroy the ports, meters and flow tables. */
call_rcu(&dp->rcu, destroy_dp_rcu);
}
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 8c12675cbb67..513024265294 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -473,42 +473,36 @@ static void table_instance_flow_free(struct flow_table *table,
flow_mask_remove(table, flow->mask);
}
-static void table_instance_destroy(struct flow_table *table,
- struct table_instance *ti,
- struct table_instance *ufid_ti,
- bool deferred)
+/* Must be called with OVS mutex held. */
+void table_instance_flow_flush(struct flow_table *table,
+ struct table_instance *ti,
+ struct table_instance *ufid_ti)
{
int i;
- if (!ti)
- return;
-
- BUG_ON(!ufid_ti);
if (ti->keep_flows)
- goto skip_flows;
+ return;
for (i = 0; i < ti->n_buckets; i++) {
- struct sw_flow *flow;
struct hlist_head *head = &ti->buckets[i];
struct hlist_node *n;
+ struct sw_flow *flow;
hlist_for_each_entry_safe(flow, n, head,
flow_table.node[ti->node_ver]) {
table_instance_flow_free(table, ti, ufid_ti,
flow, false);
- ovs_flow_free(flow, deferred);
+ ovs_flow_free(flow, true);
}
}
+}
-skip_flows:
- if (deferred) {
- call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
- call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
- } else {
- __table_instance_destroy(ti);
- __table_instance_destroy(ufid_ti);
- }
+static void table_instance_destroy(struct table_instance *ti,
+ struct table_instance *ufid_ti)
+{
+ call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
+ call_rcu(&ufid_ti->rcu, flow_tbl_destroy_rcu_cb);
}
/* No need for locking this function is called from RCU callback or
@@ -523,7 +517,7 @@ void ovs_flow_tbl_destroy(struct flow_table *table)
call_rcu(&mc->rcu, mask_cache_rcu_cb);
call_rcu(&ma->rcu, mask_array_rcu_cb);
- table_instance_destroy(table, ti, ufid_ti, false);
+ table_instance_destroy(ti, ufid_ti);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -641,7 +635,8 @@ int ovs_flow_tbl_flush(struct flow_table *flow_table)
flow_table->count = 0;
flow_table->ufid_count = 0;
- table_instance_destroy(flow_table, old_ti, old_ufid_ti, true);
+ table_instance_flow_flush(flow_table, old_ti, old_ufid_ti);
+ table_instance_destroy(old_ti, old_ufid_ti);
return 0;
err_free_ti:
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 74ce48fecba9..6e7d4ac59353 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -105,5 +105,8 @@ void ovs_flow_mask_key(struct sw_flow_key *dst, const struct sw_flow_key *src,
bool full, const struct sw_flow_mask *mask);
void ovs_flow_masks_rebalance(struct flow_table *table);
+void table_instance_flow_flush(struct flow_table *table,
+ struct table_instance *ti,
+ struct table_instance *ufid_ti);
#endif /* flow_table.h */
--
2.23.0
^ permalink raw reply related [flat|nested] 3+ messages in thread
* Re: [PATCH] net: openvswitch: introduce common code for flushing flows
2020-08-11 1:10 [PATCH] net: openvswitch: introduce common code for flushing flows xiangxia.m.yue
@ 2020-08-11 17:23 ` David Miller
2020-08-12 0:47 ` Cong Wang
1 sibling, 0 replies; 3+ messages in thread
From: David Miller @ 2020-08-11 17:23 UTC (permalink / raw)
To: xiangxia.m.yue
Cc: joel, jknoos, gvrose8192, urezki, paulmck, dev, netdev, rcu
From: xiangxia.m.yue@gmail.com
Date: Tue, 11 Aug 2020 09:10:01 +0800
> diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
> index 42f8cc70bb2c..5fec47e62615 100644
> --- a/net/openvswitch/datapath.c
> +++ b/net/openvswitch/datapath.c
> @@ -1756,6 +1756,9 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
> /* Called with ovs_mutex. */
> static void __dp_destroy(struct datapath *dp)
> {
> + struct flow_table *table = &dp->table;
> + struct table_instance *ufid_ti = ovsl_dereference(table->ufid_ti);
> + struct table_instance *ti = ovsl_dereference(table->ti);
> int i;
>
> for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
Please use reverse christmas tree ordering for local variables.
^ permalink raw reply [flat|nested] 3+ messages in thread
* Re: [PATCH] net: openvswitch: introduce common code for flushing flows
2020-08-11 1:10 [PATCH] net: openvswitch: introduce common code for flushing flows xiangxia.m.yue
2020-08-11 17:23 ` David Miller
@ 2020-08-12 0:47 ` Cong Wang
1 sibling, 0 replies; 3+ messages in thread
From: Cong Wang @ 2020-08-12 0:47 UTC (permalink / raw)
To: Tonghao Zhang
Cc: Joel Fernandes, Johan Knöös, Gregory Rose,
Uladzislau Rezki (Sony),
Paul E . McKenney, dev, Linux Kernel Network Developers, rcu
On Mon, Aug 10, 2020 at 6:14 PM <xiangxia.m.yue@gmail.com> wrote:
>
> From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
>
> To avoid some issues, such as an RCU usage warning, we should
> flush the flows under ovs_lock. This patch refactors
> table_instance_destroy and introduces table_instance_flow_flush,
> which can be invoked by __dp_destroy or ovs_flow_tbl_flush.
>
> Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
Please add a Fixes tag here; I think it was probably your memory-leak fix
that introduced this issue. Also add a Reported-by tag, to give credit to
the bug reporters.
Plus one minor issue below:
> -static void table_instance_destroy(struct flow_table *table,
> - struct table_instance *ti,
> - struct table_instance *ufid_ti,
> - bool deferred)
> +/* Must be called with OVS mutex held. */
> +void table_instance_flow_flush(struct flow_table *table,
> + struct table_instance *ti,
> + struct table_instance *ufid_ti)
> {
> int i;
>
> - if (!ti)
> - return;
> -
> - BUG_ON(!ufid_ti);
> if (ti->keep_flows)
> - goto skip_flows;
> + return;
>
> for (i = 0; i < ti->n_buckets; i++) {
> - struct sw_flow *flow;
> struct hlist_head *head = &ti->buckets[i];
> struct hlist_node *n;
> + struct sw_flow *flow;
This is at most a coding-style change; please do not mix
coding-style changes into bug fixes. You can always push coding-style
changes separately when net-next is open.
Thanks.
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2020-08-12 0:48 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-08-11 1:10 [PATCH] net: openvswitch: introduce common code for flushing flows xiangxia.m.yue
2020-08-11 17:23 ` David Miller
2020-08-12 0:47 ` Cong Wang
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).