All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH net v2] flow_offload: fix incorrect cleanup for indirect flow_blocks
@ 2020-06-11 10:03 wenxu
  2020-06-11 11:05 ` Pablo Neira Ayuso
  0 siblings, 1 reply; 3+ messages in thread
From: wenxu @ 2020-06-11 10:03 UTC (permalink / raw)
  To: netdev; +Cc: davem, pablo

From: wenxu <wenxu@ucloud.cn>

If the representor is removed, then identify the indirect
flow_blocks that need to be removed by the release callback.

Fixes: 1fac52da5942 ("net: flow_offload: consolidate indirect flow_block infrastructure")
Signed-off-by: wenxu <wenxu@ucloud.cn>
---
 drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c        | 2 +-
 drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c | 2 +-
 drivers/net/ethernet/netronome/nfp/flower/main.c    | 2 +-
 drivers/net/ethernet/netronome/nfp/flower/main.h    | 3 +--
 drivers/net/ethernet/netronome/nfp/flower/offload.c | 6 +++---
 include/net/flow_offload.h                          | 2 +-
 net/core/flow_offload.c                             | 9 ++++-----
 7 files changed, 12 insertions(+), 14 deletions(-)

diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 0eef4f5..ef7f6bc 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -2074,7 +2074,7 @@ void bnxt_shutdown_tc(struct bnxt *bp)
 		return;
 
 	flow_indr_dev_unregister(bnxt_tc_setup_indr_cb, bp,
-				 bnxt_tc_setup_indr_block_cb);
+				 bnxt_tc_setup_indr_rel);
 	rhashtable_destroy(&tc_info->flow_table);
 	rhashtable_destroy(&tc_info->l2_table);
 	rhashtable_destroy(&tc_info->decap_l2_table);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
index 80713123..a62bcf0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/tc.c
@@ -496,7 +496,7 @@ int mlx5e_rep_tc_netdevice_event_register(struct mlx5e_rep_priv *rpriv)
 void mlx5e_rep_tc_netdevice_event_unregister(struct mlx5e_rep_priv *rpriv)
 {
 	flow_indr_dev_unregister(mlx5e_rep_indr_setup_cb, rpriv,
-				 mlx5e_rep_indr_setup_tc_cb);
+				 mlx5e_rep_indr_block_unbind);
 }
 
 #if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.c b/drivers/net/ethernet/netronome/nfp/flower/main.c
index c393276..bb448c8 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.c
@@ -861,7 +861,7 @@ static void nfp_flower_clean(struct nfp_app *app)
 	flush_work(&app_priv->cmsg_work);
 
 	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
-				 nfp_flower_setup_indr_block_cb);
+				 nfp_flower_setup_indr_tc_release);
 
 	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
 		nfp_flower_qos_cleanup(app);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 6c3dc3b..c983337 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -460,8 +460,7 @@ int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
 void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb);
 int nfp_flower_indr_setup_tc_cb(struct net_device *netdev, void *cb_priv,
 				enum tc_setup_type type, void *type_data);
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type, void *type_data,
-				   void *cb_priv);
+void nfp_flower_setup_indr_tc_release(void *cb_priv);
 
 void
 __nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv);
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 695d24b9..28de905 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -1619,8 +1619,8 @@ struct nfp_flower_indr_block_cb_priv {
 	return NULL;
 }
 
-int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
-				   void *type_data, void *cb_priv)
+static int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
+					  void *type_data, void *cb_priv)
 {
 	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
 	struct flow_cls_offload *flower = type_data;
@@ -1637,7 +1637,7 @@ int nfp_flower_setup_indr_block_cb(enum tc_setup_type type,
 	}
 }
 
-static void nfp_flower_setup_indr_tc_release(void *cb_priv)
+void nfp_flower_setup_indr_tc_release(void *cb_priv)
 {
 	struct nfp_flower_indr_block_cb_priv *priv = cb_priv;
 
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index f2c8311..3a2d6b4 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -536,7 +536,7 @@ typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
 
 int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv);
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-			      flow_setup_cb_t *setup_cb);
+			      void (*release)(void *cb_priv));
 int flow_indr_dev_setup_offload(struct net_device *dev,
 				enum tc_setup_type type, void *data,
 				struct flow_block_offload *bo,
diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
index 0cfc35e..40eaf64 100644
--- a/net/core/flow_offload.c
+++ b/net/core/flow_offload.c
@@ -372,14 +372,13 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
 }
 EXPORT_SYMBOL(flow_indr_dev_register);
 
-static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
+static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
 				      struct list_head *cleanup_list)
 {
 	struct flow_block_cb *this, *next;
 
 	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
-		if (this->cb == setup_cb &&
-		    this->cb_priv == cb_priv) {
+		if (this->release == release) {
 			list_move(&this->indr.list, cleanup_list);
 			return;
 		}
@@ -397,7 +396,7 @@ static void flow_block_indr_notify(struct list_head *cleanup_list)
 }
 
 void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
-			      flow_setup_cb_t *setup_cb)
+			      void (*release)(void *cb_priv))
 {
 	struct flow_indr_dev *this, *next, *indr_dev = NULL;
 	LIST_HEAD(cleanup_list);
@@ -418,7 +417,7 @@ void flow_indr_dev_unregister(flow_indr_block_bind_cb_t *cb, void *cb_priv,
 		return;
 	}
 
-	__flow_block_indr_cleanup(setup_cb, cb_priv, &cleanup_list);
+	__flow_block_indr_cleanup(release, &cleanup_list);
 	mutex_unlock(&flow_indr_block_lock);
 
 	flow_block_indr_notify(&cleanup_list);
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH net v2] flow_offload: fix incorrect cleanup for indirect flow_blocks
  2020-06-11 10:03 [PATCH net v2] flow_offload: fix incorrect cleanup for indirect flow_blocks wenxu
@ 2020-06-11 11:05 ` Pablo Neira Ayuso
  2020-06-11 16:36   ` wenxu
  0 siblings, 1 reply; 3+ messages in thread
From: Pablo Neira Ayuso @ 2020-06-11 11:05 UTC (permalink / raw)
  To: wenxu; +Cc: netdev, davem

On Thu, Jun 11, 2020 at 06:03:17PM +0800, wenxu@ucloud.cn wrote:
[...]
> diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
> index 0cfc35e..40eaf64 100644
> --- a/net/core/flow_offload.c
> +++ b/net/core/flow_offload.c
> @@ -372,14 +372,13 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
>  }
>  EXPORT_SYMBOL(flow_indr_dev_register);
>  
> -static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
> +static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
>  				      struct list_head *cleanup_list)
>  {
>  	struct flow_block_cb *this, *next;
>  
>  	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
> -		if (this->cb == setup_cb &&
> -		    this->cb_priv == cb_priv) {
> +		if (this->release == release) {

Are you sure this is correct?

This will remove _all_ existing representors in this driver.

This will not work if only one representor is gone?

Please, describe what scenario you are trying to fix.

Thank you.

^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH net v2] flow_offload: fix incorrect cleanup for indirect flow_blocks
  2020-06-11 11:05 ` Pablo Neira Ayuso
@ 2020-06-11 16:36   ` wenxu
  0 siblings, 0 replies; 3+ messages in thread
From: wenxu @ 2020-06-11 16:36 UTC (permalink / raw)
  To: Pablo Neira Ayuso; +Cc: netdev, davem


在 2020/6/11 19:05, Pablo Neira Ayuso 写道:
> On Thu, Jun 11, 2020 at 06:03:17PM +0800, wenxu@ucloud.cn wrote:
> [...]
>> diff --git a/net/core/flow_offload.c b/net/core/flow_offload.c
>> index 0cfc35e..40eaf64 100644
>> --- a/net/core/flow_offload.c
>> +++ b/net/core/flow_offload.c
>> @@ -372,14 +372,13 @@ int flow_indr_dev_register(flow_indr_block_bind_cb_t *cb, void *cb_priv)
>>   }
>>   EXPORT_SYMBOL(flow_indr_dev_register);
>>   
>> -static void __flow_block_indr_cleanup(flow_setup_cb_t *setup_cb, void *cb_priv,
>> +static void __flow_block_indr_cleanup(void (*release)(void *cb_priv),
>>   				      struct list_head *cleanup_list)
>>   {
>>   	struct flow_block_cb *this, *next;
>>   
>>   	list_for_each_entry_safe(this, next, &flow_block_indr_list, indr.list) {
>> -		if (this->cb == setup_cb &&
>> -		    this->cb_priv == cb_priv) {
>> +		if (this->release == release) {
> Are you sure this is correct?
>
> This will remove _all_ existing representors in this driver.
>
> This will not work if only one representor is gone?
>
> Please, describe what scenario you are trying to fix.
>
> Thank you.

Yes, you are right. But there is still another problem.

The match statement this->cb_priv == cb_priv always returns false

because the flow_block_cb->cb_priv is totally different data from 
flow_indr_dev->cb_priv
 
in the drivers.

>

^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2020-06-11 16:38 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-11 10:03 [PATCH net v2] flow_offload: fix incorrect cleanup for indirect flow_blocks wenxu
2020-06-11 11:05 ` Pablo Neira Ayuso
2020-06-11 16:36   ` wenxu

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.