* [PATCH net-next] net: sched: gred: dynamically allocate tc_gred_qopt_offload
From: Arnd Bergmann @ 2021-10-19 15:37 UTC
  To: Jamal Hadi Salim, Cong Wang, Jiri Pirko, David S. Miller,
	Jakub Kicinski, Ahmed S. Darwish, Sebastian Andrzej Siewior
  Cc: Arnd Bergmann, Zheng Yongjun, Eric Dumazet, netdev, linux-kernel

From: Arnd Bergmann <arnd@arndb.de>

The tc_gred_qopt_offload structure has grown too big to be kept on the
stack on 32-bit architectures after recent changes.

net/sched/sch_gred.c:903:13: error: stack frame size (1180) exceeds limit (1024) in 'gred_destroy' [-Werror,-Wframe-larger-than]
net/sched/sch_gred.c:310:13: error: stack frame size (1212) exceeds limit (1024) in 'gred_offload' [-Werror,-Wframe-larger-than]

Use dynamic allocation to avoid this. Unfortunately, this introduces
a new problem in gred_destroy(), which cannot recover from a failure
to allocate memory, and that may be worse than the potential
stack overflow risk.

I am not sure what a better approach would be.
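
For reference, the structure that ends up on the stack looks roughly
like this (a sketch paraphrased from the include/net/pkt_cls.h
definitions of that time, from memory rather than verbatim): with
MAX_DPs being 16, the tab[] array in 'set' and the per-DP arrays in
'stats' dominate the size, and the recent stats changes (see the
Fixes tags below) grew the per-DP entries further on 32-bit.

	struct tc_gred_qopt_offload {
		enum tc_gred_command command;
		u32 handle;
		u32 parent;
		union {
			struct tc_gred_qopt_offload_params set;   /* includes tab[MAX_DPs] */
			struct tc_gred_qopt_offload_stats stats;  /* per-DP stats arrays */
		};
	};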

Fixes: 50dc9a8572aa ("net: sched: Merge Qdisc::bstats and Qdisc::cpu_bstats data types")
Fixes: 67c9e6270f30 ("net: sched: Protect Qdisc::bstats with u64_stats")
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
---
 net/sched/sch_gred.c | 64 +++++++++++++++++++++++++-------------------
 1 file changed, 36 insertions(+), 28 deletions(-)

diff --git a/net/sched/sch_gred.c b/net/sched/sch_gred.c
index 72de08ef8335..59c55c6cf3ea 100644
--- a/net/sched/sch_gred.c
+++ b/net/sched/sch_gred.c
@@ -307,46 +307,53 @@ static void gred_reset(struct Qdisc *sch)
 	}
 }
 
-static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
+static int gred_offload(struct Qdisc *sch, enum tc_gred_command command)
 {
 	struct gred_sched *table = qdisc_priv(sch);
 	struct net_device *dev = qdisc_dev(sch);
-	struct tc_gred_qopt_offload opt = {
-		.command	= command,
-		.handle		= sch->handle,
-		.parent		= sch->parent,
-	};
+	struct tc_gred_qopt_offload *opt;
 
 	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
-		return;
+		return -ENXIO;
+
+	opt = kzalloc(sizeof(*opt), GFP_KERNEL);
+	if (!opt)
+		return -ENOMEM;
+
+	opt->command = command;
+	opt->handle = sch->handle;
+	opt->parent = sch->parent;
 
 	if (command == TC_GRED_REPLACE) {
 		unsigned int i;
 
-		opt.set.grio_on = gred_rio_mode(table);
-		opt.set.wred_on = gred_wred_mode(table);
-		opt.set.dp_cnt = table->DPs;
-		opt.set.dp_def = table->def;
+		opt->set.grio_on = gred_rio_mode(table);
+		opt->set.wred_on = gred_wred_mode(table);
+		opt->set.dp_cnt = table->DPs;
+		opt->set.dp_def = table->def;
 
 		for (i = 0; i < table->DPs; i++) {
 			struct gred_sched_data *q = table->tab[i];
 
 			if (!q)
 				continue;
-			opt.set.tab[i].present = true;
-			opt.set.tab[i].limit = q->limit;
-			opt.set.tab[i].prio = q->prio;
-			opt.set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
-			opt.set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
-			opt.set.tab[i].is_ecn = gred_use_ecn(q);
-			opt.set.tab[i].is_harddrop = gred_use_harddrop(q);
-			opt.set.tab[i].probability = q->parms.max_P;
-			opt.set.tab[i].backlog = &q->backlog;
+			opt->set.tab[i].present = true;
+			opt->set.tab[i].limit = q->limit;
+			opt->set.tab[i].prio = q->prio;
+			opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
+			opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
+			opt->set.tab[i].is_ecn = gred_use_ecn(q);
+			opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
+			opt->set.tab[i].probability = q->parms.max_P;
+			opt->set.tab[i].backlog = &q->backlog;
 		}
-		opt.set.qstats = &sch->qstats;
+		opt->set.qstats = &sch->qstats;
 	}
 
-	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, &opt);
+	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
+	kfree(opt);
+
+	return 0;
 }
 
 static int gred_offload_dump_stats(struct Qdisc *sch)
@@ -470,8 +477,7 @@ static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
 		}
 	}
 
-	gred_offload(sch, TC_GRED_REPLACE);
-	return 0;
+	return gred_offload(sch, TC_GRED_REPLACE);
 }
 
 static inline int gred_change_vq(struct Qdisc *sch, int dp,
@@ -719,8 +725,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
 	sch_tree_unlock(sch);
 	kfree(prealloc);
 
-	gred_offload(sch, TC_GRED_REPLACE);
-	return 0;
+	return gred_offload(sch, TC_GRED_REPLACE);
 
 err_unlock_free:
 	sch_tree_unlock(sch);
@@ -903,13 +908,16 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 static void gred_destroy(struct Qdisc *sch)
 {
 	struct gred_sched *table = qdisc_priv(sch);
-	int i;
+	int i, ret;
 
 	for (i = 0; i < table->DPs; i++) {
 		if (table->tab[i])
 			gred_destroy_vq(table->tab[i]);
 	}
-	gred_offload(sch, TC_GRED_DESTROY);
+	ret = gred_offload(sch, TC_GRED_DESTROY);
+
+	WARN(ret, "%s: failed to disable offload: %pe\n",
+	     __func__, ERR_PTR(ret));
 }
 
 static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
-- 
2.29.2


* Re: [PATCH net-next] net: sched: gred: dynamically allocate tc_gred_qopt_offload
From: Jakub Kicinski @ 2021-10-19 15:57 UTC
  To: Arnd Bergmann
  Cc: Jamal Hadi Salim, Cong Wang, Jiri Pirko, David S. Miller,
	Ahmed S. Darwish, Sebastian Andrzej Siewior, Arnd Bergmann,
	Zheng Yongjun, Eric Dumazet, netdev, linux-kernel

On Tue, 19 Oct 2021 17:37:27 +0200 Arnd Bergmann wrote:
> @@ -470,8 +477,7 @@ static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
>  		}
>  	}
>  
> -	gred_offload(sch, TC_GRED_REPLACE);
> -	return 0;
> +	return gred_offload(sch, TC_GRED_REPLACE);
>  }
>  
>  static inline int gred_change_vq(struct Qdisc *sch, int dp,
> @@ -719,8 +725,7 @@ static int gred_change(struct Qdisc *sch, struct nlattr *opt,
>  	sch_tree_unlock(sch);
>  	kfree(prealloc);
>  
> -	gred_offload(sch, TC_GRED_REPLACE);
> -	return 0;
> +	return gred_offload(sch, TC_GRED_REPLACE);

Now we can return an error even though the SW path has already been
changed. Qdisc offloads should not affect the SW changes, AFAIR.

If we need to alloc dynamically, let's allocate the buffer at init and
keep it in struct gred_sched. The offload calls are all under the RTNL
lock, so in fact we could even use static data, but let's do it right
and have a buffer per qdisc.
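
A minimal sketch of that per-qdisc buffer approach (an illustration of
the suggestion above, not the patch that was eventually applied; names
follow the existing sch_gred.c code and the details are assumptions):

	struct gred_sched {
		/* ... existing fields (tab[], flags, DPs, def, ...) ... */
		struct tc_gred_qopt_offload *opt; /* scratch buffer for offload calls */
	};

	/* Allocate once at init time, under the RTNL lock, so the offload
	 * and destroy paths never have to allocate (or fail).
	 */
	static int gred_init(struct Qdisc *sch, struct nlattr *opt,
			     struct netlink_ext_ack *extack)
	{
		struct gred_sched *table = qdisc_priv(sch);

		table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
		if (!table->opt)
			return -ENOMEM;
		/* ... existing init logic ... */
		return 0;
	}

	static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
	{
		struct gred_sched *table = qdisc_priv(sch);
		struct net_device *dev = qdisc_dev(sch);
		struct tc_gred_qopt_offload *opt = table->opt;

		if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
			return;

		memset(opt, 0, sizeof(*opt));	/* reuse the preallocated buffer */
		opt->command = command;
		opt->handle = sch->handle;
		opt->parent = sch->parent;
		/* ... fill in the TC_GRED_REPLACE parameters as in the patch ... */
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
	}

	static void gred_destroy(struct Qdisc *sch)
	{
		struct gred_sched *table = qdisc_priv(sch);

		/* ... destroy virtual queues ... */
		gred_offload(sch, TC_GRED_DESTROY);
		kfree(table->opt);
	}

With this, gred_offload() can stay void and the SW path is unaffected
by offload-time allocation failures.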
