[PATCH] net: sched: Add support for packet bursting
From: Niclas Hedam @ 2021-06-25 12:03 UTC
  To: stephen; +Cc: netdev

Hello,

This patch implements packet bursting in the NetEm scheduler. It
allows system administrators to hold back outgoing packets and
release them only at multiples of a configurable time quantum. The
feature can be used to mitigate timing attacks that exploit
variations in network response latency.
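
For illustration, here is a minimal userspace sketch of the
release-time rounding that netem_enqueue() performs below. The
function and variable names are only for this example and are not
part of the patch; the kernel code uses ktime_get_ns() and the
qdisc's configured bursting interval instead.

#include <stdio.h>
#include <stdint.h>

/*
 * Round an arrival timestamp up to the next multiple of the burst
 * interval: every packet arriving within the same interval is
 * released together at the interval's end.
 */
static uint64_t burst_release_time(uint64_t now_ns, uint64_t interval_ns)
{
    return now_ns - (now_ns % interval_ns) + interval_ns;
}

int main(void)
{
    uint64_t interval_ns = 1000000;    /* 1 ms burst quantum */

    /* Two packets arriving 300 us apart are both released at
     * 6000000 ns, hiding their fine-grained timing difference.
     */
    printf("%llu\n", (unsigned long long)burst_release_time(5000200, interval_ns));
    printf("%llu\n", (unsigned long long)burst_release_time(5300200, interval_ns));
    return 0;
}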

I am currently writing a paper on this work; it is not yet publicly
available. The idea is based on "Predictive Black-Box Mitigation of
Timing Channels"
(https://dl.acm.org/doi/pdf/10.1145/1866307.1866341).

Signed-off-by: Niclas Hedam <niclas@hed.am>
---
 include/uapi/linux/pkt_sched.h |  2 ++
 net/sched/sch_netem.c          | 24 +++++++++++++++++++++---
 2 files changed, 23 insertions(+), 3 deletions(-)

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 79a699f106b1..826d1dee6601 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -594,6 +594,7 @@ enum {
 	TCA_NETEM_DELAY_DIST,
 	TCA_NETEM_REORDER,
 	TCA_NETEM_CORRUPT,
+	TCA_NETEM_BURSTING,
 	TCA_NETEM_LOSS,
 	TCA_NETEM_RATE,
 	TCA_NETEM_ECN,
@@ -615,6 +616,7 @@ struct tc_netem_qopt {
 	__u32	gap;		/* re-ordering gap (0 for none) */
 	__u32   duplicate;	/* random packet dup  (0=none ~0=100%) */
 	__u32	jitter;		/* random jitter in latency (us) */
+	__u32	bursting;	/* send packets in bursts (us) */
 };

 struct tc_netem_corr {
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 0c345e43a09a..52d796287b86 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -85,6 +85,7 @@ struct netem_sched_data {
 	s64 latency;
 	s64 jitter;

+	u32 bursting;
 	u32 loss;
 	u32 ecn;
 	u32 limit;
@@ -467,7 +468,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	/* If a delay is expected, orphan the skb. (orphaning usually takes
 	 * place at TX completion time, so _before_ the link transit delay)
 	 */
-	if (q->latency || q->jitter || q->rate)
+	if (q->latency || q->jitter || q->rate || q->bursting)
 		skb_orphan_partial(skb);

 	/*
@@ -527,8 +528,17 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	qdisc_qstats_backlog_inc(sch, skb);

 	cb = netem_skb_cb(skb);
-	if (q->gap == 0 ||		/* not doing reordering */
-	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
+	if (q->bursting > 0) {
+		u64 now;
+
+		now = ktime_get_ns();
+
+		cb->time_to_send = now - (now % q->bursting) + q->bursting;
+
+		++q->counter;
+		tfifo_enqueue(skb, sch);
+	} else if (q->gap == 0 ||		/* not doing reordering */
+	    q->counter < q->gap - 1 ||		/* inside last reordering gap */
 	    q->reorder < get_crandom(&q->reorder_cor)) {
 		u64 now;
 		s64 delay;
@@ -927,6 +937,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
 	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
+	[TCA_NETEM_BURSTING]	= { .type = NLA_U64 },
 	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
 	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
 };
@@ -1001,6 +1012,7 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,

 	q->latency = PSCHED_TICKS2NS(qopt->latency);
 	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
+	q->bursting = PSCHED_TICKS2NS(qopt->bursting);
 	q->limit = qopt->limit;
 	q->gap = qopt->gap;
 	q->counter = 0;
@@ -1032,6 +1044,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt,
 	if (tb[TCA_NETEM_LATENCY64])
 		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

+	if (tb[TCA_NETEM_BURSTING])
+		q->bursting = nla_get_u64(tb[TCA_NETEM_BURSTING]);
+
 	if (tb[TCA_NETEM_JITTER64])
 		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

@@ -1150,6 +1165,9 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 			     UINT_MAX);
 	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
 			    UINT_MAX);
+	qopt.bursting = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->bursting),
+			    UINT_MAX);
+
 	qopt.limit = q->limit;
 	qopt.loss = q->loss;
 	qopt.gap = q->gap;
--
2.25.1


