* [PATCH net-next 0/3] netem: add nsec scheduling and slot feature
@ 2017-11-07 20:59 Dave Taht
  2017-11-07 20:59 ` [PATCH net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
                   ` (3 more replies)
  0 siblings, 4 replies; 12+ messages in thread
From: Dave Taht @ 2017-11-07 20:59 UTC (permalink / raw)
  To: netdev; +Cc: Dave Taht

This patch series converts netem away from the old "ticks" interface and
userspace API, and adds support for a new "slot" feature intended to
emulate bursty macs such as WiFi and LTE better.

Dave Taht (3):
  netem: convert to qdisc_watchdog_schedule_ns
  netem: add uapi to express delay and jitter in nanosec
  netem: support delivering packets in delayed time slots

 include/uapi/linux/pkt_sched.h |  10 +++
 net/sched/sch_netem.c          | 144 ++++++++++++++++++++++++++++++++---------
 2 files changed, 125 insertions(+), 29 deletions(-)

-- 
2.7.4

* [PATCH net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns
  2017-11-07 20:59 [PATCH net-next 0/3] netem: add nsec scheduling and slot feature Dave Taht
@ 2017-11-07 20:59 ` Dave Taht
  2017-11-14 21:11     ` James Hogan
  2017-11-07 20:59 ` [PATCH net-next 2/3] netem: add uapi to express delay and jitter in nanosec Dave Taht
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 12+ messages in thread
From: Dave Taht @ 2017-11-07 20:59 UTC (permalink / raw)
  To: netdev; +Cc: Dave Taht

Upgrade the internal netem scheduler to use nanoseconds rather than
ticks throughout.

Convert to and from the standard "ticks" userspace API automatically,
while allowing finer-grained scheduling to take place internally.
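
For illustration only (not part of the patch), here is a minimal userspace
sketch of the tick/ns boundary conversion; the PSCHED_SHIFT value of 6
(1 tick = 64 ns) is an assumption, not taken from this series:

#include <stdint.h>
#include <stdio.h>

#define PSCHED_SHIFT            6       /* assumed: 1 tick = 64 ns */
#define PSCHED_TICKS2NS(x)      ((int64_t)(x) << PSCHED_SHIFT)
#define PSCHED_NS2TICKS(x)      ((x) >> PSCHED_SHIFT)

int main(void)
{
        uint32_t latency_ticks = 15625; /* roughly 1 ms in old-style ticks */
        int64_t latency_ns = PSCHED_TICKS2NS(latency_ticks);

        /* round-trips cleanly because the value is a multiple of 64 ns */
        printf("%u ticks -> %lld ns -> %lld ticks\n",
               (unsigned int)latency_ticks, (long long)latency_ns,
               (long long)PSCHED_NS2TICKS(latency_ns));
        return 0;
}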

Signed-off-by: Dave Taht <dave.taht@gmail.com>
---
 net/sched/sch_netem.c | 56 +++++++++++++++++++++++++--------------------------
 1 file changed, 28 insertions(+), 28 deletions(-)

diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index db0228a..443a75d 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -77,8 +77,8 @@ struct netem_sched_data {
 
 	struct qdisc_watchdog watchdog;
 
-	psched_tdiff_t latency;
-	psched_tdiff_t jitter;
+	s64 latency;
+	s64 jitter;
 
 	u32 loss;
 	u32 ecn;
@@ -145,7 +145,7 @@ struct netem_sched_data {
  * we save skb->tstamp value in skb->cb[] before destroying it.
  */
 struct netem_skb_cb {
-	psched_time_t	time_to_send;
+	u64	        time_to_send;
 };
 
 static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
@@ -305,11 +305,11 @@ static bool loss_event(struct netem_sched_data *q)
  * std deviation sigma.  Uses table lookup to approximate the desired
  * distribution, and a uniformly-distributed pseudo-random source.
  */
-static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
-				struct crndstate *state,
-				const struct disttable *dist)
+static s64 tabledist(s64 mu, s64 sigma,
+			 struct crndstate *state,
+			 const struct disttable *dist)
 {
-	psched_tdiff_t x;
+	s64 x;
 	long t;
 	u32 rnd;
 
@@ -332,10 +332,10 @@ static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
 	return  x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
 }
 
-static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q)
+static s64 packet_len_2_sched_time(unsigned int len,
+				       struct netem_sched_data *q)
 {
-	u64 ticks;
-
+	s64 offset;
 	len += q->packet_overhead;
 
 	if (q->cell_size) {
@@ -345,11 +345,9 @@ static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sche
 			cells++;
 		len = cells * (q->cell_size + q->cell_overhead);
 	}
-
-	ticks = (u64)len * NSEC_PER_SEC;
-
-	do_div(ticks, q->rate);
-	return PSCHED_NS2TICKS(ticks);
+	offset = (s64)len * NSEC_PER_SEC;
+	do_div(offset, q->rate);
+	return offset;
 }
 
 static void tfifo_reset(struct Qdisc *sch)
@@ -369,7 +367,7 @@ static void tfifo_reset(struct Qdisc *sch)
 static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
-	psched_time_t tnext = netem_skb_cb(nskb)->time_to_send;
+	u64 tnext = netem_skb_cb(nskb)->time_to_send;
 	struct rb_node **p = &q->t_root.rb_node, *parent = NULL;
 
 	while (*p) {
@@ -515,13 +513,13 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	if (q->gap == 0 ||		/* not doing reordering */
 	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
 	    q->reorder < get_crandom(&q->reorder_cor)) {
-		psched_time_t now;
-		psched_tdiff_t delay;
+		u64 now;
+		s64 delay;
 
 		delay = tabledist(q->latency, q->jitter,
 				  &q->delay_cor, q->delay_dist);
 
-		now = psched_get_time();
+		now = ktime_get_ns();
 
 		if (q->rate) {
 			struct netem_skb_cb *last = NULL;
@@ -547,7 +545,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 				 * from delay.
 				 */
 				delay -= last->time_to_send - now;
-				delay = max_t(psched_tdiff_t, 0, delay);
+				delay = max_t(s64, 0, delay);
 				now = last->time_to_send;
 			}
 
@@ -562,7 +560,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 		 * Do re-ordering by putting one out of N packets at the front
 		 * of the queue.
 		 */
-		cb->time_to_send = psched_get_time();
+		cb->time_to_send = ktime_get_ns();
 		q->counter = 0;
 
 		netem_enqueue_skb_head(&sch->q, skb);
@@ -609,13 +607,13 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	}
 	p = rb_first(&q->t_root);
 	if (p) {
-		psched_time_t time_to_send;
+		u64 time_to_send;
 
 		skb = rb_to_skb(p);
 
 		/* if more time remaining? */
 		time_to_send = netem_skb_cb(skb)->time_to_send;
-		if (time_to_send <= psched_get_time()) {
+		if (time_to_send <= ktime_get_ns()) {
 			rb_erase(p, &q->t_root);
 
 			sch->q.qlen--;
@@ -659,7 +657,7 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 			if (skb)
 				goto deliver;
 		}
-		qdisc_watchdog_schedule(&q->watchdog, time_to_send);
+		qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send);
 	}
 
 	if (q->qdisc) {
@@ -888,8 +886,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 
 	sch->limit = qopt->limit;
 
-	q->latency = qopt->latency;
-	q->jitter = qopt->jitter;
+	q->latency = PSCHED_TICKS2NS(qopt->latency);
+	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
 	q->limit = qopt->limit;
 	q->gap = qopt->gap;
 	q->counter = 0;
@@ -1011,8 +1009,10 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct tc_netem_corrupt corrupt;
 	struct tc_netem_rate rate;
 
-	qopt.latency = q->latency;
-	qopt.jitter = q->jitter;
+	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
+			     UINT_MAX);
+	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
+			    UINT_MAX);
 	qopt.limit = q->limit;
 	qopt.loss = q->loss;
 	qopt.gap = q->gap;
-- 
2.7.4

* [PATCH net-next 2/3] netem: add uapi to express delay and jitter in nanosec
  2017-11-07 20:59 [PATCH net-next 0/3] netem: add nsec scheduling and slot feature Dave Taht
  2017-11-07 20:59 ` [PATCH net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
@ 2017-11-07 20:59 ` Dave Taht
  2017-11-07 20:59 ` [PATCH net-next 3/3] netem: support delivering packets in delayed time slots Dave Taht
  2017-11-08  0:26 ` [PATCH net-next 0/3] netem: add nsec scheduling and slot feature Stephen Hemminger
  3 siblings, 0 replies; 12+ messages in thread
From: Dave Taht @ 2017-11-07 20:59 UTC (permalink / raw)
  To: netdev; +Cc: Dave Taht

netem userspace has long relied on a horrible /proc/net/psched hack to
translate the current notion of "ticks" to nanoseconds.

Expressing latency and jitter directly in well-defined nanoseconds
increases the dynamic range of emulated delays and jitter in netem.

It also eases a transition to a 1:1 tick-to-nanosecond equivalence, which
would otherwise constrain the maximum delay in prior versions of netem to
only about 4.3 seconds.
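
For reference (not part of the patch), a quick userspace check of where the
4.3 second figure comes from: the legacy tc_netem_qopt carries latency and
jitter as 32-bit fields, so at 1 ns per tick the ceiling is 2^32 ns:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t max_delay_ns = UINT32_MAX;     /* 32-bit field, 1 ns units */

        /* prints roughly 4.295 s */
        printf("max delay: %.3f s\n", max_delay_ns / 1e9);
        return 0;
}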

Signed-off-by: Dave Taht <dave.taht@gmail.com>
---
 include/uapi/linux/pkt_sched.h |  2 ++
 net/sched/sch_netem.c          | 16 ++++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 5002562..20cfd64 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -536,6 +536,8 @@ enum {
 	TCA_NETEM_ECN,
 	TCA_NETEM_RATE64,
 	TCA_NETEM_PAD,
+	TCA_NETEM_LATENCY64,
+	TCA_NETEM_JITTER64,
 	__TCA_NETEM_MAX,
 };
 
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 443a75d..16c4813 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -819,6 +819,8 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
 	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
 	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
+	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
+	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
 };
 
 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -916,6 +918,12 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 		q->rate = max_t(u64, q->rate,
 				nla_get_u64(tb[TCA_NETEM_RATE64]));
 
+	if (tb[TCA_NETEM_LATENCY64])
+		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);
+
+	if (tb[TCA_NETEM_JITTER64])
+		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);
+
 	if (tb[TCA_NETEM_ECN])
 		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
 
@@ -1020,6 +1028,14 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
 		goto nla_put_failure;
 
+	if (PSCHED_TICKS2NS(qopt.latency) != q->latency)
+		if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency),
+			    &q->latency))
+			goto nla_put_failure;
+	if (PSCHED_TICKS2NS(qopt.jitter) != q->jitter)
+		if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter),
+			    &q->jitter))
+			goto nla_put_failure;
 	cor.delay_corr = q->delay_cor.rho;
 	cor.loss_corr = q->loss_cor.rho;
 	cor.dup_corr = q->dup_cor.rho;
-- 
2.7.4

* [PATCH net-next 3/3] netem: support delivering packets in delayed time slots
  2017-11-07 20:59 [PATCH net-next 0/3] netem: add nsec scheduling and slot feature Dave Taht
  2017-11-07 20:59 ` [PATCH net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
  2017-11-07 20:59 ` [PATCH net-next 2/3] netem: add uapi to express delay and jitter in nanosec Dave Taht
@ 2017-11-07 20:59 ` Dave Taht
  2017-11-08  0:26 ` [PATCH net-next 0/3] netem: add nsec scheduling and slot feature Stephen Hemminger
  3 siblings, 0 replies; 12+ messages in thread
From: Dave Taht @ 2017-11-07 20:59 UTC (permalink / raw)
  To: netdev; +Cc: Dave Taht

Slotting is a crude approximation of the behavior of shared media such
as cable, WiFi, and LTE, which gather up packets within a varying delay
window and then deliver them nearly all at once.

It works within the existing loss, duplication, jitter and delay
parameters of netem. Some amount of inherent latency must be specified,
regardless.

The new "slot" parameter specifies a minimum and maximum delay between
transmission attempts.

The "bytes" and "packets" parameters can be used to limit the amount of
information transferred per slot.

Examples of use:

tc qdisc add dev eth0 root netem delay 200us \
         slot 800us 10ms bytes 64k packets 42

A more realistic example, using stacked netem instances and a packet limit
to emulate a tail-drop WiFi queue with slots and variable packet delivery,
over a 200 Mbit isochronous underlying rate with 20 ms of path delay:

tc qdisc add dev eth0 root handle 1: netem delay 20ms rate 200mbit \
         limit 10000
tc qdisc add dev eth0 parent 1:1 handle 10:1 netem delay 200us \
         slot 800us 10ms bytes 64k packets 42 limit 512
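
For illustration only (not part of the patch), a userspace sketch of the slot
selection described above; the names are made up, and random() is a 31-bit
stand-in for the kernel's prandom_u32():

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

struct slot_cfg {
        int64_t min_delay_ns;
        int64_t max_delay_ns;
        int32_t max_packets;
        int32_t max_bytes;
};

struct slot_state {
        uint64_t slot_next;
        int32_t packets_left;
        int32_t bytes_left;
};

/* pick the next delivery slot uniformly in [min_delay, max_delay] past "now"
 * and refill the per-slot packet/byte budgets */
static void next_slot(const struct slot_cfg *cfg, struct slot_state *st,
                      uint64_t now_ns)
{
        /* random() only yields 31 bits, so this sketch slightly
         * under-spans the configured range */
        uint32_t rnd = (uint32_t)random();

        st->slot_next = now_ns + cfg->min_delay_ns +
                (((uint64_t)rnd *
                  (uint64_t)(cfg->max_delay_ns - cfg->min_delay_ns)) >> 32);
        st->packets_left = cfg->max_packets;
        st->bytes_left = cfg->max_bytes;
}

int main(void)
{
        /* 800us..10ms window, 42 packets or 64KB per slot */
        struct slot_cfg cfg = { 800000, 10000000, 42, 65536 };
        struct slot_state st;

        srandom((unsigned int)time(NULL));
        next_slot(&cfg, &st, 0);
        printf("first slot opens at %llu ns\n",
               (unsigned long long)st.slot_next);
        return 0;
}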

Signed-off-by: Dave Taht <dave.taht@gmail.com>
---
 include/uapi/linux/pkt_sched.h |  8 +++++
 net/sched/sch_netem.c          | 76 ++++++++++++++++++++++++++++++++++++++++--
 2 files changed, 81 insertions(+), 3 deletions(-)

diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 20cfd64..37b5096 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -538,6 +538,7 @@ enum {
 	TCA_NETEM_PAD,
 	TCA_NETEM_LATENCY64,
 	TCA_NETEM_JITTER64,
+	TCA_NETEM_SLOT,
 	__TCA_NETEM_MAX,
 };
 
@@ -575,6 +576,13 @@ struct tc_netem_rate {
 	__s32	cell_overhead;
 };
 
+struct tc_netem_slot {
+	__s64   min_delay; /* nsec */
+	__s64   max_delay;
+	__s32   max_packets;
+	__s32   max_bytes;
+};
+
 enum {
 	NETEM_LOSS_UNSPEC,
 	NETEM_LOSS_GI,		/* General Intuitive - 4 state model */
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 16c4813..a7189f9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -135,6 +135,13 @@ struct netem_sched_data {
 		u32 a5; /* p23 used only in 4-states */
 	} clg;
 
+	struct tc_netem_slot slot_config;
+	struct slotstate {
+		u64 slot_next;
+		s32 packets_left;
+		s32 bytes_left;
+	} slot;
+
 };
 
 /* Time stamp put into socket buffer control block
@@ -591,6 +598,20 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
 	return NET_XMIT_SUCCESS;
 }
 
+/* Delay the next round with a new future slot with a
+ * correct number of bytes and packets.
+ */
+
+static void get_slot_next(struct netem_sched_data *q, u64 now)
+{
+	q->slot.slot_next = now + q->slot_config.min_delay +
+		(prandom_u32() *
+			(q->slot_config.max_delay -
+				q->slot_config.min_delay) >> 32);
+	q->slot.packets_left = q->slot_config.max_packets;
+	q->slot.bytes_left = q->slot_config.max_bytes;
+}
+
 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -608,14 +629,17 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 	p = rb_first(&q->t_root);
 	if (p) {
 		u64 time_to_send;
+		u64 now = ktime_get_ns();
 
 		skb = rb_to_skb(p);
 
 		/* if more time remaining? */
 		time_to_send = netem_skb_cb(skb)->time_to_send;
-		if (time_to_send <= ktime_get_ns()) {
-			rb_erase(p, &q->t_root);
+		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
+			get_slot_next(q, now);
 
+		if (time_to_send <= now &&  q->slot.slot_next <= now) {
+			rb_erase(p, &q->t_root);
 			sch->q.qlen--;
 			qdisc_qstats_backlog_dec(sch, skb);
 			skb->next = NULL;
@@ -634,6 +658,14 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 				skb->tstamp = 0;
 #endif
 
+			if (q->slot.slot_next) {
+				q->slot.packets_left--;
+				q->slot.bytes_left -= qdisc_pkt_len(skb);
+				if (q->slot.packets_left <= 0 ||
+				    q->slot.bytes_left <= 0)
+					get_slot_next(q, now);
+			}
+
 			if (q->qdisc) {
 				unsigned int pkt_len = qdisc_pkt_len(skb);
 				struct sk_buff *to_free = NULL;
@@ -657,7 +689,12 @@ static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 			if (skb)
 				goto deliver;
 		}
-		qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send);
+
+		if (q->slot.slot_next > now)
+			qdisc_watchdog_schedule_ns(&q->watchdog,
+						   q->slot.slot_next);
+		else
+			qdisc_watchdog_schedule_ns(&q->watchdog, time_to_send);
 	}
 
 	if (q->qdisc) {
@@ -688,6 +725,7 @@ static void dist_free(struct disttable *d)
  * Distribution data is a variable size payload containing
  * signed 16 bit values.
  */
+
 static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 {
 	struct netem_sched_data *q = qdisc_priv(sch);
@@ -718,6 +756,23 @@ static int get_dist_table(struct Qdisc *sch, const struct nlattr *attr)
 	return 0;
 }
 
+static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
+{
+	const struct tc_netem_slot *c = nla_data(attr);
+
+	q->slot_config = *c;
+	if (q->slot_config.max_packets == 0)
+		q->slot_config.max_packets = INT_MAX;
+	if (q->slot_config.max_bytes == 0)
+		q->slot_config.max_bytes = INT_MAX;
+	q->slot.packets_left = q->slot_config.max_packets;
+	q->slot.bytes_left = q->slot_config.max_bytes;
+	if (q->slot_config.min_delay | q->slot_config.max_delay)
+		q->slot.slot_next = ktime_get_ns();
+	else
+		q->slot.slot_next = 0;
+}
+
 static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
 {
 	const struct tc_netem_corr *c = nla_data(attr);
@@ -821,6 +876,7 @@ static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
 	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
 	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
 	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
+	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
 };
 
 static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
@@ -927,6 +983,9 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt)
 	if (tb[TCA_NETEM_ECN])
 		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);
 
+	if (tb[TCA_NETEM_SLOT])
+		get_slot(q, tb[TCA_NETEM_SLOT]);
+
 	return ret;
 }
 
@@ -1016,6 +1075,7 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	struct tc_netem_reorder reorder;
 	struct tc_netem_corrupt corrupt;
 	struct tc_netem_rate rate;
+	struct tc_netem_slot slot;
 
 	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
 			     UINT_MAX);
@@ -1072,6 +1132,16 @@ static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
 	if (dump_loss_model(q, skb) != 0)
 		goto nla_put_failure;
 
+	if (q->slot_config.min_delay | q->slot_config.max_delay) {
+		slot = q->slot_config;
+		if (slot.max_packets == INT_MAX)
+			slot.max_packets = 0;
+		if (slot.max_bytes == INT_MAX)
+			slot.max_bytes = 0;
+		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
+			goto nla_put_failure;
+	}
+
 	return nla_nest_end(skb, nla);
 
 nla_put_failure:
-- 
2.7.4

* Re: [PATCH net-next 0/3] netem: add nsec scheduling and slot feature
  2017-11-07 20:59 [PATCH net-next 0/3] netem: add nsec scheduling and slot feature Dave Taht
                   ` (2 preceding siblings ...)
  2017-11-07 20:59 ` [PATCH net-next 3/3] netem: support delivering packets in delayed time slots Dave Taht
@ 2017-11-08  0:26 ` Stephen Hemminger
  2017-11-08 21:45   ` Dave Taht
  3 siblings, 1 reply; 12+ messages in thread
From: Stephen Hemminger @ 2017-11-08  0:26 UTC (permalink / raw)
  To: Dave Taht; +Cc: netdev

On Tue,  7 Nov 2017 12:59:33 -0800
Dave Taht <dave.taht@gmail.com> wrote:

> This patch series converts netem away from the old "ticks" interface and
> userspace API, and adds support for a new "slot" feature intended to
> emulate bursty macs such as WiFi and LTE better.
> 
> Dave Taht (3):
>   netem: convert to qdisc_watchdog_schedule_ns
>   netem: add uapi to express delay and jitter in nanosec
>   netem: support delivering packets in delayed time slots
> 
>  include/uapi/linux/pkt_sched.h |  10 +++
>  net/sched/sch_netem.c          | 144 ++++++++++++++++++++++++++++++++---------
>  2 files changed, 125 insertions(+), 29 deletions(-)
> 

Dave, thanks for the patch.
One issue is that it needs to keep binary compatibility for both the kernel and iproute.
That means users of a new kernel should be able to use old versions of iproute
without any visible impact (and vice versa).

For the kernel, that means that if the new attributes are not present, the old
attributes are used. For iproute2, that means sending both new and old versions.

* Re: [PATCH net-next 0/3] netem: add nsec scheduling and slot feature
  2017-11-08  0:26 ` [PATCH net-next 0/3] netem: add nsec scheduling and slot feature Stephen Hemminger
@ 2017-11-08 21:45   ` Dave Taht
  0 siblings, 0 replies; 12+ messages in thread
From: Dave Taht @ 2017-11-08 21:45 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: netdev

On Tue, Nov 7, 2017 at 4:26 PM, Stephen Hemminger
<stephen@networkplumber.org> wrote:
> On Tue,  7 Nov 2017 12:59:33 -0800
> Dave Taht <dave.taht@gmail.com> wrote:
>
>> This patch series converts netem away from the old "ticks" interface and
>> userspace API, and adds support for a new "slot" feature intended to
>> emulate bursty macs such as WiFi and LTE better.
>>
>> Dave Taht (3):
>>   netem: convert to qdisc_watchdog_schedule_ns
>>   netem: add uapi to express delay and jitter in nanosec
>>   netem: support delivering packets in delayed time slots
>>
>>  include/uapi/linux/pkt_sched.h |  10 +++
>>  net/sched/sch_netem.c          | 144 ++++++++++++++++++++++++++++++++---------
>>  2 files changed, 125 insertions(+), 29 deletions(-)
>>
>
> Dave, thanks for the patch.
> One issue is that it needs to keep binary compatibility for both the kernel and iproute.
> That means users of a new kernel should be able to use old versions of iproute
> without any visible impact (and vice versa).
>
> For the kernel, that means that if the new attributes are not present, the old
> attributes are used.

For the kernel patchset you are commenting on, this was the case. There was
no way, via an old iproute2, to send a jitter or delay value that would
trigger sending the new attributes, although the old API has severe rounding
errors as you get down to a few microseconds.
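
Concretely, combining the netem_change() hunks from patches 1 and 2 above,
the legacy tick fields are converted first and the 64-bit attributes, when
present, simply override them:

        q->latency = PSCHED_TICKS2NS(qopt->latency);
        q->jitter = PSCHED_TICKS2NS(qopt->jitter);
        ...
        if (tb[TCA_NETEM_LATENCY64])
                q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

        if (tb[TCA_NETEM_JITTER64])
                q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);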

I just sent a kernel v2 that always sends the old and new attributes.

>For iproute2, that means sending both new and old versions.

The iproute2 patchset, however, didn't do this, as I assumed new versions of
iproute2 would only be used with newer kernels. I'll respin it.

I did want to somehow, one day, obsolete ticks, but I guess
that is not possible.

-- 

Dave Täht
CEO, TekLibre, LLC
http://www.teklibre.com
Tel: 1-669-226-2619

* Re: [net-next,1/3] netem: convert to qdisc_watchdog_schedule_ns
  2017-11-07 20:59 ` [PATCH net-next 1/3] netem: convert to qdisc_watchdog_schedule_ns Dave Taht
@ 2017-11-14 21:11     ` James Hogan
  0 siblings, 0 replies; 12+ messages in thread
From: James Hogan @ 2017-11-14 21:11 UTC (permalink / raw)
  To: Dave Taht; +Cc: netdev, linux-next, linux-mips, Ralf Baechle

On Tue, Nov 07, 2017 at 12:59:34PM -0800, Dave Taht wrote:
> diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
> index db0228a..443a75d 100644
> --- a/net/sched/sch_netem.c
> +++ b/net/sched/sch_netem.c

...

> @@ -305,11 +305,11 @@ static bool loss_event(struct netem_sched_data *q)
>   * std deviation sigma.  Uses table lookup to approximate the desired
>   * distribution, and a uniformly-distributed pseudo-random source.
>   */
> -static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
> -				struct crndstate *state,
> -				const struct disttable *dist)
> +static s64 tabledist(s64 mu, s64 sigma,

sigma is used in a modulo operation in this function, which results in
this error on a bunch of MIPS configs once it is made 64-bits wide:

net/sched/sch_netem.o In function `tabledist':
net/sched/sch_netem.c:330: undefined reference to `__moddi3'

Should that code not be using <linux/math64.h>, i.e. div_s64_rem() now
that it is 64bit?

Thanks
James
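
A sketch of the kind of rework being suggested here (illustrative and
untested; note that div_s64_rem() takes an s32 divisor, so 2 * sigma would
have to be bounded to 32 bits anyway):

        /* uniform case: avoid the open-coded 64-bit '%', which is lowered
         * to __moddi3 on 32-bit targets, by going through <linux/math64.h> */
        u32 rnd = get_crandom(state);
        s32 rem;

        div_s64_rem(rnd, 2 * (s32)sigma, &rem);
        return rem - sigma + mu;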

* Re: [net-next,1/3] netem: convert to qdisc_watchdog_schedule_ns
  2017-11-14 21:11     ` James Hogan
@ 2017-11-14 21:43       ` James Hogan
  -1 siblings, 0 replies; 12+ messages in thread
From: James Hogan @ 2017-11-14 21:43 UTC (permalink / raw)
  To: Dave Taht; +Cc: netdev, linux-next, linux-mips, Ralf Baechle

On Tue, Nov 14, 2017 at 09:11:12PM +0000, James Hogan wrote:
> On Tue, Nov 07, 2017 at 12:59:34PM -0800, Dave Taht wrote:
> > diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
> > index db0228a..443a75d 100644
> > --- a/net/sched/sch_netem.c
> > +++ b/net/sched/sch_netem.c
> 
> ...
> 
> > @@ -305,11 +305,11 @@ static bool loss_event(struct netem_sched_data *q)
> >   * std deviation sigma.  Uses table lookup to approximate the desired
> >   * distribution, and a uniformly-distributed pseudo-random source.
> >   */
> > -static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
> > -				struct crndstate *state,
> > -				const struct disttable *dist)
> > +static s64 tabledist(s64 mu, s64 sigma,
> 
> sigma is used in a modulo operation in this function, which results in
> this error on a bunch of MIPS configs once it is made 64-bits wide:
> 
> net/sched/sch_netem.o In function `tabledist':
> net/sched/sch_netem.c:330: undefined reference to `__moddi3'
> 
> Should that code not be using <linux/math64.h>, i.e. div_s64_rem() now
> that it is 64bit?

For the record, Dave has kindly pointed me at:
https://patchwork.ozlabs.org/project/netdev/list/?series=13554

which fixes the MIPS builds.

Cheers
James

* Re: [net-next,1/3] netem: convert to qdisc_watchdog_schedule_ns
  2017-11-14 21:11     ` James Hogan
@ 2017-11-14 22:49       ` Stephen Hemminger
  -1 siblings, 0 replies; 12+ messages in thread
From: Stephen Hemminger @ 2017-11-14 22:49 UTC (permalink / raw)
  To: James Hogan; +Cc: Dave Taht, netdev, linux-next, linux-mips, Ralf Baechle

On Tue, 14 Nov 2017 21:11:13 +0000
James Hogan <james.hogan@mips.com> wrote:

> On Tue, Nov 07, 2017 at 12:59:34PM -0800, Dave Taht wrote:
> > diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
> > index db0228a..443a75d 100644
> > --- a/net/sched/sch_netem.c
> > +++ b/net/sched/sch_netem.c  
> 
> ...
> 
> > @@ -305,11 +305,11 @@ static bool loss_event(struct netem_sched_data *q)
> >   * std deviation sigma.  Uses table lookup to approximate the desired
> >   * distribution, and a uniformly-distributed pseudo-random source.
> >   */
> > -static psched_tdiff_t tabledist(psched_tdiff_t mu, psched_tdiff_t sigma,
> > -				struct crndstate *state,
> > -				const struct disttable *dist)
> > +static s64 tabledist(s64 mu, s64 sigma,  
> 
> sigma is used in a modulo operation in this function, which results in
> this error on a bunch of MIPS configs once it is made 64-bits wide:
> 
> net/sched/sch_netem.o In function `tabledist':
> net/sched/sch_netem.c:330: undefined reference to `__moddi3'
> 
> Should that code not be using <linux/math64.h>, i.e. div_s64_rem() now
> that it is 64bit?
> 
> Thanks
> James

Not really: since the random value is only 32 bits, making sigma 64 bits
wide doesn't really make sense.
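
For illustration, one way to keep that modulo 32-bit in the uniform (no
distribution table) case, along the lines suggested here; this is a sketch,
not necessarily what was merged:

        /* rnd is a u32, so bound sigma to 32 bits for the '%' */
        if (dist == NULL)
                return ((rnd % (2 * (u32)sigma)) + mu) - sigma;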
