netdev.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* amplifying qdisc
@ 2020-07-09  6:10 Russell Strong
  2020-07-09  6:26 ` Stephen Hemminger
  0 siblings, 1 reply; 5+ messages in thread
From: Russell Strong @ 2020-07-09  6:10 UTC (permalink / raw)
  To: netdev

Hi,

I'm attempting to fill a link with background traffic that is sent
whenever the link is idle.  To do this I've created a qdisc that will
repeat the last packet in the queue for a defined number of times
(possibly infinite in the future). I am able to control the contents of
the fill traffic by sending the occasional packet through this qdisc.

This works as the root qdisc and below a TBF.  When I try it as a
leaf of HTB unexpected behaviour ensues.  I suspect my approach is
violating some rules for qdiscs?  Any help/ideas/pointers would be
appreciated.

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_amp.c	amplifying qdisc
 *
 * Authors:	Russell Strong <russell@strong.id.au>
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

/* Per-qdisc private state for the amplifying qdisc. */
struct amp_sched_data {
	u32 duplicates;	/* how many times to repeat the last queued packet */
	u32 remaining;	/* repeats still owed for the current last packet */
};

/*
 * Enqueue hook: admit the packet while under sch->limit, drop otherwise.
 *
 * Re-arms the duplicate counter only when the packet is actually
 * accepted; the original re-armed it unconditionally, so a dropped
 * packet could restart amplification of a stale queue head.
 */
static int amp_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct amp_sched_data *asd = qdisc_priv(sch);

	pr_debug("amp_enqueue: qlen %u\n", sch->q.qlen);

	if (likely(sch->q.qlen < sch->limit)) {
		/* Accepted: this packet becomes the new amplification source. */
		asd->remaining = asd->duplicates;
		return qdisc_enqueue_tail(skb, sch);
	}

	return qdisc_drop(skb, sch, to_free);
}

/*
 * Dequeue hook: while exactly one packet remains queued and repeats are
 * still owed, hand out clones of it instead of draining the queue.
 *
 * Fixes vs. the original:
 *  - skb_clone() can fail under memory pressure and qdisc_peek_head()
 *    is checked for NULL; on either failure we fall back to a normal
 *    dequeue instead of returning NULL while the queue is non-empty.
 *  - the clone path now updates byte/packet stats, matching what
 *    qdisc_dequeue_head() does internally for the normal path.
 */
static struct sk_buff *amp_dequeue(struct Qdisc *sch)
{
	struct amp_sched_data *asd = qdisc_priv(sch);
	struct sk_buff *skb;

	pr_debug("amp_dequeue: qlen %u, remaining %u\n",
		 sch->q.qlen, asd->remaining);

	if (sch->q.qlen == 1 && asd->remaining > 0) {
		skb = qdisc_peek_head(sch);
		if (skb) {
			skb = skb_clone(skb, GFP_ATOMIC);
			if (skb) {
				asd->remaining--;
				qdisc_bstats_update(sch, skb);
				return skb;
			}
		}
		/* peek or clone failed: fall through to a real dequeue */
	}

	return qdisc_dequeue_head(sch);
}

static int amp_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct amp_sched_data *asd = qdisc_priv(sch);
	u32 limit = qdisc_dev(sch)->tx_queue_len;

	sch->limit = limit;
	asd->duplicates = 10;
	asd->remaining = 0;

	printk(KERN_DEBUG "amp_init\n");

	return 0;
}

/* Reset hook: cancel any pending repeats, then flush the queue. */
static void amp_reset_queue(struct Qdisc *sch)
{
	struct amp_sched_data *asd = qdisc_priv(sch);

	asd->remaining = 0;
	qdisc_reset_queue(sch);
}

/*
 * Dump hook: this qdisc exposes no netlink options, so no TLVs are
 * appended; returning skb->len reports the (unchanged) message length.
 */
static int amp_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	return skb->len;
}

/*
 * Qdisc operations table. Made static: it is referenced only by
 * register_qdisc()/unregister_qdisc() in this file and should not
 * leak into the kernel's global namespace.
 */
static struct Qdisc_ops amp_qdisc_ops __read_mostly = {
	.id		=	"amp",
	.priv_size	=	sizeof(struct amp_sched_data),
	.enqueue	=	amp_enqueue,
	.dequeue	=	amp_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	amp_init,
	.reset		=	amp_reset_queue,
	.change		=	amp_init,
	.dump		=	amp_dump,
	.owner		=	THIS_MODULE,
};

/* Module entry point: register the "amp" qdisc with the scheduler core. */
static int __init amp_module_init(void)
{
	return register_qdisc(&amp_qdisc_ops);
}

/* Module exit point: unregister the "amp" qdisc. */
static void __exit amp_module_exit(void)
{
	unregister_qdisc(&amp_qdisc_ops);
}

module_init(amp_module_init)
module_exit(amp_module_exit)
MODULE_LICENSE("GPL");

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: amplifying qdisc
  2020-07-09  6:10 amplifying qdisc Russell Strong
@ 2020-07-09  6:26 ` Stephen Hemminger
  2020-07-12  1:40   ` Russell Strong
  0 siblings, 1 reply; 5+ messages in thread
From: Stephen Hemminger @ 2020-07-09  6:26 UTC (permalink / raw)
  To: Russell Strong; +Cc: netdev

On Thu, 9 Jul 2020 16:10:34 +1000
Russell Strong <russell@strong.id.au> wrote:

> Hi,
> 
> I'm attempting to fill a link with background traffic that is sent
> whenever the link is idle.  To do this I've creates a qdisc that will
> repeat the last packet in the queue for a defined number of times
> (possibly infinite in the future). I am able to control the contents of
> the fill traffic by sending the occasional packet through this qdisc.
> 
> This is works as the root qdisc and below a TBF.  When I try it as a
> leaf of HTB unexpected behaviour ensues.  I suspect my approach is
> violating some rules for qdiscs?  Any help/ideas/pointers would be
> appreciated.

Netem can already do things like this. Why not add to that


^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: amplifying qdisc
  2020-07-09  6:26 ` Stephen Hemminger
@ 2020-07-12  1:40   ` Russell Strong
  2020-07-13 16:52     ` Stephen Hemminger
  0 siblings, 1 reply; 5+ messages in thread
From: Russell Strong @ 2020-07-12  1:40 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: netdev

On Wed, 8 Jul 2020 23:26:34 -0700
Stephen Hemminger <stephen@networkplumber.org> wrote:

> On Thu, 9 Jul 2020 16:10:34 +1000
> Russell Strong <russell@strong.id.au> wrote:
> 
> > Hi,
> > 
> > I'm attempting to fill a link with background traffic that is sent
> > whenever the link is idle.  To do this I've creates a qdisc that
> > will repeat the last packet in the queue for a defined number of
> > times (possibly infinite in the future). I am able to control the
> > contents of the fill traffic by sending the occasional packet
> > through this qdisc.
> > 
> > This is works as the root qdisc and below a TBF.  When I try it as a
> > leaf of HTB unexpected behaviour ensues.  I suspect my approach is
> > violating some rules for qdiscs?  Any help/ideas/pointers would be
> > appreciated.  
> 
> Netem can already do things like this. Why not add to that
> 

Hi,

Tried doing this within netem as follows, but ran into similar
problems.  Works as the root qdisc (except for "Route cache is full:
consider increasing sysctl net.ipv[4|6].route.max_size.") but not under
htb.  I am attempting to duplicate at dequeue, rather than enqueue to
get an infinite stream of packets rather than a fixed number of
duplicates. Is this possible?

Thanks
Russell


diff --git a/sch_netem.c b/sch_netem.c
index 42e557d..9a674df 100644
--- a/sch_netem.c
+++ b/sch_netem.c
@@ -98,6 +98,7 @@ struct netem_sched_data {
        u32 cell_size;
        struct reciprocal_value cell_size_reciprocal;
        s32 cell_overhead;
+       u32 repeat_last;
 
        struct crndstate {
                u32 last;
@@ -697,9 +698,13 @@ deliver:
                        get_slot_next(q, now);
 
                if (time_to_send <= now && q->slot.slot_next <= now) {
-                       netem_erase_head(q, skb);
-                       sch->q.qlen--;
-                       qdisc_qstats_backlog_dec(sch, skb);
+                       if (sch->q.qlen == 1 && q->repeat_last)
+                               skb = skb_clone(skb, GFP_ATOMIC);
+                       else {
+                               netem_erase_head(q, skb);
+                               sch->q.qlen--;
+                               qdisc_qstats_backlog_dec(sch, skb);
+                       }
                        skb->next = NULL;
                        skb->prev = NULL;
                        /* skb->dev shares skb->rbnode area,
@@ -1061,6 +1066,7 @@ static int netem_init(struct Qdisc *sch, struct
nlattr *opt, return -EINVAL;
 
        q->loss_model = CLG_RANDOM;
+       q->repeat_last = 1;
        ret = netem_change(sch, opt, extack);
        if (ret)
                pr_info("netem: change failed\n");

^ permalink raw reply related	[flat|nested] 5+ messages in thread

* Re: amplifying qdisc
  2020-07-12  1:40   ` Russell Strong
@ 2020-07-13 16:52     ` Stephen Hemminger
  2020-07-20  1:39       ` Russell Strong
  0 siblings, 1 reply; 5+ messages in thread
From: Stephen Hemminger @ 2020-07-13 16:52 UTC (permalink / raw)
  To: Russell Strong; +Cc: netdev

On Sun, 12 Jul 2020 11:40:01 +1000
Russell Strong <russell@strong.id.au> wrote:

> On Wed, 8 Jul 2020 23:26:34 -0700
> Stephen Hemminger <stephen@networkplumber.org> wrote:
> 
> > On Thu, 9 Jul 2020 16:10:34 +1000
> > Russell Strong <russell@strong.id.au> wrote:
> >   
> > > Hi,
> > > 
> > > I'm attempting to fill a link with background traffic that is sent
> > > whenever the link is idle.  To do this I've creates a qdisc that
> > > will repeat the last packet in the queue for a defined number of
> > > times (possibly infinite in the future). I am able to control the
> > > contents of the fill traffic by sending the occasional packet
> > > through this qdisc.
> > > 
> > > This is works as the root qdisc and below a TBF.  When I try it as a
> > > leaf of HTB unexpected behaviour ensues.  I suspect my approach is
> > > violating some rules for qdiscs?  Any help/ideas/pointers would be
> > > appreciated.    
> > 
> > Netem can already do things like this. Why not add to that
> >   
> 
> Hi,
> 
> Tried doing this within netem as follows; but run into similar
> problems.  Works as the root qdisc (except for "Route cache is full:
> consider increasing sysctl net.ipv[4|6].route.max_size.") but not under
> htb.  I am attempting to duplicate at dequeue, rather than enqueue to
> get an infinite stream of packets rather than a fixed number of
> duplicates. Is this possible?
> 
> Thanks
> Russell

HTB expects any thing under it to be work conserving.

^ permalink raw reply	[flat|nested] 5+ messages in thread

* Re: amplifying qdisc
  2020-07-13 16:52     ` Stephen Hemminger
@ 2020-07-20  1:39       ` Russell Strong
  0 siblings, 0 replies; 5+ messages in thread
From: Russell Strong @ 2020-07-20  1:39 UTC (permalink / raw)
  To: Stephen Hemminger; +Cc: netdev

On Mon, 13 Jul 2020 09:52:28 -0700
Stephen Hemminger <stephen@networkplumber.org> wrote:

> On Sun, 12 Jul 2020 11:40:01 +1000
> Russell Strong <russell@strong.id.au> wrote:
> 
> > On Wed, 8 Jul 2020 23:26:34 -0700
> > Stephen Hemminger <stephen@networkplumber.org> wrote:
> >   
> > > On Thu, 9 Jul 2020 16:10:34 +1000
> > > Russell Strong <russell@strong.id.au> wrote:
> > >     
> > > > Hi,
> > > > 
> > > > I'm attempting to fill a link with background traffic that is
> > > > sent whenever the link is idle.  To do this I've creates a
> > > > qdisc that will repeat the last packet in the queue for a
> > > > defined number of times (possibly infinite in the future). I am
> > > > able to control the contents of the fill traffic by sending the
> > > > occasional packet through this qdisc.
> > > > 
> > > > This is works as the root qdisc and below a TBF.  When I try it
> > > > as a leaf of HTB unexpected behaviour ensues.  I suspect my
> > > > approach is violating some rules for qdiscs?  Any
> > > > help/ideas/pointers would be appreciated.      
> > > 
> > > Netem can already do things like this. Why not add to that
> > >     
> > 
> > Hi,
> > 
> > Tried doing this within netem as follows; but run into similar
> > problems.  Works as the root qdisc (except for "Route cache is full:
> > consider increasing sysctl net.ipv[4|6].route.max_size.") but not
> > under htb.  I am attempting to duplicate at dequeue, rather than
> > enqueue to get an infinite stream of packets rather than a fixed
> > number of duplicates. Is this possible?
> > 
> > Thanks
> > Russell  
> 
> HTB expects any thing under it to be work conserving.

Thanks for the tip.  I've tried a new approach using tasklets that
appears to be working fine both as the root qdisc and under htb.

Basically, at dequeue, if the qlen drops below 1/2, a tasklet is
scheduled that generates more packets to fill the queue via
dev_queue_xmit.  By generating off the queue length I can avoid the
dropped packets I was getting from pktgen. Sound sane?

Next I would like to add netlink control over the packet size mix and
contents that would be dynamically updated.

This code is big hands, small map stuff to see how it might work.
Does this kind of functionality fit well with netem?

I was always intending to use it in conjunction with other qdiscs and
IPSec to obscure traffic patterns without adding too much latency or
load that might interfere with the real traffic.

diff --git a/sch_netem.c b/sch_netem.c
index 42e557d..a6eef95 100644
--- a/sch_netem.c
+++ b/sch_netem.c
@@ -145,6 +145,9 @@ struct netem_sched_data {
        } slot;
 
        struct disttable *slot_dist;
+
+       u32 fill_imix;
+       struct tasklet_struct tasklet;
 };
 
 /* Time stamp put into socket buffer control block
@@ -673,6 +676,70 @@ static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
        }
 }
 
+static unsigned char template_packet[] = {
+        0x32, 0xf2, 0xa9, 0x7d, 0xfe, 0xeb,     // dst mac
+        0x06, 0x1d, 0x01, 0x59, 0x3d, 0x20,     // src mac
+
+        0x86, 0xdd,                             // eth type
+
+        0x61, 0x00, 0x00, 0x00,                 // ipv6 version + tclass + flowlabel
+
+        0x00, 0x00,                             // payload length
+
+        0x3b,                                   // next header ( no next header )
+
+        0x01,                                   // hop limit
+
+        0xfe, 0x80, 0x00, 0x00,                 // source
+        0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x01,
+
+        0xfe, 0x80, 0x00, 0x00,                 // destination
+        0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x00,
+        0x00, 0x00, 0x00, 0x02,
+
+        0x00, 0x00                              // pad to 8 align
+};
+
+static void generate_packets(unsigned long data)
+{
+        int i;
+        struct sk_buff *skb;
+        struct Qdisc *sch = (struct Qdisc *)data;
+
+        for (i = 0; i < sch->limit - sch->q.qlen; i++) {
+                u16 r, len;
+
+                get_random_bytes(&r, sizeof(r));
+
+                // perturbed IMIX
+                if (r % 12 < 7)
+                        len = 14 + 40 + 30 + (r % 20);
+                else if (r % 12 < 11)
+                        len = 14 + 40 + 526 + (r % 100);
+                else
+                        len = qdisc_dev(sch)->mtu;
+
+                skb = alloc_skb(len + 512, GFP_ATOMIC);
+                if (!skb)
+                        return;
+
+                skb_reserve(skb, 256);
+                skb_reset_mac_header(skb);
+                skb_put(skb, len);
+                template_packet[18] = (len - (14 + 40)) / 256;
+                template_packet[19] = (len - (14 + 40)) % 256;
+                memcpy(skb_mac_header(skb), template_packet, sizeof(template_packet));
+
+                skb->dev = qdisc_dev(sch);
+                skb->priority = 0x10;
+
+                dev_queue_xmit(skb);
+        }
+}
+
 static struct sk_buff *netem_dequeue(struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
@@ -683,6 +750,8 @@ tfifo_dequeue:
        if (skb) {
                qdisc_qstats_backlog_dec(sch, skb);
 deliver:
+               if (q->fill_imix && sch->q.qlen < sch->limit / 2)
+                       tasklet_schedule(&q->tasklet);
                qdisc_bstats_update(sch, skb);
                return skb;
        }
@@ -1055,6 +1124,10 @@ static int netem_init(struct Qdisc *sch, struct nlattr *opt,
        struct netem_sched_data *q = qdisc_priv(sch);
        int ret;
 
+       tasklet_init(&q->tasklet, generate_packets, (unsigned long)sch);
+       tasklet_schedule(&q->tasklet);
+       q->fill_imix = 1;
+
        qdisc_watchdog_init(&q->watchdog, sch);
 
        if (!opt)
@@ -1071,6 +1144,8 @@ static void netem_destroy(struct Qdisc *sch)
 {
        struct netem_sched_data *q = qdisc_priv(sch);
 
+       tasklet_kill(&q->tasklet);
+
        qdisc_watchdog_cancel(&q->watchdog);
        if (q->qdisc)
                qdisc_put(q->qdisc);

^ permalink raw reply related	[flat|nested] 5+ messages in thread

end of thread, other threads:[~2020-07-20  1:39 UTC | newest]

Thread overview: 5+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-09  6:10 amplifying qdisc Russell Strong
2020-07-09  6:26 ` Stephen Hemminger
2020-07-12  1:40   ` Russell Strong
2020-07-13 16:52     ` Stephen Hemminger
2020-07-20  1:39       ` Russell Strong

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).