Message ID: 20070508121322.GA21647@gondor.apana.org.au
Herbert Xu wrote:
> Sorry, I had forgotten that I've already concluded previously that
> this doesn't work because we don't want to prevent the interface
> from being brought up (and other reasons).  My memory is failing me :)
>
> So I think the best option now is to get rid of the delay on carrier
> on events for everyone.

Sounds good to me.  So I'll use this change instead.

Subject: xen: go back to using normal network stack carriers

This effectively reverts xen-unstable change 14280:42b29f084c31.
Herbert has changed the behaviour of the core networking to not delay
an initial down->up transition, and so the timing concern has been
solved.

Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
Cc: Herbert Xu <herbert@gondor.apana.org.au>
Cc: Keir Fraser <Keir.Fraser@cl.cam.ac.uk>

diff -r 282bef511e66 drivers/net/xen-netfront.c
--- a/drivers/net/xen-netfront.c	Tue May 08 13:11:18 2007 -0700
+++ b/drivers/net/xen-netfront.c	Tue May 08 13:14:47 2007 -0700
@@ -95,7 +95,6 @@ struct netfront_info {
 	unsigned int evtchn;
 	unsigned int copying_receiver;
 
-	unsigned int carrier;
 
 	/* Receive-ring batched refills. */
 #define RX_MIN_TARGET 8
@@ -142,15 +141,6 @@ struct netfront_rx_info {
 };
 
 /*
- * Implement our own carrier flag: the network stack's version causes delays
- * when the carrier is re-enabled (in particular, dev_activate() may not
- * immediately be called, which can cause packet loss).
- */
-#define netfront_carrier_on(netif)	((netif)->carrier = 1)
-#define netfront_carrier_off(netif)	((netif)->carrier = 0)
-#define netfront_carrier_ok(netif)	((netif)->carrier)
-
-/*
  * Access macros for acquiring freeing slots in tx_skbs[].
  */
@@ -241,7 +231,7 @@ static void xennet_alloc_rx_buffers(stru
 	int nr_flips;
 	struct xen_netif_rx_request *req;
 
-	if (unlikely(!netfront_carrier_ok(np)))
+	if (unlikely(!netif_carrier_ok(dev)))
 		return;
 
 	/*
@@ -380,7 +370,7 @@ static int xennet_open(struct net_device
 	memset(&np->stats, 0, sizeof(np->stats));
 
 	spin_lock_bh(&np->rx_lock);
-	if (netfront_carrier_ok(np)) {
+	if (netif_carrier_ok(dev)) {
 		xennet_alloc_rx_buffers(dev);
 		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
@@ -400,7 +390,7 @@ static void xennet_tx_buf_gc(struct net_
 	struct netfront_info *np = netdev_priv(dev);
 	struct sk_buff *skb;
 
-	BUG_ON(!netfront_carrier_ok(np));
+	BUG_ON(!netif_carrier_ok(dev));
 
 	do {
 		prod = np->tx.sring->rsp_prod;
@@ -540,7 +530,7 @@ static int xennet_start_xmit(struct sk_b
 
 	spin_lock_irq(&np->tx_lock);
 
-	if (unlikely(!netfront_carrier_ok(np) ||
+	if (unlikely(!netif_carrier_ok(dev) ||
 		     (frags > 1 && !xennet_can_sg(dev)) ||
 		     netif_needs_gso(dev, skb))) {
 		spin_unlock_irq(&np->tx_lock);
@@ -973,7 +963,7 @@ static int xennet_poll(struct net_device
 
 	spin_lock(&np->rx_lock);
 
-	if (unlikely(!netfront_carrier_ok(np))) {
+	if (unlikely(!netif_carrier_ok(dev))) {
 		spin_unlock(&np->rx_lock);
 		return 0;
 	}
@@ -1308,7 +1298,7 @@ static struct net_device * __devinit xen
 
 	np->netdev = netdev;
 
-	netfront_carrier_off(np);
+	netif_carrier_off(netdev);
 
 	return netdev;
@@ -1376,7 +1366,7 @@ static void xennet_disconnect_backend(st
 	/* Stop old i/f to prevent errors whilst we rebuild the state. */
 	spin_lock_bh(&info->rx_lock);
 	spin_lock_irq(&info->tx_lock);
-	netfront_carrier_off(info);
+	netif_carrier_off(info->netdev);
 	spin_unlock_irq(&info->tx_lock);
 	spin_unlock_bh(&info->rx_lock);
@@ -1440,7 +1430,7 @@ static irqreturn_t xennet_interrupt(int
 
 	spin_lock_irqsave(&np->tx_lock, flags);
 
-	if (likely(netfront_carrier_ok(np))) {
+	if (likely(netif_carrier_ok(dev))) {
 		xennet_tx_buf_gc(dev);
 		/* Under tx_lock: protects access to rx shared-ring indexes. */
 		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
@@ -1728,7 +1718,7 @@ static int xennet_connect(struct net_dev
 	 * domain a kick because we've probably just requeued some
 	 * packets.
 	 */
-	netfront_carrier_on(np);
+	netif_carrier_on(np->netdev);
 	notify_remote_via_irq(np->netdev->irq);
 	xennet_tx_buf_gc(dev);
 	xennet_alloc_rx_buffers(dev);
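A minimal sketch of the pattern the patch above adopts, i.e. driving
the stack-managed carrier state instead of a private flag.  This is a
hypothetical driver with illustrative names, not netfront itself; only
the netif_carrier_*() helpers are real kernel API:

#include <linux/netdevice.h>

/* Called when the (virtual) link changes state. */
static void example_link_change(struct net_device *dev, bool link_up)
{
	if (link_up)
		netif_carrier_on(dev);	/* queues a linkwatch event */
	else
		netif_carrier_off(dev);
}

/* Hot paths then test the stack-managed flag directly. */
static bool example_link_ok(struct net_device *dev)
{
	return netif_carrier_ok(dev);
}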
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Tue, 8 May 2007 22:13:22 +1000

> [NET] link_watch: Move link watch list into net_device
>
> These days the link watch mechanism is an integral part of the
> network subsystem as it manages the carrier status.  So it now
> makes sense to allocate some memory for it in net_device rather
> than allocating it on demand.
>
> In fact, this is necessary because we can't tolerate a memory
> allocation failure since that means we'd have to potentially
> throw a link up event away.
>
> It also simplifies the code greatly.
>
> In doing so I discovered a subtle race condition in the use
> of singleevent.  This race condition still exists (and is
> somewhat magnified) without singleevent but it's now plugged
> thanks to an smp_mb__before_clear_bit.
>
> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Applied, thanks Herbert.
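The design point in that commit message, that queueing an event must
never fail and so the list linkage is embedded in net_device itself,
can be shown with a small standalone model.  This is a hypothetical
single-threaded userspace sketch with names loosely mirroring the
kernel's, not the kernel code:

#include <assert.h>
#include <stddef.h>

struct device {
	int pending;			/* models __LINK_STATE_LINKWATCH_PENDING */
	struct device *link_watch_next;	/* embedded linkage: no kmalloc needed */
};

static struct device *lweventlist;	/* singly-linked event list */

static void fire_event(struct device *dev)
{
	if (dev->pending)	/* already queued; nothing to allocate, */
		return;		/* so nothing that can fail */
	dev->pending = 1;
	dev->link_watch_next = lweventlist;
	lweventlist = dev;
}

static void run_queue(void)
{
	struct device *next = lweventlist;

	lweventlist = NULL;
	while (next) {
		struct device *dev = next;

		next = dev->link_watch_next;	/* read linkage first... */
		dev->pending = 0;		/* ...then allow requeueing */
	}
}

int main(void)
{
	struct device eth0 = { 0 };

	fire_event(&eth0);
	fire_event(&eth0);	/* idempotent while pending */
	run_queue();
	assert(!eth0.pending);
	return 0;
}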
On Tue, May 08, 2007 at 01:19:33PM -0700, Jeremy Fitzhardinge wrote:
>
> Subject: xen: go back to using normal network stack carriers
>
> This effectively reverts xen-unstable change
> 14280:42b29f084c31.  Herbert has changed the behaviour of the core
> networking to not delay an initial down->up transition, and so the
> timing concern has been solved.
>
> Signed-off-by: Jeremy Fitzhardinge <jeremy@xensource.com>
> Cc: Herbert Xu <herbert@gondor.apana.org.au>
> Cc: Keir Fraser <Keir.Fraser@cl.cam.ac.uk>

Looks good to me.  Thanks,
Herbert Xu wrote:
> [NET] link_watch: Move link watch list into net_device
>
> These days the link watch mechanism is an integral part of the
> network subsystem as it manages the carrier status.  So it now
> makes sense to allocate some memory for it in net_device rather
> than allocating it on demand.

I think there's a problem with one of these two patches.  I've been
noticing that one of my events/X threads has been going into a spin
for about 5 mins after boot.  I added some debugging to
kernel/workqueue.c:run_workqueue, since it's that loop which seems to
be spinning due to list corruption.

When I look to see if that loop has iterated more than 100 times in
one go (which seems unlikely), I get this:

BUG: cpu 3, count=101 list screwup on c04babe4, func c03217e8
func=linkwatch_event+0x0/0x2a
 [<c0109173>] show_trace_log_lvl+0x1a/0x30
 [<c0109c7f>] show_trace+0x12/0x14
 [<c0109d0c>] dump_stack+0x16/0x18
 [<c0137c25>] run_workqueue+0x97/0x18c
 [<c01386a4>] worker_thread+0xe5/0xf5
 [<c013afe9>] kthread+0x3b/0x62
 [<c0108d47>] kernel_thread_helper+0x7/0x10
 =======================

I wonder if the problem is that the linkwatch_work is being rescheduled
when it's already been scheduled, or something like that?

J
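Jeremy's actual instrumentation isn't shown in the thread.  A
standalone sketch of the idea, bounding the loop and complaining when
it runs implausibly long, which is a cheap way to catch exactly the
self-requeueing cycle suspected above, might look like this
(hypothetical names, not the real patch):

#include <stdio.h>

struct node { struct node *next; };

static void drain(struct node *head)
{
	int count = 0;
	struct node *n;

	for (n = head; n; n = n->next) {
		if (++count > 100) {	/* far beyond any plausible queue depth */
			fprintf(stderr, "BUG: count=%d list screwup on %p\n",
				count, (void *)n);
			return;
		}
		/* ... process n ... */
	}
}

int main(void)
{
	struct node a, b;

	a.next = &b;
	b.next = &a;	/* corruption: the cycle a self-requeue would create */
	drain(&a);
	return 0;
}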
From: Jeremy Fitzhardinge <jeremy@goop.org>
Date: Thu, 10 May 2007 15:00:05 -0700

> Herbert Xu wrote:
> > [NET] link_watch: Move link watch list into net_device
> >
> > These days the link watch mechanism is an integral part of the
> > network subsystem as it manages the carrier status.  So it now
> > makes sense to allocate some memory for it in net_device rather
> > than allocating it on demand.
>
> I think there's a problem with one of these two patches.

Yes, there are :-)

Did you catch the follow-on bug fixes?
David Miller wrote:
> From: Jeremy Fitzhardinge <jeremy@goop.org>
> Date: Thu, 10 May 2007 15:00:05 -0700
>
>> Herbert Xu wrote:
>>> [NET] link_watch: Move link watch list into net_device
>>>
>>> These days the link watch mechanism is an integral part of the
>>> network subsystem as it manages the carrier status.  So it now
>>> makes sense to allocate some memory for it in net_device rather
>>> than allocating it on demand.
>>
>> I think there's a problem with one of these two patches.
>
> Yes, there are :-)
>
> Did you catch the follow-on bug fixes?

Nope.  Guess I got dropped from the cc: list.  Was this on lkml or
netdev?

J
On Thu, 10 May 2007 15:00:05 -0700
Jeremy Fitzhardinge <jeremy@goop.org> wrote:

> Herbert Xu wrote:
> > [NET] link_watch: Move link watch list into net_device
> >
> > These days the link watch mechanism is an integral part of the
> > network subsystem as it manages the carrier status.  So it now
> > makes sense to allocate some memory for it in net_device rather
> > than allocating it on demand.
>
> I think there's a problem with one of these two patches.  I've been
> noticing that one of my events/X threads has been going into a spin
> for about 5 mins after boot.  I added some debugging to
> kernel/workqueue.c:run_workqueue, since it's that loop which seems to
> be spinning due to list corruption.
>
> When I look to see if that loop has iterated more than 100 times in
> one go (which seems unlikely), I get this:
>
> BUG: cpu 3, count=101 list screwup on c04babe4, func c03217e8
> func=linkwatch_event+0x0/0x2a
>  [<c0109173>] show_trace_log_lvl+0x1a/0x30
>  [<c0109c7f>] show_trace+0x12/0x14
>  [<c0109d0c>] dump_stack+0x16/0x18
>  [<c0137c25>] run_workqueue+0x97/0x18c
>  [<c01386a4>] worker_thread+0xe5/0xf5
>  [<c013afe9>] kthread+0x3b/0x62
>  [<c0108d47>] kernel_thread_helper+0x7/0x10
>  =======================
>
> I wonder if the problem is that the linkwatch_work is being
> rescheduled when it's already been scheduled, or something like that?

Five minutes after boot is when jiffies wraps.  Are you sure it's a
list-screwup rather than a jiffy-wrap screwup?
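Why five minutes: the kernel deliberately initialises jiffies to
INITIAL_JIFFIES, roughly 300*HZ short of the 32-bit wrap point, so that
wrap bugs surface early rather than 49 days in.  A standalone model of
the wrap-safe versus naive comparison, illustrative only (the real
macros live in <linux/jiffies.h>):

#include <stdio.h>
#include <stdint.h>

#define HZ 1000
#define INITIAL_JIFFIES ((uint32_t)(-300 * HZ))	/* wraps ~5 minutes in */

/* Wrap-safe ordering, modelled on the kernel's time_after(). */
static int time_after32(uint32_t a, uint32_t b)
{
	return (int32_t)(b - a) < 0;
}

int main(void)
{
	uint32_t jiffies  = INITIAL_JIFFIES + 301 * HZ;	/* just past the wrap */
	uint32_t deadline = INITIAL_JIFFIES + 299 * HZ;	/* just before it */

	printf("naive a > b: %d\n", jiffies > deadline);	/* 0: fooled by wrap */
	printf("time_after : %d\n", time_after32(jiffies, deadline));	/* 1: correct */
	return 0;
}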
Andrew Morton wrote:
> Five minutes after boot is when jiffies wraps.  Are you sure it's a
> list-screwup rather than a jiffy-wrap screwup?

Hm, it's suggestive, isn't it?  Apparently they've already fixed this
in the sekret networking clubhouse, so I'll need to track it down.

J
From: Jeremy Fitzhardinge <jeremy@goop.org>
Date: Thu, 10 May 2007 15:22:17 -0700

> Andrew Morton wrote:
> > Five minutes after boot is when jiffies wraps.  Are you sure it's
> > a list-screwup rather than a jiffy-wrap screwup?
>
> Hm, it's suggestive, isn't it?  Apparently they've already fixed this
> in the sekret networking clubhouse, so I'll need to track it down.

I'm not so certain now that we know it's the jiffies wrap point :-)

The fixes in question are attached below and they were posted and
discussed on netdev:

--------------------
commit fe47cdba83b3041e4ac1aa1418431020a4afe1e0
Author: Herbert Xu <herbert@gondor.apana.org.au>
Date:   Tue May 8 23:22:43 2007 -0700

    [NET] link_watch: Eliminate potential delay on wrap-around

    When the jiffies wrap around or when the system boots up for the
    first time, down events can be delayed indefinitely since we no
    longer update linkwatch_nextevent when only urgent events are
    processed.

    This patch fixes this by setting linkwatch_nextevent when a
    wrap-around occurs.

    Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
    Signed-off-by: David S. Miller <davem@davemloft.net>

diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index b5f4579..4674ae5 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -101,8 +101,10 @@ static void linkwatch_schedule_work(unsigned long delay)
 		return;
 
 	/* If we wrap around we'll delay it by at most HZ. */
-	if (delay > HZ)
+	if (delay > HZ) {
+		linkwatch_nextevent = jiffies;
 		delay = 0;
+	}
 
 	schedule_delayed_work(&linkwatch_work, delay);
 }
--------------------
commit 4cba637dbb9a13706494a1c85174c8e736914010
Author: Herbert Xu <herbert@gondor.apana.org.au>
Date:   Wed May 9 00:17:30 2007 -0700

    [NET] link_watch: Always schedule urgent events

    Urgent events may be delayed if we already have a non-urgent event
    queued for that device.  This patch changes this by making sure
    that an urgent event is always looked at immediately.

    I've replaced the LW_RUNNING flag by LW_URGENT since whether work
    is scheduled is already kept track of by the work queue system.

    The only complication is that we have to provide some exclusion
    for the setting of linkwatch_nextevent, which is handled in the
    actual work function.

    Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
    Signed-off-by: David S. Miller <davem@davemloft.net>

diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index 4674ae5..a5e372b 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -26,7 +26,7 @@
 
 enum lw_bits {
-	LW_RUNNING = 0,
+	LW_URGENT = 0,
 };
 
 static unsigned long linkwatch_flags;
@@ -95,18 +95,41 @@ static void linkwatch_add_event(struct net_device *dev)
 }
 
-static void linkwatch_schedule_work(unsigned long delay)
+static void linkwatch_schedule_work(int urgent)
 {
-	if (test_and_set_bit(LW_RUNNING, &linkwatch_flags))
+	unsigned long delay = linkwatch_nextevent - jiffies;
+
+	if (test_bit(LW_URGENT, &linkwatch_flags))
 		return;
 
-	/* If we wrap around we'll delay it by at most HZ. */
-	if (delay > HZ) {
-		linkwatch_nextevent = jiffies;
+	/* Minimise down-time: drop delay for up event. */
+	if (urgent) {
+		if (test_and_set_bit(LW_URGENT, &linkwatch_flags))
+			return;
 		delay = 0;
 	}
 
-	schedule_delayed_work(&linkwatch_work, delay);
+	/* If we wrap around we'll delay it by at most HZ. */
+	if (delay > HZ)
+		delay = 0;
+
+	/*
+	 * This is true if we've scheduled it immediately or if we don't
+	 * need an immediate execution and it's already pending.
+	 */
+	if (schedule_delayed_work(&linkwatch_work, delay) == !delay)
+		return;
+
+	/* Don't bother if there is nothing urgent. */
+	if (!test_bit(LW_URGENT, &linkwatch_flags))
+		return;
+
+	/* It's already running which is good enough. */
+	if (!cancel_delayed_work(&linkwatch_work))
+		return;
+
+	/* Otherwise we reschedule it again for immediate execution. */
+	schedule_delayed_work(&linkwatch_work, 0);
 }
@@ -123,7 +146,11 @@ static void __linkwatch_run_queue(int urgent_only)
 	 */
 	if (!urgent_only)
 		linkwatch_nextevent = jiffies + HZ;
-	clear_bit(LW_RUNNING, &linkwatch_flags);
+	/* Limit wrap-around effect on delay. */
+	else if (time_after(linkwatch_nextevent, jiffies + HZ))
+		linkwatch_nextevent = jiffies;
+
+	clear_bit(LW_URGENT, &linkwatch_flags);
 
 	spin_lock_irq(&lweventlist_lock);
 	next = lweventlist;
@@ -166,7 +193,7 @@ static void __linkwatch_run_queue(int urgent_only)
 	}
 
 	if (lweventlist)
-		linkwatch_schedule_work(linkwatch_nextevent - jiffies);
+		linkwatch_schedule_work(0);
 }
@@ -187,21 +214,16 @@ static void linkwatch_event(struct work_struct *dummy)
 
 void linkwatch_fire_event(struct net_device *dev)
 {
-	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
-		unsigned long delay;
+	int urgent = linkwatch_urgent_event(dev);
 
+	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
 		dev_hold(dev);
 		linkwatch_add_event(dev);
+	} else if (!urgent)
+		return;
 
-		delay = linkwatch_nextevent - jiffies;
-
-		/* Minimise down-time: drop delay for up event. */
-		if (linkwatch_urgent_event(dev))
-			delay = 0;
-
-		linkwatch_schedule_work(delay);
-	}
+	linkwatch_schedule_work(urgent);
 }
 
 EXPORT_SYMBOL(linkwatch_fire_event);
--------------------
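The subtlest line in the second patch is the return-value test
"schedule_delayed_work(...) == !delay".  schedule_delayed_work()
returns 1 if it queued the work and 0 if the work was already pending,
so the test succeeds in exactly two cases: we just queued it with zero
delay, or it was already pending and we didn't need it immediately.  A
standalone model of that decision, hypothetical and just to unpack the
logic:

#include <stdio.h>

/* queued: what schedule_delayed_work() reports (1 = newly queued). */
static const char *decide(int queued, unsigned long delay)
{
	if (queued == !delay)
		return "done";
	return "fall through: maybe cancel + requeue immediately";
}

int main(void)
{
	printf("queued=1 delay=0: %s\n", decide(1, 0));	/* runs now */
	printf("queued=0 delay>0: %s\n", decide(0, 5));	/* pending, no rush */
	printf("queued=1 delay>0: %s\n", decide(1, 5));	/* queued, but maybe urgent */
	printf("queued=0 delay=0: %s\n", decide(0, 0));	/* pending, wanted now */
	return 0;
}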
David Miller wrote:
> I'm not so certain now that we know it's the jiffies wrap point :-)
>
> The fixes in question are attached below and they were posted and
> discussed on netdev:

Yep, this patch gets rid of my spinning thread.  I can't find this
patch or any discussion of it on marc.info; is there a better netdev
list archive?

J
* Jeremy Fitzhardinge (jeremy@goop.org) wrote:
> Yep, this patch gets rid of my spinning thread.  I can't find this
> patch or any discussion of it on marc.info; is there a better netdev
> list archive?

See the "linkwatch bustage in git-net" thread on netdev:

  http://thread.gmane.org/gmane.linux.network/61800/focus=61812
From: Jeremy Fitzhardinge <jeremy@goop.org>
Date: Thu, 10 May 2007 15:45:42 -0700

> David Miller wrote:
> > I'm not so certain now that we know it's the jiffies wrap point :-)
> >
> > The fixes in question are attached below and they were posted and
> > discussed on netdev:
>
> Yep, this patch gets rid of my spinning thread.  I can't find this
> patch or any discussion of it on marc.info; is there a better netdev
> list archive?

I don't see it there either... let me check my mail archive...

Indeed, they were "posted" to netdev but were blocked by the vger
regexp filters on the keyword "urgent", so the postings never made it
to the list.

I removed that filter regexp so that this never happens again, sorry.
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3044622..f671cd2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -467,6 +467,8 @@ struct net_device
 	/* device index hash chain */
 	struct hlist_node	index_hlist;
 
+	struct net_device	*link_watch_next;
+
 	/* register/unregister state machine */
 	enum { NETREG_UNINITIALIZED=0,
 	       NETREG_REGISTERED,	/* completed register_netdevice */
diff --git a/net/core/link_watch.c b/net/core/link_watch.c
index e3c26a9..71a35da 100644
--- a/net/core/link_watch.c
+++ b/net/core/link_watch.c
@@ -19,7 +19,6 @@
 #include <linux/rtnetlink.h>
 #include <linux/jiffies.h>
 #include <linux/spinlock.h>
-#include <linux/list.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 #include <linux/bitops.h>
@@ -28,7 +27,6 @@
 
 enum lw_bits {
 	LW_RUNNING = 0,
-	LW_SE_USED
 };
 
 static unsigned long linkwatch_flags;
@@ -37,17 +35,9 @@ static unsigned long linkwatch_nextevent;
 static void linkwatch_event(struct work_struct *dummy);
 static DECLARE_DELAYED_WORK(linkwatch_work, linkwatch_event);
 
-static LIST_HEAD(lweventlist);
+static struct net_device *lweventlist;
 static DEFINE_SPINLOCK(lweventlist_lock);
 
-struct lw_event {
-	struct list_head list;
-	struct net_device *dev;
-};
-
-/* Avoid kmalloc() for most systems */
-static struct lw_event singleevent;
-
 static unsigned char default_operstate(const struct net_device *dev)
 {
 	if (!netif_carrier_ok(dev))
@@ -90,21 +80,23 @@ static void rfc2863_policy(struct net_device *dev)
 /* Must be called with the rtnl semaphore held */
 void linkwatch_run_queue(void)
 {
-	struct list_head head, *n, *next;
+	struct net_device *next;
 
 	spin_lock_irq(&lweventlist_lock);
-	list_replace_init(&lweventlist, &head);
+	next = lweventlist;
+	lweventlist = NULL;
 	spin_unlock_irq(&lweventlist_lock);
 
-	list_for_each_safe(n, next, &head) {
-		struct lw_event *event = list_entry(n, struct lw_event, list);
-		struct net_device *dev = event->dev;
+	while (next) {
+		struct net_device *dev = next;
 
-		if (event == &singleevent) {
-			clear_bit(LW_SE_USED, &linkwatch_flags);
-		} else {
-			kfree(event);
-		}
+		next = dev->link_watch_next;
+
+		/*
+		 * Make sure the above read is complete since it can be
+		 * rewritten as soon as we clear the bit below.
+		 */
+		smp_mb__before_clear_bit();
 
 		/* We are about to handle this device,
 		 * so new events can be accepted
@@ -147,24 +139,12 @@
 void linkwatch_fire_event(struct net_device *dev)
 {
 	if (!test_and_set_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state)) {
 		unsigned long flags;
-		struct lw_event *event;
-
-		if (test_and_set_bit(LW_SE_USED, &linkwatch_flags)) {
-			event = kmalloc(sizeof(struct lw_event), GFP_ATOMIC);
-
-			if (unlikely(event == NULL)) {
-				clear_bit(__LINK_STATE_LINKWATCH_PENDING, &dev->state);
-				return;
-			}
-		} else {
-			event = &singleevent;
-		}
 
 		dev_hold(dev);
-		event->dev = dev;
 
 		spin_lock_irqsave(&lweventlist_lock, flags);
-		list_add_tail(&event->list, &lweventlist);
+		dev->link_watch_next = lweventlist;
+		lweventlist = dev;
 		spin_unlock_irqrestore(&lweventlist_lock, flags);
 
 		if (!test_and_set_bit(LW_RUNNING, &linkwatch_flags)) {
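The smp_mb__before_clear_bit() added in linkwatch_run_queue() above
guards a narrow window.  As an interleaving sketch (hypothetical CPU
numbering) of what could happen if the read of link_watch_next were
reordered after the clearing of the PENDING bit:

  CPU0 (linkwatch_run_queue)          CPU1 (linkwatch_fire_event)
  --------------------------          ---------------------------
  clears PENDING (reordered early)
                                      test_and_set PENDING -> was 0
                                      dev->link_watch_next = lweventlist
  next = dev->link_watch_next         <- sees CPU1's store: list corrupt

The barrier forces the read of link_watch_next to complete before the
PENDING bit is cleared, so a concurrent requeue cannot rewrite the
pointer out from under the walker.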