--- linux/mm/highmem.c.orig	Mon Feb 26 23:33:17 2001
+++ linux/mm/highmem.c	Mon Feb 26 23:37:02 2001
@@ -217,6 +217,7 @@
 static inline void bounce_end_io (struct buffer_head *bh, int uptodate)
 {
 	struct page *page;
+	unsigned long flags;
 	struct list_head *tmp;
 	struct buffer_head *bh_orig = (struct buffer_head *)(bh->b_private);
 
@@ -231,27 +232,27 @@
 		__free_page(page);
 	else {
 		tmp = &emergency_pages;
-		spin_lock_irq(&emergency_lock);
+		spin_lock_irqsave(&emergency_lock, flags);
 		/*
 		 * We are abusing page->list to manage
 		 * the highmem emergency pool:
 		 */
 		list_add(&page->list, &emergency_pages);
 		nr_emergency_pages++;
-		spin_unlock_irq(&emergency_lock);
+		spin_unlock_irqrestore(&emergency_lock, flags);
 	}
 
 	if (nr_emergency_pages >= POOL_SIZE)
 		kmem_cache_free(bh_cachep, bh);
 	else {
 		tmp = &emergency_bhs;
-		spin_lock_irq(&emergency_lock);
+		spin_lock_irqsave(&emergency_lock, flags);
 		/*
 		 * Ditto in the bh case, here we abuse b_inode_buffers:
 		 */
 		list_add(&bh->b_inode_buffers, &emergency_bhs);
 		nr_emergency_bhs++;
-		spin_unlock_irq(&emergency_lock);
+		spin_unlock_irqrestore(&emergency_lock, flags);
 	}
 }
 
@@ -297,6 +298,12 @@
 		spin_unlock_irq(&emergency_lock);
 		if (page)
 			return page;
+
+		run_task_queue(&tq_disk);
+
+		current->policy |= SCHED_YIELD;
+		__set_current_state(TASK_RUNNING);
+		schedule();
 		goto repeat_alloc;
 	}
 
@@ -328,6 +335,12 @@
 		spin_unlock_irq(&emergency_lock);
 		if (bh)
 			return bh;
+
+		run_task_queue(&tq_disk);
+
+		current->policy |= SCHED_YIELD;
+		__set_current_state(TASK_RUNNING);
+		schedule();
 		goto repeat_alloc;
 	}
 
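
Two notes on what the patch changes, with sketches for reviewers.

First, the locking fix. bounce_end_io() runs at I/O completion time, potentially
in interrupt context where interrupts may already be disabled. The old
spin_unlock_irq() would re-enable interrupts unconditionally on unlock, which is
wrong in that case; the irqsave/irqrestore pair saves and restores the caller's
interrupt state instead. A minimal sketch of the pattern in isolation follows;
pool_lock and pool_put() are hypothetical names for illustration, not
identifiers from highmem.c:

	#include <linux/spinlock.h>
	#include <linux/list.h>

	static spinlock_t pool_lock = SPIN_LOCK_UNLOCKED;

	/* Safe to call from both process and interrupt context: */
	static void pool_put(struct list_head *item, struct list_head *pool)
	{
		unsigned long flags;

		/*
		 * spin_lock_irq()/spin_unlock_irq() would re-enable
		 * interrupts on unlock even if the caller had them
		 * disabled; irqsave/irqrestore preserves the caller's
		 * interrupt state in 'flags'.
		 */
		spin_lock_irqsave(&pool_lock, flags);
		list_add(item, pool);
		spin_unlock_irqrestore(&pool_lock, flags);
	}

Second, the retry path. When both the page allocator and the emergency pool
come up empty, the old code jumped straight back to repeat_alloc and
busy-spun. The added lines first run the disk task queue, so in-flight bounce
buffers can complete and refill the pool via bounce_end_io(), then mark the
task SCHED_YIELD and call schedule() to give up the CPU instead of spinning.
A sketch of that loop shape, assuming the 2.4-era APIs the patch itself uses
(try_alloc_pool_page() is a hypothetical stand-in for the locked pool lookup):

	#include <linux/sched.h>
	#include <linux/tqueue.h>
	#include <linux/mm.h>

	static struct page *alloc_bounce_page_sketch(void)
	{
		struct page *page;

	repeat_alloc:
		page = try_alloc_pool_page();	/* hypothetical helper */
		if (page)
			return page;

		/* Start queued block I/O so completed bounces refill the pool. */
		run_task_queue(&tq_disk);

		/* Yield the CPU rather than busy-wait on the pool. */
		current->policy |= SCHED_YIELD;
		__set_current_state(TASK_RUNNING);
		schedule();
		goto repeat_alloc;
	}

The yield matters because the allocating task may be the only thing standing
between the pool and the completions that would refill it; spinning with the
CPU held would just delay those completions on a uniprocessor box.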