From: Andrew Morton <akpm@digeo.com>
To: Linus Torvalds <torvalds@transmeta.com>
Cc: lkml <linux-kernel@vger.kernel.org>
Subject: [patch 2/4] use prepare_to_wait in VM/VFS
Date: Wed, 25 Sep 2002 21:07:58 -0700
Message-ID: <3D92881E.E047A58F@digeo.com>



The patch uses the new wakeup machinery in some hot parts of the VFS
and block layers: wait_on_buffer(), wait_on_page(), lock_page() and
blk_congestion_wait().  It is also used in get_request_wait(), although
the benefit for exclusive wakeups will be lower there.
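
For reference, the shape of each conversion is the sketch below (not
code from the patch; `wqh' and `condition' are placeholders).
prepare_to_wait() queues the waiter and sets the task state under the
waitqueue lock; finish_wait() sets TASK_RUNNING and dequeues the waiter
if a wakeup has not already done so (DEFINE_WAIT() uses
autoremove_wake_function(), so the waker removes woken tasks from the
queue).

	DEFINE_WAIT(wait);	/* on-stack waiter, autoremoved at wakeup */

	do {
		/* queue the waiter, then set the task state */
		prepare_to_wait(&wqh, &wait, TASK_UNINTERRUPTIBLE);
		if (!condition)
			schedule();
	} while (!condition);
	finish_wait(&wqh, &wait);	/* TASK_RUNNING; dequeue if needed */

This replaces the old set_current_state()/add_wait_queue() dance and
its open-coded teardown.  prepare_to_wait_exclusive() is the variant
for queues where a wakeup should run only one waiter, as in
get_request_wait().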



 drivers/block/ll_rw_blk.c |   17 +++++---------
 fs/buffer.c               |   16 +++++--------
 include/linux/pagemap.h   |    8 +++++-
 mm/filemap.c              |   55 ++++++++++++++++------------------------------
 4 files changed, 40 insertions(+), 56 deletions(-)

--- 2.5.38/drivers/block/ll_rw_blk.c~vm-wakeups	Sun Sep 22 21:01:45 2002
+++ 2.5.38-akpm/drivers/block/ll_rw_blk.c	Sun Sep 22 21:01:45 2002
@@ -1233,24 +1233,23 @@ static struct request *get_request(reque
  */
 static struct request *get_request_wait(request_queue_t *q, int rw)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 	struct request_list *rl = &q->rq[rw];
 	struct request *rq;
 
 	spin_lock_prefetch(q->queue_lock);
 
 	generic_unplug_device(q);
-	add_wait_queue_exclusive(&rl->wait, &wait);
 	do {
-		set_current_state(TASK_UNINTERRUPTIBLE);
+		prepare_to_wait_exclusive(&rl->wait, &wait,
+					TASK_UNINTERRUPTIBLE);
 		if (!rl->count)
 			schedule();
+		finish_wait(&rl->wait, &wait);
 		spin_lock_irq(q->queue_lock);
 		rq = get_request(q, rw);
 		spin_unlock_irq(q->queue_lock);
 	} while (rq == NULL);
-	remove_wait_queue(&rl->wait, &wait);
-	current->state = TASK_RUNNING;
 	return rq;
 }
 
@@ -1460,18 +1459,16 @@ void blk_put_request(struct request *req
  */
 void blk_congestion_wait(int rw, long timeout)
 {
-	DECLARE_WAITQUEUE(wait, current);
+	DEFINE_WAIT(wait);
 	struct congestion_state *cs = &congestion_states[rw];
 
 	if (atomic_read(&cs->nr_congested_queues) == 0)
 		return;
 	blk_run_queues();
-	set_current_state(TASK_UNINTERRUPTIBLE);
-	add_wait_queue(&cs->wqh, &wait);
+	prepare_to_wait(&cs->wqh, &wait, TASK_UNINTERRUPTIBLE);
 	if (atomic_read(&cs->nr_congested_queues) != 0)
 		schedule_timeout(timeout);
-	set_current_state(TASK_RUNNING);
-	remove_wait_queue(&cs->wqh, &wait);
+	finish_wait(&cs->wqh, &wait);
 }
 
 /*
--- 2.5.38/fs/buffer.c~vm-wakeups	Sun Sep 22 21:01:45 2002
+++ 2.5.38-akpm/fs/buffer.c	Sun Sep 22 21:01:45 2002
@@ -128,22 +128,18 @@ void unlock_buffer(struct buffer_head *b
  */
 void __wait_on_buffer(struct buffer_head * bh)
 {
-	wait_queue_head_t *wq = bh_waitq_head(bh);
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
+	wait_queue_head_t *wqh = bh_waitq_head(bh);
+	DEFINE_WAIT(wait);
 
 	get_bh(bh);
-	add_wait_queue(wq, &wait);
 	do {
+		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
 		blk_run_queues();
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		if (!buffer_locked(bh))
-			break;
-		schedule();
+		if (buffer_locked(bh))
+			schedule();
 	} while (buffer_locked(bh));
-	tsk->state = TASK_RUNNING;
-	remove_wait_queue(wq, &wait);
 	put_bh(bh);
+	finish_wait(wqh, &wait);
 }
 
 static inline void
--- 2.5.38/mm/filemap.c~vm-wakeups	Sun Sep 22 21:01:45 2002
+++ 2.5.38-akpm/mm/filemap.c	Sun Sep 22 21:01:45 2002
@@ -632,19 +632,15 @@ static inline wait_queue_head_t *page_wa
 void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	wait_queue_head_t *waitqueue = page_waitqueue(page);
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
+	DEFINE_WAIT(wait);
 
-	add_wait_queue(waitqueue, &wait);
 	do {
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		if (!test_bit(bit_nr, &page->flags))
-			break;
+		prepare_to_wait(waitqueue, &wait, TASK_UNINTERRUPTIBLE);
 		sync_page(page);
-		schedule();
+		if (test_bit(bit_nr, &page->flags))
+			schedule();
 	} while (test_bit(bit_nr, &page->flags));
-	__set_task_state(tsk, TASK_RUNNING);
-	remove_wait_queue(waitqueue, &wait);
+	finish_wait(waitqueue, &wait);
 }
 EXPORT_SYMBOL(wait_on_page_bit);
 
@@ -690,38 +686,27 @@ void end_page_writeback(struct page *pag
 EXPORT_SYMBOL(end_page_writeback);
 
 /*
- * Get a lock on the page, assuming we need to sleep
- * to get it..
+ * Get a lock on the page, assuming we need to sleep to get it.
+ *
+ * Ugly: running sync_page() in state TASK_UNINTERRUPTIBLE is scary.  If some
+ * random driver's requestfn sets TASK_RUNNING, we could busywait.  However
+ * chances are that on the second loop, the block layer's plug list is empty,
+ * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-static void __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
-	wait_queue_head_t *waitqueue = page_waitqueue(page);
-	struct task_struct *tsk = current;
-	DECLARE_WAITQUEUE(wait, tsk);
+	wait_queue_head_t *wqh = page_waitqueue(page);
+	DEFINE_WAIT(wait);
 
-	add_wait_queue_exclusive(waitqueue, &wait);
-	for (;;) {
-		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
-		if (PageLocked(page)) {
-			sync_page(page);
+	while (TestSetPageLocked(page)) {
+		prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
+		sync_page(page);
+		if (PageLocked(page))
 			schedule();
-		}
-		if (!TestSetPageLocked(page))
-			break;
 	}
-	__set_task_state(tsk, TASK_RUNNING);
-	remove_wait_queue(waitqueue, &wait);
-}
-
-/*
- * Get an exclusive lock on the page, optimistically
- * assuming it's not locked..
- */
-void lock_page(struct page *page)
-{
-	if (TestSetPageLocked(page))
-		__lock_page(page);
+	finish_wait(wqh, &wait);
 }
+EXPORT_SYMBOL(__lock_page);
 
 /*
  * a rather lightweight function, finding and getting a reference to a
--- 2.5.38/include/linux/pagemap.h~vm-wakeups	Sun Sep 22 21:01:45 2002
+++ 2.5.38-akpm/include/linux/pagemap.h	Sun Sep 22 21:01:45 2002
@@ -74,9 +74,15 @@ static inline void ___add_to_page_cache(
 	inc_page_state(nr_pagecache);
 }
 
-extern void FASTCALL(lock_page(struct page *page));
+extern void FASTCALL(__lock_page(struct page *page));
 extern void FASTCALL(unlock_page(struct page *page));
 
+static inline void lock_page(struct page *page)
+{
+	if (TestSetPageLocked(page))
+		__lock_page(page);
+}
+	
 /*
  * This is exported only for wait_on_page_locked/wait_on_page_writeback.
  * Never use this directly!
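
A usage note on the pagemap.h change (a sketch, not part of the patch;
`mapping' and `index' are placeholders): lock_page() callers need no
changes.  The uncontended case is now just the inline
TestSetPageLocked(), and only actual contention enters the out-of-line
__lock_page() sleep loop.

	struct page *page = find_get_page(mapping, index);

	if (page) {
		lock_page(page);	/* inline trylock; sleeps in
					 * __lock_page() only if the
					 * page is already locked */
		/* ... operate on the locked page ... */
		unlock_page(page);	/* clears PG_locked, wakes waiters */
		page_cache_release(page);
	}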
