All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 0/4] Page based O_DIRECT v2
@ 2009-08-18  8:34 Jens Axboe
  2009-08-18  8:34 ` [PATCH 1/4] direct-io: unify argument passing by adding a dio_args structure Jens Axboe
                   ` (5 more replies)
  0 siblings, 6 replies; 13+ messages in thread
From: Jens Axboe @ 2009-08-18  8:34 UTC (permalink / raw)
  To: linux-kernel; +Cc: zach.brown, hch

Hi,

Updated patchset for page based O_DIRECT. I didn't include the
loop bits this time, let's focus on getting these core bits into
shape and then loop is easily patchable on top of this.

Changes since last post:

- Changed do_dio() to generic_file_direct_IO() as per Christoph's
  suggestion.
- Split the first patch into two parts. One simply adds dio_args
  and maintains the current code, the next has the functional change
  but without changing file systems (except NFS).
- Add ->rw to dio_args (Christoph).
- A locking fixup. Not really related, but should be fixed up anyway.

There are at least two pending things to work on:

1) NFS is still broken, I get a crash in freeing some data that
   is not related to the pages. Will debug this.
2) As Christoph suggested, we need some way to wait for a dio
   when all segments are submitted. Currently it waits for each
   segment. Not sure how best to solve this issue, will think a
   bit more about this. Basically we need to pass down the wait
   list to the generic_file_direct_IO() and have that do the
   queue kick and wait.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 1/4] direct-io: unify argument passing by adding a dio_args structure
  2009-08-18  8:34 [PATCH 0/4] Page based O_DIRECT v2 Jens Axboe
@ 2009-08-18  8:34 ` Jens Axboe
  2009-08-18  8:34 ` [PATCH 2/4] direct-io: make O_DIRECT IO path be page based Jens Axboe
                   ` (4 subsequent siblings)
  5 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2009-08-18  8:34 UTC (permalink / raw)
  To: linux-kernel; +Cc: zach.brown, hch, Jens Axboe

The O_DIRECT IO path is a mess of arguments. Clean that up by passing
those arguments in a dedicated dio_args structure.

This is in preparation for changing the internal implementation to be
page based instead of using iovecs.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
 fs/block_dev.c              |    7 ++--
 fs/btrfs/inode.c            |    4 +--
 fs/direct-io.c              |   70 +++++++++++++++++++++++++++----------------
 fs/ext2/inode.c             |    8 ++---
 fs/ext3/inode.c             |   15 ++++-----
 fs/ext4/inode.c             |   15 ++++-----
 fs/fat/inode.c              |   12 +++----
 fs/gfs2/aops.c              |   11 ++----
 fs/hfs/inode.c              |    7 ++--
 fs/hfsplus/inode.c          |    8 ++--
 fs/jfs/inode.c              |    7 ++--
 fs/nfs/direct.c             |    9 ++----
 fs/nilfs2/inode.c           |    9 ++---
 fs/ocfs2/aops.c             |   11 ++-----
 fs/reiserfs/inode.c         |    7 +---
 fs/xfs/linux-2.6/xfs_aops.c |   19 ++++--------
 include/linux/fs.h          |   59 +++++++++++++++++++++---------------
 include/linux/nfs_fs.h      |    3 +-
 mm/filemap.c                |    8 +++--
 19 files changed, 141 insertions(+), 148 deletions(-)

diff --git a/fs/block_dev.c b/fs/block_dev.c
index 94dfda2..2e494fa 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -166,14 +166,13 @@ blkdev_get_blocks(struct inode *inode, sector_t iblock,
 }
 
 static ssize_t
-blkdev_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs)
+blkdev_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 
-	return blockdev_direct_IO_no_locking(rw, iocb, inode, I_BDEV(inode),
-				iov, offset, nr_segs, blkdev_get_blocks, NULL);
+	return blockdev_direct_IO_no_locking(iocb, inode, I_BDEV(inode),
+				args, blkdev_get_blocks, NULL);
 }
 
 int __sync_blockdev(struct block_device *bdev, int wait)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 272b9b2..094e3a7 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4308,9 +4308,7 @@ out:
 	return em;
 }
 
-static ssize_t btrfs_direct_IO(int rw, struct kiocb *iocb,
-			const struct iovec *iov, loff_t offset,
-			unsigned long nr_segs)
+static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	return -EINVAL;
 }
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 8b10b87..181848c 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -929,14 +929,14 @@ out:
  * Releases both i_mutex and i_alloc_sem
  */
 static ssize_t
-direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode, 
-	const struct iovec *iov, loff_t offset, unsigned long nr_segs, 
-	unsigned blkbits, get_block_t get_block, dio_iodone_t end_io,
-	struct dio *dio)
+direct_io_worker(struct kiocb *iocb, struct inode *inode, 
+	struct dio_args *args, unsigned blkbits, get_block_t get_block,
+	dio_iodone_t end_io, struct dio *dio)
 {
-	unsigned long user_addr; 
+	const struct iovec *iov = args->iov;
+	unsigned long user_addr;
 	unsigned long flags;
-	int seg;
+	int seg, rw = args->rw;
 	ssize_t ret = 0;
 	ssize_t ret2;
 	size_t bytes;
@@ -945,7 +945,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	dio->rw = rw;
 	dio->blkbits = blkbits;
 	dio->blkfactor = inode->i_blkbits - blkbits;
-	dio->block_in_file = offset >> blkbits;
+	dio->block_in_file = args->offset >> blkbits;
 
 	dio->get_block = get_block;
 	dio->end_io = end_io;
@@ -965,14 +965,14 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	if (unlikely(dio->blkfactor))
 		dio->pages_in_io = 2;
 
-	for (seg = 0; seg < nr_segs; seg++) {
-		user_addr = (unsigned long)iov[seg].iov_base;
+	for (seg = 0; seg < args->nr_segs; seg++) {
+		user_addr = (unsigned long) iov[seg].iov_base;
 		dio->pages_in_io +=
 			((user_addr+iov[seg].iov_len +PAGE_SIZE-1)/PAGE_SIZE
 				- user_addr/PAGE_SIZE);
 	}
 
-	for (seg = 0; seg < nr_segs; seg++) {
+	for (seg = 0; seg < args->nr_segs; seg++) {
 		user_addr = (unsigned long)iov[seg].iov_base;
 		dio->size += bytes = iov[seg].iov_len;
 
@@ -1076,7 +1076,7 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
 	spin_unlock_irqrestore(&dio->bio_lock, flags);
 
 	if (ret2 == 0) {
-		ret = dio_complete(dio, offset, ret);
+		ret = dio_complete(dio, args->offset, ret);
 		kfree(dio);
 	} else
 		BUG_ON(ret != -EIOCBQUEUED);
@@ -1106,10 +1106,9 @@ direct_io_worker(int rw, struct kiocb *iocb, struct inode *inode,
  * Additional i_alloc_sem locking requirements described inline below.
  */
 ssize_t
-__blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, const struct iovec *iov, loff_t offset, 
-	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-	int dio_lock_type)
+__blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+	struct block_device *bdev, struct dio_args *args, get_block_t get_block,
+	dio_iodone_t end_io, int dio_lock_type)
 {
 	int seg;
 	size_t size;
@@ -1118,10 +1117,11 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	unsigned bdev_blkbits = 0;
 	unsigned blocksize_mask = (1 << blkbits) - 1;
 	ssize_t retval = -EINVAL;
-	loff_t end = offset;
+	loff_t end = args->offset;
 	struct dio *dio;
 	int release_i_mutex = 0;
 	int acquire_i_mutex = 0;
+	int rw = args->rw;
 
 	if (rw & WRITE)
 		rw = WRITE_ODIRECT;
@@ -1129,18 +1129,18 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	if (bdev)
 		bdev_blkbits = blksize_bits(bdev_logical_block_size(bdev));
 
-	if (offset & blocksize_mask) {
+	if (args->offset & blocksize_mask) {
 		if (bdev)
 			 blkbits = bdev_blkbits;
 		blocksize_mask = (1 << blkbits) - 1;
-		if (offset & blocksize_mask)
+		if (args->offset & blocksize_mask)
 			goto out;
 	}
 
 	/* Check the memory alignment.  Blocks cannot straddle pages */
-	for (seg = 0; seg < nr_segs; seg++) {
-		addr = (unsigned long)iov[seg].iov_base;
-		size = iov[seg].iov_len;
+	for (seg = 0; seg < args->nr_segs; seg++) {
+		addr = (unsigned long) args->iov[seg].iov_base;
+		size = args->iov[seg].iov_len;
 		end += size;
 		if ((addr & blocksize_mask) || (size & blocksize_mask))  {
 			if (bdev)
@@ -1168,7 +1168,7 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	dio->lock_type = dio_lock_type;
 	if (dio_lock_type != DIO_NO_LOCKING) {
 		/* watch out for a 0 len io from a tricksy fs */
-		if (rw == READ && end > offset) {
+		if (rw == READ && end > args->offset) {
 			struct address_space *mapping;
 
 			mapping = iocb->ki_filp->f_mapping;
@@ -1177,8 +1177,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 				release_i_mutex = 1;
 			}
 
-			retval = filemap_write_and_wait_range(mapping, offset,
-							      end - 1);
+			retval = filemap_write_and_wait_range(mapping,
+							args->offset, end - 1);
 			if (retval) {
 				kfree(dio);
 				goto out;
@@ -1204,8 +1204,8 @@ __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
 	dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
 		(end > i_size_read(inode)));
 
-	retval = direct_io_worker(rw, iocb, inode, iov, offset,
-				nr_segs, blkbits, get_block, end_io, dio);
+	retval = direct_io_worker(iocb, inode, args, blkbits, get_block, end_io,
+					dio);
 
 	/*
 	 * In case of error extending write may have instantiated a few
@@ -1231,3 +1231,21 @@ out:
 	return retval;
 }
 EXPORT_SYMBOL(__blockdev_direct_IO);
+
+ssize_t generic_file_direct_IO(int rw, struct address_space *mapping,
+			       struct kiocb *iocb, const struct iovec *iov,
+			       loff_t offset, unsigned long nr_segs)
+{
+	struct dio_args args = {
+		.rw		= rw,
+		.iov		= iov,
+		.length		= iov_length(iov, nr_segs),
+		.offset		= offset,
+		.nr_segs	= nr_segs,
+	};
+
+	if (mapping->a_ops->direct_IO)
+		return mapping->a_ops->direct_IO(iocb, &args);
+
+	return -EINVAL;
+}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index e271303..e813df7 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -790,15 +790,13 @@ static sector_t ext2_bmap(struct address_space *mapping, sector_t block)
 	return generic_block_bmap(mapping,block,ext2_get_block);
 }
 
-static ssize_t
-ext2_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs)
+static ssize_t ext2_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				offset, nr_segs, ext2_get_block, NULL);
+	return blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, args,
+					ext2_get_block, NULL);
 }
 
 static int
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index b49908a..11dc0d1 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1713,9 +1713,7 @@ static int ext3_releasepage(struct page *page, gfp_t wait)
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
-			const struct iovec *iov, loff_t offset,
-			unsigned long nr_segs)
+static ssize_t ext3_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
@@ -1723,10 +1721,10 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
 	handle_t *handle;
 	ssize_t ret;
 	int orphan = 0;
-	size_t count = iov_length(iov, nr_segs);
+	size_t count = args->length;
 
-	if (rw == WRITE) {
-		loff_t final_size = offset + count;
+	if (args->rw == WRITE) {
+		loff_t final_size = args->offset + count;
 
 		if (final_size > inode->i_size) {
 			/* Credits for sb + inode write */
@@ -1746,8 +1744,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
 		}
 	}
 
-	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				 offset, nr_segs,
+	ret = blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, args,
 				 ext3_get_block, NULL);
 
 	if (orphan) {
@@ -1765,7 +1762,7 @@ static ssize_t ext3_direct_IO(int rw, struct kiocb *iocb,
 		if (inode->i_nlink)
 			ext3_orphan_del(handle, inode);
 		if (ret > 0) {
-			loff_t end = offset + ret;
+			loff_t end = args->offset + ret;
 			if (end > inode->i_size) {
 				ei->i_disksize = end;
 				i_size_write(inode, end);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index f9c642b..164fdb3 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3267,9 +3267,7 @@ static int ext4_releasepage(struct page *page, gfp_t wait)
  * crashes then stale disk data _may_ be exposed inside the file. But current
  * VFS code falls back into buffered path in that case so we are safe.
  */
-static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
-			      const struct iovec *iov, loff_t offset,
-			      unsigned long nr_segs)
+static ssize_t ext4_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
@@ -3277,10 +3275,10 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
 	handle_t *handle;
 	ssize_t ret;
 	int orphan = 0;
-	size_t count = iov_length(iov, nr_segs);
+	size_t count = args->length;
 
-	if (rw == WRITE) {
-		loff_t final_size = offset + count;
+	if (args->rw == WRITE) {
+		loff_t final_size = args->offset + count;
 
 		if (final_size > inode->i_size) {
 			/* Credits for sb + inode write */
@@ -3300,8 +3298,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
 		}
 	}
 
-	ret = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				 offset, nr_segs,
+	ret = blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, args,
 				 ext4_get_block, NULL);
 
 	if (orphan) {
@@ -3319,7 +3316,7 @@ static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
 		if (inode->i_nlink)
 			ext4_orphan_del(handle, inode);
 		if (ret > 0) {
-			loff_t end = offset + ret;
+			loff_t end = args->offset + ret;
 			if (end > inode->i_size) {
 				ei->i_disksize = end;
 				i_size_write(inode, end);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 8970d8c..9d41851 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -167,14 +167,12 @@ static int fat_write_end(struct file *file, struct address_space *mapping,
 	return err;
 }
 
-static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
-			     const struct iovec *iov,
-			     loff_t offset, unsigned long nr_segs)
+static ssize_t fat_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 
-	if (rw == WRITE) {
+	if (args->rw == WRITE) {
 		/*
 		 * FIXME: blockdev_direct_IO() doesn't use ->write_begin(),
 		 * so we need to update the ->mmu_private to block boundary.
@@ -184,7 +182,7 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
 		 *
 		 * Return 0, and fallback to normal buffered write.
 		 */
-		loff_t size = offset + iov_length(iov, nr_segs);
+		loff_t size = args->offset + args->length;
 		if (MSDOS_I(inode)->mmu_private < size)
 			return 0;
 	}
@@ -193,8 +191,8 @@ static ssize_t fat_direct_IO(int rw, struct kiocb *iocb,
 	 * FAT need to use the DIO_LOCKING for avoiding the race
 	 * condition of fat_get_block() and ->truncate().
 	 */
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				  offset, nr_segs, fat_get_block, NULL);
+	return blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, args,
+				  fat_get_block, NULL);
 }
 
 static sector_t _fat_bmap(struct address_space *mapping, sector_t block)
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 7ebae9a..a9422a2 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -1021,9 +1021,7 @@ static int gfs2_ok_for_dio(struct gfs2_inode *ip, int rw, loff_t offset)
 
 
 
-static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
-			      const struct iovec *iov, loff_t offset,
-			      unsigned long nr_segs)
+static ssize_t gfs2_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
@@ -1043,13 +1041,12 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
 	rv = gfs2_glock_nq(&gh);
 	if (rv)
 		return rv;
-	rv = gfs2_ok_for_dio(ip, rw, offset);
+	rv = gfs2_ok_for_dio(ip, args->rw, args->offset);
 	if (rv != 1)
 		goto out; /* dio not valid, fall back to buffered i/o */
 
-	rv = blockdev_direct_IO_no_locking(rw, iocb, inode, inode->i_sb->s_bdev,
-					   iov, offset, nr_segs,
-					   gfs2_get_block_direct, NULL);
+	rv = blockdev_direct_IO_no_locking(iocb, inode, inode->i_sb->s_bdev,
+					   args, gfs2_get_block_direct, NULL);
 out:
 	gfs2_glock_dq_m(1, &gh);
 	gfs2_holder_uninit(&gh);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index a1cbff2..2998914 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -107,14 +107,13 @@ static int hfs_releasepage(struct page *page, gfp_t mask)
 	return res ? try_to_free_buffers(page) : 0;
 }
 
-static ssize_t hfs_direct_IO(int rw, struct kiocb *iocb,
-		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+static ssize_t hfs_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
 
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				  offset, nr_segs, hfs_get_block, NULL);
+	return blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, args,
+				  hfs_get_block, NULL);
 }
 
 static int hfs_writepages(struct address_space *mapping,
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index 1bcf597..dd7102b 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -100,14 +100,14 @@ static int hfsplus_releasepage(struct page *page, gfp_t mask)
 	return res ? try_to_free_buffers(page) : 0;
 }
 
-static ssize_t hfsplus_direct_IO(int rw, struct kiocb *iocb,
-		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+static ssize_t hfsplus_direct_IO(struct kiocb *iocb,
+				 struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
 
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				  offset, nr_segs, hfsplus_get_block, NULL);
+	return blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, args,
+				  hfsplus_get_block, NULL);
 }
 
 static int hfsplus_writepages(struct address_space *mapping,
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index b2ae190..e1420de 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -306,14 +306,13 @@ static sector_t jfs_bmap(struct address_space *mapping, sector_t block)
 	return generic_block_bmap(mapping, block, jfs_get_block);
 }
 
-static ssize_t jfs_direct_IO(int rw, struct kiocb *iocb,
-	const struct iovec *iov, loff_t offset, unsigned long nr_segs)
+static ssize_t jfs_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				offset, nr_segs, jfs_get_block, NULL);
+	return blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, args,
+					jfs_get_block, NULL);
 }
 
 const struct address_space_operations jfs_aops = {
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index e4e089a..45d931b 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -103,21 +103,18 @@ static inline int put_dreq(struct nfs_direct_req *dreq)
 /**
  * nfs_direct_IO - NFS address space operation for direct I/O
  * @rw: direction (read or write)
- * @iocb: target I/O control block
- * @iov: array of vectors that define I/O buffer
- * @pos: offset in file to begin the operation
- * @nr_segs: size of iovec array
+ * @args: IO arguments
  *
  * The presence of this routine in the address space ops vector means
  * the NFS client supports direct I/O.  However, we shunt off direct
  * read and write requests before the VFS gets them, so this method
  * should never be called.
  */
-ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
+ssize_t nfs_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
 			iocb->ki_filp->f_path.dentry->d_name.name,
-			(long long) pos, nr_segs);
+			(long long) args->offset, args->nr_segs);
 
 	return -EINVAL;
 }
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index fe9d8f2..840c307 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -222,19 +222,18 @@ static int nilfs_write_end(struct file *file, struct address_space *mapping,
 }
 
 static ssize_t
-nilfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
-		loff_t offset, unsigned long nr_segs)
+nilfs_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 	ssize_t size;
 
-	if (rw == WRITE)
+	if (args->rw == WRITE)
 		return 0;
 
 	/* Needs synchronization with the cleaner */
-	size = blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				  offset, nr_segs, nilfs_get_block, NULL);
+	size = blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, args,
+				  nilfs_get_block, NULL);
 	return size;
 }
 
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index b401654..56e61ba 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -668,11 +668,7 @@ static int ocfs2_releasepage(struct page *page, gfp_t wait)
 	return jbd2_journal_try_to_free_buffers(journal, page, wait);
 }
 
-static ssize_t ocfs2_direct_IO(int rw,
-			       struct kiocb *iocb,
-			       const struct iovec *iov,
-			       loff_t offset,
-			       unsigned long nr_segs)
+static ssize_t ocfs2_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_path.dentry->d_inode->i_mapping->host;
@@ -687,9 +683,8 @@ static ssize_t ocfs2_direct_IO(int rw,
 	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
 		return 0;
 
-	ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
-					    inode->i_sb->s_bdev, iov, offset,
-					    nr_segs, 
+	ret = blockdev_direct_IO_no_locking(iocb, inode,
+					    inode->i_sb->s_bdev, args,
 					    ocfs2_direct_IO_get_blocks,
 					    ocfs2_dio_end_io);
 
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index a14d6cd..201e6ca 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -3025,15 +3025,12 @@ static int reiserfs_releasepage(struct page *page, gfp_t unused_gfp_flags)
 
 /* We thank Mingming Cao for helping us understand in great detail what
    to do in this section of the code. */
-static ssize_t reiserfs_direct_IO(int rw, struct kiocb *iocb,
-				  const struct iovec *iov, loff_t offset,
-				  unsigned long nr_segs)
+static ssize_t reiserfs_direct_IO(struct kiocb *iocb, struct dio_args *args)
 {
 	struct file *file = iocb->ki_filp;
 	struct inode *inode = file->f_mapping->host;
 
-	return blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
-				  offset, nr_segs,
+	return blockdev_direct_IO(iocb, inode, inode->i_sb->s_bdev, args,
 				  reiserfs_get_blocks_direct_io, NULL);
 }
 
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index aecf251..0faf1fe 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -1532,11 +1532,8 @@ xfs_end_io_direct(
 
 STATIC ssize_t
 xfs_vm_direct_IO(
-	int			rw,
 	struct kiocb		*iocb,
-	const struct iovec	*iov,
-	loff_t			offset,
-	unsigned long		nr_segs)
+	struct dio_args		*args)
 {
 	struct file	*file = iocb->ki_filp;
 	struct inode	*inode = file->f_mapping->host;
@@ -1545,18 +1542,14 @@ xfs_vm_direct_IO(
 
 	bdev = xfs_find_bdev_for_inode(XFS_I(inode));
 
-	if (rw == WRITE) {
+	if (args->rw == WRITE) {
 		iocb->private = xfs_alloc_ioend(inode, IOMAP_UNWRITTEN);
-		ret = blockdev_direct_IO_own_locking(rw, iocb, inode,
-			bdev, iov, offset, nr_segs,
-			xfs_get_blocks_direct,
-			xfs_end_io_direct);
+		ret = blockdev_direct_IO_own_locking(iocb, inode, bdev, args,
+				xfs_get_blocks_direct, xfs_end_io_direct);
 	} else {
 		iocb->private = xfs_alloc_ioend(inode, IOMAP_READ);
-		ret = blockdev_direct_IO_no_locking(rw, iocb, inode,
-			bdev, iov, offset, nr_segs,
-			xfs_get_blocks_direct,
-			xfs_end_io_direct);
+		ret = blockdev_direct_IO_no_locking(iocb, inode,
+			bdev, args, xfs_get_blocks_direct, xfs_end_io_direct);
 	}
 
 	if (unlikely(ret != -EIOCBQUEUED && iocb->private))
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 67888a9..5971116 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -560,6 +560,7 @@ typedef struct {
 typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
 		unsigned long, unsigned long);
 
+struct dio_args;
 struct address_space_operations {
 	int (*writepage)(struct page *page, struct writeback_control *wbc);
 	int (*readpage)(struct file *, struct page *);
@@ -585,8 +586,7 @@ struct address_space_operations {
 	sector_t (*bmap)(struct address_space *, sector_t);
 	void (*invalidatepage) (struct page *, unsigned long);
 	int (*releasepage) (struct page *, gfp_t);
-	ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
-			loff_t offset, unsigned long nr_segs);
+	ssize_t (*direct_IO)(struct kiocb *, struct dio_args *);
 	int (*get_xip_mem)(struct address_space *, pgoff_t, int,
 						void **, unsigned long *);
 	/* migrate the contents of a page to the specified target */
@@ -2241,10 +2241,24 @@ static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
 #endif
 
 #ifdef CONFIG_BLOCK
-ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
-	struct block_device *bdev, const struct iovec *iov, loff_t offset,
-	unsigned long nr_segs, get_block_t get_block, dio_iodone_t end_io,
-	int lock_type);
+
+/*
+ * Arguments passwed to aops->direct_IO()
+ */
+struct dio_args {
+	int rw;
+	const struct iovec *iov;
+	unsigned long length;
+	loff_t offset;
+	unsigned long nr_segs;
+};
+
+ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
+	struct block_device *bdev, struct dio_args *args, get_block_t get_block,
+	dio_iodone_t end_io, int lock_type);
+
+ssize_t generic_file_direct_IO(int, struct address_space *, struct kiocb *,
+				const struct iovec *, loff_t, unsigned long);
 
 enum {
 	DIO_LOCKING = 1, /* need locking between buffered and direct access */
@@ -2252,31 +2266,28 @@ enum {
 	DIO_OWN_LOCKING, /* filesystem locks buffered and direct internally */
 };
 
-static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_block_t get_block,
-	dio_iodone_t end_io)
+static inline ssize_t blockdev_direct_IO(struct kiocb *iocb,
+	struct inode *inode, struct block_device *bdev, struct dio_args *args,
+	get_block_t get_block, dio_iodone_t end_io)
 {
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_block, end_io, DIO_LOCKING);
+	return __blockdev_direct_IO(iocb, inode, bdev, args,
+					get_block, end_io, DIO_LOCKING);
 }
 
-static inline ssize_t blockdev_direct_IO_no_locking(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_block_t get_block,
-	dio_iodone_t end_io)
+static inline ssize_t blockdev_direct_IO_no_locking(struct kiocb *iocb,
+	struct inode *inode, struct block_device *bdev, struct dio_args *args,
+	get_block_t get_block, dio_iodone_t end_io)
 {
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_block, end_io, DIO_NO_LOCKING);
+	return __blockdev_direct_IO(iocb, inode, bdev, args,
+					get_block, end_io, DIO_NO_LOCKING);
 }
 
-static inline ssize_t blockdev_direct_IO_own_locking(int rw, struct kiocb *iocb,
-	struct inode *inode, struct block_device *bdev, const struct iovec *iov,
-	loff_t offset, unsigned long nr_segs, get_block_t get_block,
-	dio_iodone_t end_io)
+static inline ssize_t blockdev_direct_IO_own_locking(struct kiocb *iocb,
+	struct inode *inode, struct block_device *bdev, struct dio_args *args,
+	get_block_t get_block, dio_iodone_t end_io)
 {
-	return __blockdev_direct_IO(rw, iocb, inode, bdev, iov, offset,
-				nr_segs, get_block, end_io, DIO_OWN_LOCKING);
+	return __blockdev_direct_IO(iocb, inode, bdev, args,
+					get_block, end_io, DIO_OWN_LOCKING);
 }
 #endif
 
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f6b9024..97a2383 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -408,8 +408,7 @@ extern int nfs3_removexattr (struct dentry *, const char *name);
 /*
  * linux/fs/nfs/direct.c
  */
-extern ssize_t nfs_direct_IO(int, struct kiocb *, const struct iovec *, loff_t,
-			unsigned long);
+extern ssize_t nfs_direct_IO(struct kiocb *, struct dio_args *);
 extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
 			const struct iovec *iov, unsigned long nr_segs,
 			loff_t pos);
diff --git a/mm/filemap.c b/mm/filemap.c
index ccea3b6..cf85298 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1345,8 +1345,9 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 			retval = filemap_write_and_wait_range(mapping, pos,
 					pos + iov_length(iov, nr_segs) - 1);
 			if (!retval) {
-				retval = mapping->a_ops->direct_IO(READ, iocb,
-							iov, pos, nr_segs);
+				retval = generic_file_direct_IO(READ, mapping,
+								iocb, iov,
+								pos, nr_segs);
 			}
 			if (retval > 0)
 				*ppos = pos + retval;
@@ -2144,7 +2145,8 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 		}
 	}
 
-	written = mapping->a_ops->direct_IO(WRITE, iocb, iov, pos, *nr_segs);
+	written = generic_file_direct_IO(WRITE, mapping, iocb, iov, pos,
+						*nr_segs);
 
 	/*
 	 * Finally, try again to invalidate clean pages which might have been
-- 
1.6.4.53.g3f55e


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 2/4] direct-io: make O_DIRECT IO path be page based
  2009-08-18  8:34 [PATCH 0/4] Page based O_DIRECT v2 Jens Axboe
  2009-08-18  8:34 ` [PATCH 1/4] direct-io: unify argument passing by adding a dio_args structure Jens Axboe
@ 2009-08-18  8:34 ` Jens Axboe
  2009-08-18  8:35 ` [PATCH 3/4] direct-io: add a "IO for kernel" flag to kiocb Jens Axboe
                   ` (3 subsequent siblings)
  5 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2009-08-18  8:34 UTC (permalink / raw)
  To: linux-kernel; +Cc: zach.brown, hch, Jens Axboe

Currently we pass in the iovec array and let the O_DIRECT core
handle the get_user_pages() business. This works, but it means that
we can only ever use user pages for O_DIRECT.

Switch the aops->direct_IO() and below code to use page arrays
instead, so that it doesn't make any assumptions about who the pages
belong to. This works directly for all users but NFS, which just
uses the same helper that the generic mapping read/write functions
also call.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
 fs/direct-io.c         |  304 ++++++++++++++++++++----------------------------
 fs/nfs/direct.c        |  161 +++++++++----------------
 fs/nfs/file.c          |    8 +-
 include/linux/fs.h     |   15 ++-
 include/linux/nfs_fs.h |    7 +-
 mm/filemap.c           |    6 +-
 6 files changed, 206 insertions(+), 295 deletions(-)

diff --git a/fs/direct-io.c b/fs/direct-io.c
index 181848c..22a945b 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -38,12 +38,6 @@
 #include <asm/atomic.h>
 
 /*
- * How many user pages to map in one call to get_user_pages().  This determines
- * the size of a structure on the stack.
- */
-#define DIO_PAGES	64
-
-/*
  * This code generally works in units of "dio_blocks".  A dio_block is
  * somewhere between the hard sector size and the filesystem block size.  it
  * is determined on a per-invocation basis.   When talking to the filesystem
@@ -105,20 +99,13 @@ struct dio {
 	sector_t cur_page_block;	/* Where it starts */
 
 	/*
-	 * Page fetching state. These variables belong to dio_refill_pages().
-	 */
-	int curr_page;			/* changes */
-	int total_pages;		/* doesn't change */
-	unsigned long curr_user_address;/* changes */
-
-	/*
 	 * Page queue.  These variables belong to dio_refill_pages() and
 	 * dio_get_page().
 	 */
-	struct page *pages[DIO_PAGES];	/* page buffer */
-	unsigned head;			/* next page to process */
-	unsigned tail;			/* last valid page + 1 */
-	int page_errors;		/* errno from get_user_pages() */
+	struct page **pages;		/* page buffer */
+	unsigned int head_page;		/* next page to process */
+	unsigned int total_pages;	/* last valid page + 1 */
+	unsigned int first_page_off;	/* offset into first page in map */
 
 	/* BIO completion state */
 	spinlock_t bio_lock;		/* protects BIO fields below */
@@ -134,57 +121,6 @@ struct dio {
 };
 
 /*
- * How many pages are in the queue?
- */
-static inline unsigned dio_pages_present(struct dio *dio)
-{
-	return dio->tail - dio->head;
-}
-
-/*
- * Go grab and pin some userspace pages.   Typically we'll get 64 at a time.
- */
-static int dio_refill_pages(struct dio *dio)
-{
-	int ret;
-	int nr_pages;
-
-	nr_pages = min(dio->total_pages - dio->curr_page, DIO_PAGES);
-	ret = get_user_pages_fast(
-		dio->curr_user_address,		/* Where from? */
-		nr_pages,			/* How many pages? */
-		dio->rw == READ,		/* Write to memory? */
-		&dio->pages[0]);		/* Put results here */
-
-	if (ret < 0 && dio->blocks_available && (dio->rw & WRITE)) {
-		struct page *page = ZERO_PAGE(0);
-		/*
-		 * A memory fault, but the filesystem has some outstanding
-		 * mapped blocks.  We need to use those blocks up to avoid
-		 * leaking stale data in the file.
-		 */
-		if (dio->page_errors == 0)
-			dio->page_errors = ret;
-		page_cache_get(page);
-		dio->pages[0] = page;
-		dio->head = 0;
-		dio->tail = 1;
-		ret = 0;
-		goto out;
-	}
-
-	if (ret >= 0) {
-		dio->curr_user_address += ret * PAGE_SIZE;
-		dio->curr_page += ret;
-		dio->head = 0;
-		dio->tail = ret;
-		ret = 0;
-	}
-out:
-	return ret;	
-}
-
-/*
  * Get another userspace page.  Returns an ERR_PTR on error.  Pages are
  * buffered inside the dio so that we can call get_user_pages() against a
  * decent number of pages, less frequently.  To provide nicer use of the
@@ -192,15 +128,10 @@ out:
  */
 static struct page *dio_get_page(struct dio *dio)
 {
-	if (dio_pages_present(dio) == 0) {
-		int ret;
+	if (dio->head_page < dio->total_pages)
+		return dio->pages[dio->head_page++];
 
-		ret = dio_refill_pages(dio);
-		if (ret)
-			return ERR_PTR(ret);
-		BUG_ON(dio_pages_present(dio) == 0);
-	}
-	return dio->pages[dio->head++];
+	return NULL;
 }
 
 /**
@@ -245,8 +176,6 @@ static int dio_complete(struct dio *dio, loff_t offset, int ret)
 		up_read_non_owner(&dio->inode->i_alloc_sem);
 
 	if (ret == 0)
-		ret = dio->page_errors;
-	if (ret == 0)
 		ret = dio->io_error;
 	if (ret == 0)
 		ret = transferred;
@@ -351,8 +280,10 @@ static void dio_bio_submit(struct dio *dio)
  */
 static void dio_cleanup(struct dio *dio)
 {
-	while (dio_pages_present(dio))
-		page_cache_release(dio_get_page(dio));
+	struct page *page;
+
+	while ((page = dio_get_page(dio)) != NULL)
+		page_cache_release(page);
 }
 
 /*
@@ -490,7 +421,6 @@ static int dio_bio_reap(struct dio *dio)
  */
 static int get_more_blocks(struct dio *dio)
 {
-	int ret;
 	struct buffer_head *map_bh = &dio->map_bh;
 	sector_t fs_startblk;	/* Into file, in filesystem-sized blocks */
 	unsigned long fs_count;	/* Number of filesystem-sized blocks */
@@ -502,38 +432,33 @@ static int get_more_blocks(struct dio *dio)
 	 * If there was a memory error and we've overwritten all the
 	 * mapped blocks then we can now return that memory error
 	 */
-	ret = dio->page_errors;
-	if (ret == 0) {
-		BUG_ON(dio->block_in_file >= dio->final_block_in_request);
-		fs_startblk = dio->block_in_file >> dio->blkfactor;
-		dio_count = dio->final_block_in_request - dio->block_in_file;
-		fs_count = dio_count >> dio->blkfactor;
-		blkmask = (1 << dio->blkfactor) - 1;
-		if (dio_count & blkmask)	
-			fs_count++;
-
-		map_bh->b_state = 0;
-		map_bh->b_size = fs_count << dio->inode->i_blkbits;
-
-		create = dio->rw & WRITE;
-		if (dio->lock_type == DIO_LOCKING) {
-			if (dio->block_in_file < (i_size_read(dio->inode) >>
-							dio->blkbits))
-				create = 0;
-		} else if (dio->lock_type == DIO_NO_LOCKING) {
+	BUG_ON(dio->block_in_file >= dio->final_block_in_request);
+	fs_startblk = dio->block_in_file >> dio->blkfactor;
+	dio_count = dio->final_block_in_request - dio->block_in_file;
+	fs_count = dio_count >> dio->blkfactor;
+	blkmask = (1 << dio->blkfactor) - 1;
+	if (dio_count & blkmask)
+		fs_count++;
+
+	map_bh->b_state = 0;
+	map_bh->b_size = fs_count << dio->inode->i_blkbits;
+
+	create = dio->rw & WRITE;
+	if (dio->lock_type == DIO_LOCKING) {
+		if (dio->block_in_file < (i_size_read(dio->inode) >>
+						dio->blkbits))
 			create = 0;
-		}
-
-		/*
-		 * For writes inside i_size we forbid block creations: only
-		 * overwrites are permitted.  We fall back to buffered writes
-		 * at a higher level for inside-i_size block-instantiating
-		 * writes.
-		 */
-		ret = (*dio->get_block)(dio->inode, fs_startblk,
-						map_bh, create);
+	} else if (dio->lock_type == DIO_NO_LOCKING) {
+		create = 0;
 	}
-	return ret;
+
+	/*
+	 * For writes inside i_size we forbid block creations: only
+	 * overwrites are permitted.  We fall back to buffered writes
+	 * at a higher level for inside-i_size block-instantiating
+	 * writes.
+	 */
+	return dio->get_block(dio->inode, fs_startblk, map_bh, create);
 }
 
 /*
@@ -567,8 +492,8 @@ static int dio_bio_add_page(struct dio *dio)
 {
 	int ret;
 
-	ret = bio_add_page(dio->bio, dio->cur_page,
-			dio->cur_page_len, dio->cur_page_offset);
+	ret = bio_add_page(dio->bio, dio->cur_page, dio->cur_page_len,
+				dio->cur_page_offset);
 	if (ret == dio->cur_page_len) {
 		/*
 		 * Decrement count only, if we are done with this page
@@ -804,6 +729,9 @@ static int do_direct_IO(struct dio *dio)
 			unsigned this_chunk_blocks;	/* # of blocks */
 			unsigned u;
 
+			offset_in_page += dio->first_page_off;
+			dio->first_page_off = 0;
+
 			if (dio->blocks_available == 0) {
 				/*
 				 * Need to go and map some more disk
@@ -933,13 +861,10 @@ direct_io_worker(struct kiocb *iocb, struct inode *inode,
 	struct dio_args *args, unsigned blkbits, get_block_t get_block,
 	dio_iodone_t end_io, struct dio *dio)
 {
-	const struct iovec *iov = args->iov;
-	unsigned long user_addr;
 	unsigned long flags;
-	int seg, rw = args->rw;
+	int rw = args->rw;
 	ssize_t ret = 0;
 	ssize_t ret2;
-	size_t bytes;
 
 	dio->inode = inode;
 	dio->rw = rw;
@@ -965,46 +890,25 @@ direct_io_worker(struct kiocb *iocb, struct inode *inode,
 	if (unlikely(dio->blkfactor))
 		dio->pages_in_io = 2;
 
-	for (seg = 0; seg < args->nr_segs; seg++) {
-		user_addr = (unsigned long) iov[seg].iov_base;
-		dio->pages_in_io +=
-			((user_addr+iov[seg].iov_len +PAGE_SIZE-1)/PAGE_SIZE
-				- user_addr/PAGE_SIZE);
-	}
+	dio->pages_in_io += args->nr_segs;
+	dio->size = args->length;
+	if (args->user_addr) {
+		dio->first_page_off = args->user_addr & ~PAGE_MASK;
+		dio->first_block_in_page = dio->first_page_off >> blkbits;
+		if (dio->first_block_in_page)
+			dio->first_page_off -= 1 << blkbits;
+	} else
+		dio->first_page_off = args->first_page_off;
 
-	for (seg = 0; seg < args->nr_segs; seg++) {
-		user_addr = (unsigned long)iov[seg].iov_base;
-		dio->size += bytes = iov[seg].iov_len;
-
-		/* Index into the first page of the first block */
-		dio->first_block_in_page = (user_addr & ~PAGE_MASK) >> blkbits;
-		dio->final_block_in_request = dio->block_in_file +
-						(bytes >> blkbits);
-		/* Page fetching state */
-		dio->head = 0;
-		dio->tail = 0;
-		dio->curr_page = 0;
-
-		dio->total_pages = 0;
-		if (user_addr & (PAGE_SIZE-1)) {
-			dio->total_pages++;
-			bytes -= PAGE_SIZE - (user_addr & (PAGE_SIZE - 1));
-		}
-		dio->total_pages += (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
-		dio->curr_user_address = user_addr;
-	
-		ret = do_direct_IO(dio);
+	dio->final_block_in_request = dio->block_in_file + (dio->size >> blkbits);
+	dio->head_page = 0;
+	dio->total_pages = args->nr_segs;
 
-		dio->result += iov[seg].iov_len -
+	ret = do_direct_IO(dio);
+
+	dio->result += args->length -
 			((dio->final_block_in_request - dio->block_in_file) <<
 					blkbits);
-
-		if (ret) {
-			dio_cleanup(dio);
-			break;
-		}
-	} /* end iovec loop */
-
 	if (ret == -ENOTBLK && (rw & WRITE)) {
 		/*
 		 * The remaining part of the request will be
@@ -1110,9 +1014,6 @@ __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	struct block_device *bdev, struct dio_args *args, get_block_t get_block,
 	dio_iodone_t end_io, int dio_lock_type)
 {
-	int seg;
-	size_t size;
-	unsigned long addr;
 	unsigned blkbits = inode->i_blkbits;
 	unsigned bdev_blkbits = 0;
 	unsigned blocksize_mask = (1 << blkbits) - 1;
@@ -1138,17 +1039,14 @@ __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	}
 
 	/* Check the memory alignment.  Blocks cannot straddle pages */
-	for (seg = 0; seg < args->nr_segs; seg++) {
-		addr = (unsigned long) args->iov[seg].iov_base;
-		size = args->iov[seg].iov_len;
-		end += size;
-		if ((addr & blocksize_mask) || (size & blocksize_mask))  {
-			if (bdev)
-				 blkbits = bdev_blkbits;
-			blocksize_mask = (1 << blkbits) - 1;
-			if ((addr & blocksize_mask) || (size & blocksize_mask))  
-				goto out;
-		}
+	if ((args->user_addr & blocksize_mask) ||
+	    (args->length & blocksize_mask))  {
+		if (bdev)
+			 blkbits = bdev_blkbits;
+		blocksize_mask = (1 << blkbits) - 1;
+		if ((args->user_addr & blocksize_mask) ||
+		    (args->length & blocksize_mask))
+			goto out;
 	}
 
 	dio = kzalloc(sizeof(*dio), GFP_KERNEL);
@@ -1156,6 +1054,8 @@ __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	if (!dio)
 		goto out;
 
+	dio->pages = args->pages;
+
 	/*
 	 * For block device access DIO_NO_LOCKING is used,
 	 *	neither readers nor writers do any locking at all
@@ -1232,20 +1132,70 @@ out:
 }
 EXPORT_SYMBOL(__blockdev_direct_IO);
 
-ssize_t generic_file_direct_IO(int rw, struct address_space *mapping,
-			       struct kiocb *iocb, const struct iovec *iov,
-			       loff_t offset, unsigned long nr_segs)
+static ssize_t __generic_file_direct_IO(int rw, struct address_space *mapping,
+					struct kiocb *iocb,
+					const struct iovec *iov, loff_t offset,
+					dio_io_actor *actor)
 {
+	struct page *stack_pages[UIO_FASTIOV];
+	unsigned long nr_pages, start, end;
 	struct dio_args args = {
-		.rw		= rw,
-		.iov		= iov,
-		.length		= iov_length(iov, nr_segs),
+		.pages		= stack_pages,
+		.length		= iov->iov_len,
+		.user_addr	= (unsigned long) iov->iov_base,
 		.offset		= offset,
-		.nr_segs	= nr_segs,
 	};
+	ssize_t ret;
+
+	end = (args.user_addr + iov->iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
+	start = args.user_addr >> PAGE_SHIFT;
+	nr_pages = end - start;
+
+	if (nr_pages >= UIO_FASTIOV) {
+		args.pages = kzalloc(nr_pages * sizeof(struct page *),
+					GFP_KERNEL);
+		if (!args.pages)
+			return -ENOMEM;
+	}
+
+	ret = get_user_pages_fast(args.user_addr, nr_pages, rw == READ,
+					args.pages);
+	if (ret > 0) {
+		args.nr_segs = ret;
+		ret = actor(iocb, &args);
+	}
 
-	if (mapping->a_ops->direct_IO)
-		return mapping->a_ops->direct_IO(iocb, &args);
+	if (args.pages != stack_pages)
+		kfree(args.pages);
 
-	return -EINVAL;
+	return ret;
+}
+
+/*
+ * Transform the iov into a page based structure for passing into the lower
+ * parts of O_DIRECT handling
+ */
+ssize_t generic_file_direct_IO(int rw, struct address_space *mapping,
+			       struct kiocb *kiocb, const struct iovec *iov,
+			       loff_t offset, unsigned long nr_segs,
+			       dio_io_actor *actor)
+{
+	ssize_t ret = 0, ret2;
+	unsigned long i;
+
+	for (i = 0; i < nr_segs; i++) {
+		ret2 = __generic_file_direct_IO(rw, mapping, kiocb, iov, offset,
+							actor);
+		if (ret2 < 0) {
+			if (!ret)
+				ret = ret2;
+			break;
+		}
+		iov++;
+		offset += ret2;
+		ret += ret2;
+	}
+
+	return ret;
 }
+EXPORT_SYMBOL_GPL(generic_file_direct_IO);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 45d931b..d9da548 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -271,13 +271,12 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
  * no requests have been sent, just return an error.
  */
 static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
-						const struct iovec *iov,
-						loff_t pos)
+						struct dio_args *args)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->path.dentry->d_inode;
-	unsigned long user_addr = (unsigned long)iov->iov_base;
-	size_t count = iov->iov_len;
+	unsigned long user_addr = args->user_addr;
+	size_t count = args->length;
 	size_t rsize = NFS_SERVER(inode)->rsize;
 	struct rpc_task *task;
 	struct rpc_message msg = {
@@ -306,24 +305,8 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 		if (unlikely(!data))
 			break;
 
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					data->npages, 1, 0, data->pagevec, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (result < 0) {
-			nfs_readdata_free(data);
-			break;
-		}
-		if ((unsigned)result < data->npages) {
-			bytes = result * PAGE_SIZE;
-			if (bytes <= pgbase) {
-				nfs_direct_release_pages(data->pagevec, result);
-				nfs_readdata_free(data);
-				break;
-			}
-			bytes -= pgbase;
-			data->npages = result;
-		}
+		data->pagevec = args->pages;
+		data->npages = args->nr_segs;
 
 		get_dreq(dreq);
 
@@ -332,7 +315,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 		data->cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
-		data->args.offset = pos;
+		data->args.offset = args->offset;
 		data->args.pgbase = pgbase;
 		data->args.pages = data->pagevec;
 		data->args.count = bytes;
@@ -361,7 +344,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 
 		started += bytes;
 		user_addr += bytes;
-		pos += bytes;
+		args->offset += bytes;
 		/* FIXME: Remove this unnecessary math from final patch */
 		pgbase += bytes;
 		pgbase &= ~PAGE_MASK;
@@ -376,26 +359,19 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
 }
 
 static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
-					      const struct iovec *iov,
-					      unsigned long nr_segs,
-					      loff_t pos)
+					      struct dio_args *args)
 {
 	ssize_t result = -EINVAL;
 	size_t requested_bytes = 0;
-	unsigned long seg;
 
 	get_dreq(dreq);
 
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *vec = &iov[seg];
-		result = nfs_direct_read_schedule_segment(dreq, vec, pos);
-		if (result < 0)
-			break;
-		requested_bytes += result;
-		if ((size_t)result < vec->iov_len)
-			break;
-		pos += vec->iov_len;
-	}
+	result = nfs_direct_read_schedule_segment(dreq, args);
+	if (result < 0)
+		goto out;
+
+	requested_bytes += result;
+	args += result;
 
 	if (put_dreq(dreq))
 		nfs_direct_complete(dreq);
@@ -403,13 +379,13 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
 	if (requested_bytes != 0)
 		return 0;
 
+out:
 	if (result < 0)
 		return result;
 	return -EIO;
 }
 
-static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
-			       unsigned long nr_segs, loff_t pos)
+static ssize_t nfs_direct_read(struct kiocb *iocb, struct dio_args *args)
 {
 	ssize_t result = 0;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -424,7 +400,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
-	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
+	result = nfs_direct_read_schedule_iovec(dreq, args);
 	if (!result)
 		result = nfs_direct_wait(dreq);
 	nfs_direct_req_release(dreq);
@@ -691,13 +667,13 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
  * no requests have been sent, just return an error.
  */
 static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
-						 const struct iovec *iov,
-						 loff_t pos, int sync)
+						 struct dio_args *args,
+						 int sync)
 {
 	struct nfs_open_context *ctx = dreq->ctx;
 	struct inode *inode = ctx->path.dentry->d_inode;
-	unsigned long user_addr = (unsigned long)iov->iov_base;
-	size_t count = iov->iov_len;
+	unsigned long user_addr = args->user_addr;
+	size_t count = args->length;
 	struct rpc_task *task;
 	struct rpc_message msg = {
 		.rpc_cred = ctx->cred,
@@ -726,24 +702,8 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 		if (unlikely(!data))
 			break;
 
-		down_read(&current->mm->mmap_sem);
-		result = get_user_pages(current, current->mm, user_addr,
-					data->npages, 0, 0, data->pagevec, NULL);
-		up_read(&current->mm->mmap_sem);
-		if (result < 0) {
-			nfs_writedata_free(data);
-			break;
-		}
-		if ((unsigned)result < data->npages) {
-			bytes = result * PAGE_SIZE;
-			if (bytes <= pgbase) {
-				nfs_direct_release_pages(data->pagevec, result);
-				nfs_writedata_free(data);
-				break;
-			}
-			bytes -= pgbase;
-			data->npages = result;
-		}
+		data->pagevec = args->pages;
+		data->npages = args->nr_segs;
 
 		get_dreq(dreq);
 
@@ -754,7 +714,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 		data->cred = msg.rpc_cred;
 		data->args.fh = NFS_FH(inode);
 		data->args.context = ctx;
-		data->args.offset = pos;
+		data->args.offset = args->offset;
 		data->args.pgbase = pgbase;
 		data->args.pages = data->pagevec;
 		data->args.count = bytes;
@@ -784,7 +744,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 
 		started += bytes;
 		user_addr += bytes;
-		pos += bytes;
+		args->offset += bytes;
 
 		/* FIXME: Remove this useless math from the final patch */
 		pgbase += bytes;
@@ -800,27 +760,19 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
 }
 
 static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
-					       const struct iovec *iov,
-					       unsigned long nr_segs,
-					       loff_t pos, int sync)
+					       struct dio_args *args, int sync)
 {
 	ssize_t result = 0;
 	size_t requested_bytes = 0;
-	unsigned long seg;
 
 	get_dreq(dreq);
 
-	for (seg = 0; seg < nr_segs; seg++) {
-		const struct iovec *vec = &iov[seg];
-		result = nfs_direct_write_schedule_segment(dreq, vec,
-							   pos, sync);
-		if (result < 0)
-			break;
-		requested_bytes += result;
-		if ((size_t)result < vec->iov_len)
-			break;
-		pos += vec->iov_len;
-	}
+	result = nfs_direct_write_schedule_segment(dreq, args, sync);
+	if (result < 0)
+		goto out;
+
+	requested_bytes += result;
+	args->offset += result;
 
 	if (put_dreq(dreq))
 		nfs_direct_write_complete(dreq, dreq->inode);
@@ -828,14 +780,13 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
 	if (requested_bytes != 0)
 		return 0;
 
+out:
 	if (result < 0)
 		return result;
 	return -EIO;
 }
 
-static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
-				unsigned long nr_segs, loff_t pos,
-				size_t count)
+static ssize_t nfs_direct_write(struct kiocb *iocb, struct dio_args *args)
 {
 	ssize_t result = 0;
 	struct inode *inode = iocb->ki_filp->f_mapping->host;
@@ -848,7 +799,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 		return -ENOMEM;
 	nfs_alloc_commit_data(dreq);
 
-	if (dreq->commit_data == NULL || count < wsize)
+	if (dreq->commit_data == NULL || args->length < wsize)
 		sync = NFS_FILE_SYNC;
 
 	dreq->inode = inode;
@@ -856,7 +807,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	if (!is_sync_kiocb(iocb))
 		dreq->iocb = iocb;
 
-	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
+	result = nfs_direct_write_schedule_iovec(dreq, args, sync);
 	if (!result)
 		result = nfs_direct_wait(dreq);
 	nfs_direct_req_release(dreq);
@@ -867,9 +818,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
 /**
  * nfs_file_direct_read - file direct read operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers into which to read data
- * @nr_segs: size of iov vector
- * @pos: byte offset in file where reading starts
+ * @args: direct IO arguments
  *
  * We use this function for direct reads instead of calling
  * generic_file_aio_read() in order to avoid gfar's check to see if
@@ -885,21 +834,20 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
  * client must read the updated atime from the server back into its
  * cache.
  */
-ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
-				unsigned long nr_segs, loff_t pos)
+static ssize_t nfs_file_direct_read(struct kiocb *iocb, struct dio_args *args)
 {
 	ssize_t retval = -EINVAL;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	size_t count;
 
-	count = iov_length(iov, nr_segs);
+	count = args->length;
 	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
 
 	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
 		file->f_path.dentry->d_parent->d_name.name,
 		file->f_path.dentry->d_name.name,
-		count, (long long) pos);
+		count, (long long) args->offset);
 
 	retval = 0;
 	if (!count)
@@ -909,9 +857,9 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
 	if (retval)
 		goto out;
 
-	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
+	retval = nfs_direct_read(iocb, args);
 	if (retval > 0)
-		iocb->ki_pos = pos + retval;
+		iocb->ki_pos = args->offset + retval;
 
 out:
 	return retval;
@@ -920,9 +868,7 @@ out:
 /**
  * nfs_file_direct_write - file direct write operation for NFS files
  * @iocb: target I/O control block
- * @iov: vector of user buffers from which to write data
- * @nr_segs: size of iov vector
- * @pos: byte offset in file where writing starts
+ * @args: direct IO arguments
  *
  * We use this function for direct writes instead of calling
  * generic_file_aio_write() in order to avoid taking the inode
@@ -942,23 +888,22 @@ out:
  * Note that O_APPEND is not supported for NFS direct writes, as there
  * is no atomic O_APPEND write facility in the NFS protocol.
  */
-ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
-				unsigned long nr_segs, loff_t pos)
+static ssize_t nfs_file_direct_write(struct kiocb *iocb, struct dio_args *args)
 {
 	ssize_t retval = -EINVAL;
 	struct file *file = iocb->ki_filp;
 	struct address_space *mapping = file->f_mapping;
 	size_t count;
 
-	count = iov_length(iov, nr_segs);
+	count = args->length;
 	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
 
 	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
 		file->f_path.dentry->d_parent->d_name.name,
 		file->f_path.dentry->d_name.name,
-		count, (long long) pos);
+		count, (long long) args->offset);
 
-	retval = generic_write_checks(file, &pos, &count, 0);
+	retval = generic_write_checks(file, &args->offset, &count, 0);
 	if (retval)
 		goto out;
 
@@ -973,15 +918,23 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	if (retval)
 		goto out;
 
-	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
+	retval = nfs_direct_write(iocb, args);
 
 	if (retval > 0)
-		iocb->ki_pos = pos + retval;
+		iocb->ki_pos = args->offset + retval;
 
 out:
 	return retval;
 }
 
+ssize_t nfs_file_direct_io(struct kiocb *kiocb, struct dio_args *args)
+{
+	if (args->rw == READ)
+		return nfs_file_direct_read(kiocb, args);
+
+	return nfs_file_direct_write(kiocb, args);
+}
+
 /**
  * nfs_init_directcache - create a slab cache for nfs_direct_req structures
  *
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 0506232..97d8cc7 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -249,13 +249,15 @@ static ssize_t
 nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
 		unsigned long nr_segs, loff_t pos)
 {
+	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
 	struct inode * inode = dentry->d_inode;
 	ssize_t result;
 	size_t count = iov_length(iov, nr_segs);
 
 	if (iocb->ki_filp->f_flags & O_DIRECT)
-		return nfs_file_direct_read(iocb, iov, nr_segs, pos);
+		return generic_file_direct_IO(READ, mapping, iocb, iov, pos,
+						nr_segs, nfs_file_direct_io);
 
 	dprintk("NFS: read(%s/%s, %lu@%lu)\n",
 		dentry->d_parent->d_name.name, dentry->d_name.name,
@@ -546,13 +548,15 @@ static int nfs_need_sync_write(struct file *filp, struct inode *inode)
 static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
 				unsigned long nr_segs, loff_t pos)
 {
+	struct address_space *mapping = iocb->ki_filp->f_mapping;
 	struct dentry * dentry = iocb->ki_filp->f_path.dentry;
 	struct inode * inode = dentry->d_inode;
 	ssize_t result;
 	size_t count = iov_length(iov, nr_segs);
 
 	if (iocb->ki_filp->f_flags & O_DIRECT)
-		return nfs_file_direct_write(iocb, iov, nr_segs, pos);
+		return generic_file_direct_IO(WRITE, mapping, iocb, iov, pos,
+						nr_segs, nfs_file_direct_io);
 
 	dprintk("NFS: write(%s/%s, %lu@%Ld)\n",
 		dentry->d_parent->d_name.name, dentry->d_name.name,
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5971116..539994a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2247,18 +2247,27 @@ static inline int xip_truncate_page(struct address_space *mapping, loff_t from)
  */
 struct dio_args {
 	int rw;
-	const struct iovec *iov;
+	struct page **pages;
+	unsigned int first_page_off;
+	unsigned long nr_segs;
 	unsigned long length;
 	loff_t offset;
-	unsigned long nr_segs;
+
+	/*
+	 * Original user pointer, we'll get rid of this
+	 */
+	unsigned long user_addr;
 };
 
 ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	struct block_device *bdev, struct dio_args *args, get_block_t get_block,
 	dio_iodone_t end_io, int lock_type);
 
+typedef ssize_t (dio_io_actor)(struct kiocb *, struct dio_args *);
+
 ssize_t generic_file_direct_IO(int, struct address_space *, struct kiocb *,
-				const struct iovec *, loff_t, unsigned long);
+				const struct iovec *, loff_t, unsigned long,
+				dio_io_actor);
 
 enum {
 	DIO_LOCKING = 1, /* need locking between buffered and direct access */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 97a2383..ded8337 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -409,12 +409,7 @@ extern int nfs3_removexattr (struct dentry *, const char *name);
  * linux/fs/nfs/direct.c
  */
 extern ssize_t nfs_direct_IO(struct kiocb *, struct dio_args *);
-extern ssize_t nfs_file_direct_read(struct kiocb *iocb,
-			const struct iovec *iov, unsigned long nr_segs,
-			loff_t pos);
-extern ssize_t nfs_file_direct_write(struct kiocb *iocb,
-			const struct iovec *iov, unsigned long nr_segs,
-			loff_t pos);
+extern ssize_t nfs_file_direct_io(struct kiocb *, struct dio_args *);
 
 /*
  * linux/fs/nfs/dir.c
diff --git a/mm/filemap.c b/mm/filemap.c
index cf85298..3e03021 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1346,8 +1346,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 					pos + iov_length(iov, nr_segs) - 1);
 			if (!retval) {
 				retval = generic_file_direct_IO(READ, mapping,
-								iocb, iov,
-								pos, nr_segs);
+						iocb, iov, pos, nr_segs,
+						mapping->a_ops->direct_IO);
 			}
 			if (retval > 0)
 				*ppos = pos + retval;
@@ -2146,7 +2146,7 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 	}
 
 	written = generic_file_direct_IO(WRITE, mapping, iocb, iov, pos,
-						*nr_segs);
+					*nr_segs, mapping->a_ops->direct_IO);
 
 	/*
 	 * Finally, try again to invalidate clean pages which might have been
-- 
1.6.4.53.g3f55e


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 3/4] direct-io: add a "IO for kernel" flag to kiocb
  2009-08-18  8:34 [PATCH 0/4] Page based O_DIRECT v2 Jens Axboe
  2009-08-18  8:34 ` [PATCH 1/4] direct-io: unify argument passing by adding a dio_args structure Jens Axboe
  2009-08-18  8:34 ` [PATCH 2/4] direct-io: make O_DIRECT IO path be page based Jens Axboe
@ 2009-08-18  8:35 ` Jens Axboe
  2009-08-18  8:35 ` [PATCH 4/4] direct-io: get rid of irq flag saving where it isn't needed Jens Axboe
                   ` (2 subsequent siblings)
  5 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2009-08-18  8:35 UTC (permalink / raw)
  To: linux-kernel; +Cc: zach.brown, hch, Jens Axboe

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
 fs/direct-io.c      |   12 +++++++++++-
 include/linux/aio.h |    3 +++
 2 files changed, 14 insertions(+), 1 deletions(-)

diff --git a/fs/direct-io.c b/fs/direct-io.c
index 22a945b..0e923f2 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -117,6 +117,7 @@ struct dio {
 	struct kiocb *iocb;		/* kiocb */
 	int is_async;			/* is IO async ? */
 	int io_error;			/* IO error in completion path */
+	int is_kernel;
 	ssize_t result;                 /* IO result */
 };
 
@@ -336,7 +337,7 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 
 	if (dio->is_async && dio->rw == READ) {
 		bio_check_pages_dirty(bio);	/* transfers ownership */
-	} else {
+	} else if (!dio->is_kernel) {
 		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
 			struct page *page = bvec[page_no].bv_page;
 
@@ -345,6 +346,14 @@ static int dio_bio_complete(struct dio *dio, struct bio *bio)
 			page_cache_release(page);
 		}
 		bio_put(bio);
+	} else {
+		for (page_no = 0; page_no < bio->bi_vcnt; page_no++) {
+			struct page *page = bvec[page_no].bv_page;
+
+			if (!dio->io_error)
+				SetPageUptodate(page);
+		}
+		bio_put(bio);
 	}
 	return uptodate ? 0 : -EIO;
 }
@@ -1103,6 +1112,7 @@ __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
 	 */
 	dio->is_async = !is_sync_kiocb(iocb) && !((rw & WRITE) &&
 		(end > i_size_read(inode)));
+	dio->is_kernel = kiocbIsKernel(iocb);
 
 	retval = direct_io_worker(iocb, inode, args, blkbits, get_block, end_io,
 					dio);
diff --git a/include/linux/aio.h b/include/linux/aio.h
index 47f7d93..ce3f34b 100644
--- a/include/linux/aio.h
+++ b/include/linux/aio.h
@@ -34,6 +34,7 @@ struct kioctx;
 /* #define KIF_LOCKED		0 */
 #define KIF_KICKED		1
 #define KIF_CANCELLED		2
+#define KIF_KERNEL_PAGES	3
 
 #define kiocbTryLock(iocb)	test_and_set_bit(KIF_LOCKED, &(iocb)->ki_flags)
 #define kiocbTryKick(iocb)	test_and_set_bit(KIF_KICKED, &(iocb)->ki_flags)
@@ -41,6 +42,7 @@ struct kioctx;
 #define kiocbSetLocked(iocb)	set_bit(KIF_LOCKED, &(iocb)->ki_flags)
 #define kiocbSetKicked(iocb)	set_bit(KIF_KICKED, &(iocb)->ki_flags)
 #define kiocbSetCancelled(iocb)	set_bit(KIF_CANCELLED, &(iocb)->ki_flags)
+#define kiocbSetKernel(iocb)	set_bit(KIF_KERNEL_PAGES, &(iocb)->ki_flags)
 
 #define kiocbClearLocked(iocb)	clear_bit(KIF_LOCKED, &(iocb)->ki_flags)
 #define kiocbClearKicked(iocb)	clear_bit(KIF_KICKED, &(iocb)->ki_flags)
@@ -49,6 +51,7 @@ struct kioctx;
 #define kiocbIsLocked(iocb)	test_bit(KIF_LOCKED, &(iocb)->ki_flags)
 #define kiocbIsKicked(iocb)	test_bit(KIF_KICKED, &(iocb)->ki_flags)
 #define kiocbIsCancelled(iocb)	test_bit(KIF_CANCELLED, &(iocb)->ki_flags)
+#define kiocbIsKernel(iocb)	test_bit(KIF_KERNEL_PAGES, &(iocb)->ki_flags)
 
 /* is there a better place to document function pointer methods? */
 /**
-- 
1.6.4.53.g3f55e


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* [PATCH 4/4] direct-io: get rid of irq flag saving where it isn't needed
  2009-08-18  8:34 [PATCH 0/4] Page based O_DIRECT v2 Jens Axboe
                   ` (2 preceding siblings ...)
  2009-08-18  8:35 ` [PATCH 3/4] direct-io: add a "IO for kernel" flag to kiocb Jens Axboe
@ 2009-08-18  8:35 ` Jens Axboe
  2009-08-19 12:44 ` [PATCH 0/4] Page based O_DIRECT v2 Boaz Harrosh
  2009-08-19 19:05 ` Alan D. Brunelle
  5 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2009-08-18  8:35 UTC (permalink / raw)
  To: linux-kernel; +Cc: zach.brown, hch, Jens Axboe

We use the flags saving variant of the spin lock functions everywhere
in fs/direct-io.c, even in places where we otherwise block. Get rid
of that except for the end_io path, which may indeed be called with
irqs disabled.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
 fs/direct-io.c |   24 ++++++++++--------------
 1 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0e923f2..2f73593 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -259,13 +259,12 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
 static void dio_bio_submit(struct dio *dio)
 {
 	struct bio *bio = dio->bio;
-	unsigned long flags;
 
 	bio->bi_private = dio;
 
-	spin_lock_irqsave(&dio->bio_lock, flags);
+	spin_lock_irq(&dio->bio_lock);
 	dio->refcount++;
-	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	spin_unlock_irq(&dio->bio_lock);
 
 	if (dio->is_async && dio->rw == READ)
 		bio_set_pages_dirty(bio);
@@ -295,10 +294,9 @@ static void dio_cleanup(struct dio *dio)
  */
 static struct bio *dio_await_one(struct dio *dio)
 {
-	unsigned long flags;
 	struct bio *bio = NULL;
 
-	spin_lock_irqsave(&dio->bio_lock, flags);
+	spin_lock_irq(&dio->bio_lock);
 
 	/*
 	 * Wait as long as the list is empty and there are bios in flight.  bio
@@ -309,17 +307,17 @@ static struct bio *dio_await_one(struct dio *dio)
 	while (dio->refcount > 1 && dio->bio_list == NULL) {
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		dio->waiter = current;
-		spin_unlock_irqrestore(&dio->bio_lock, flags);
+		spin_unlock_irq(&dio->bio_lock);
 		io_schedule();
 		/* wake up sets us TASK_RUNNING */
-		spin_lock_irqsave(&dio->bio_lock, flags);
+		spin_lock_irq(&dio->bio_lock);
 		dio->waiter = NULL;
 	}
 	if (dio->bio_list) {
 		bio = dio->bio_list;
 		dio->bio_list = bio->bi_private;
 	}
-	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	spin_unlock_irq(&dio->bio_lock);
 	return bio;
 }
 
@@ -388,14 +386,13 @@ static int dio_bio_reap(struct dio *dio)
 
 	if (dio->reap_counter++ >= 64) {
 		while (dio->bio_list) {
-			unsigned long flags;
 			struct bio *bio;
 			int ret2;
 
-			spin_lock_irqsave(&dio->bio_lock, flags);
+			spin_lock_irq(&dio->bio_lock);
 			bio = dio->bio_list;
 			dio->bio_list = bio->bi_private;
-			spin_unlock_irqrestore(&dio->bio_lock, flags);
+			spin_unlock_irq(&dio->bio_lock);
 			ret2 = dio_bio_complete(dio, bio);
 			if (ret == 0)
 				ret = ret2;
@@ -870,7 +867,6 @@ direct_io_worker(struct kiocb *iocb, struct inode *inode,
 	struct dio_args *args, unsigned blkbits, get_block_t get_block,
 	dio_iodone_t end_io, struct dio *dio)
 {
-	unsigned long flags;
 	int rw = args->rw;
 	ssize_t ret = 0;
 	ssize_t ret2;
@@ -984,9 +980,9 @@ direct_io_worker(struct kiocb *iocb, struct inode *inode,
 	 * completion paths can drop their ref and use the remaining count to
 	 * decide to wake the submission path atomically.
 	 */
-	spin_lock_irqsave(&dio->bio_lock, flags);
+	spin_lock_irq(&dio->bio_lock);
 	ret2 = --dio->refcount;
-	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	spin_unlock_irq(&dio->bio_lock);
 
 	if (ret2 == 0) {
 		ret = dio_complete(dio, args->offset, ret);
-- 
1.6.4.53.g3f55e


^ permalink raw reply related	[flat|nested] 13+ messages in thread

* Re: [PATCH 0/4] Page based O_DIRECT v2
  2009-08-18  8:34 [PATCH 0/4] Page based O_DIRECT v2 Jens Axboe
                   ` (3 preceding siblings ...)
  2009-08-18  8:35 ` [PATCH 4/4] direct-io: get rid of irq flag saving where it isn't needed Jens Axboe
@ 2009-08-19 12:44 ` Boaz Harrosh
  2009-08-19 13:01   ` Jens Axboe
  2009-08-19 19:05 ` Alan D. Brunelle
  5 siblings, 1 reply; 13+ messages in thread
From: Boaz Harrosh @ 2009-08-19 12:44 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel, zach.brown, hch

On 08/18/2009 11:34 AM, Jens Axboe wrote:
> Hi,
> 
> Updated patchset for page based O_DIRECT. I didn't include the
> loop bits this time, lets focus on getting these core bits into
> shape and then loop is easily patchable on top of this.
> 
> Changes since last post:
> 
> - Changed do_dio() to generic_file_direct_IO() as per Christophs
>   suggestion.
> - Split the first patch into two parts. One simply adds dio_args
>   and maintains the current code, the next has the functional change
>   but without changing file systems (except NFS).
> - Add ->rw to dio_args (Christoph).
> - A locking fixup. Not really related, but should be fixed up anyways.
> 
> There are at least two pending things to work on:
> 
> 1) NFS is still broken, I get a crash in freeing some data that
>    is not related to the pages. Will debug this.
> 2) As Christoph suggested, we need some way to wait for a dio
>    when all segments are submitted. Currently it waits for each
>    segment. Not sure how best to solve this issue, will think a
>    bit more about this. Basically we need to pass down the wait
>    list to the generic_file_direct_IO() and have that do the
>    queue kick and wait.
> 

Jens hi.

I please have some basic question on the subject?

[1]
So before, the complete iovec from user mode could potentially be
submitted in a single request, depending on the implementor.
With new code, each iovec entry is broken to it's few pages and
is submitted as a separate request. This might not be bad for
block based devices that could see these segments merged back by the
IO elevator. But what about the other implementers that see a
great performance boost in the current scatter-gather nature of the
iovec API. It's almost as if the application was calling the kernel
for each segment separately.

I wish you would use a more generic page carrier then page-* array.
and submit the complete iovec at once.

We used to use scatter-lists but these are best only used inside DMA
engines and Drivers as they are more than 2 times too big. The ideal for
me is the bio_vec array as used inside a bio. scatter-list has all these
helpers, iterators, and wrappers, which bio_vec do not, so I don't know
what the best choice is.

But your current solution, (from inspection only I have not tested any of
this), might mean a great performance degradation for some work scenarios.
For example a user-mode app the gathers lots of small memory sources and
hopes to write it as a single very large on-the-wire-NFS-write , might find
itself writing lots of small on-the-wire-NFS-writes.

[2]
Please address linux-fsdevel on these patches. lkml is so crowded and after
all these files do sit in fs/

Thanks
Boaz

^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 0/4] Page based O_DIRECT v2
  2009-08-19 12:44 ` [PATCH 0/4] Page based O_DIRECT v2 Boaz Harrosh
@ 2009-08-19 13:01   ` Jens Axboe
  0 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2009-08-19 13:01 UTC (permalink / raw)
  To: Boaz Harrosh; +Cc: linux-kernel, zach.brown, hch

On Wed, Aug 19 2009, Boaz Harrosh wrote:
> On 08/18/2009 11:34 AM, Jens Axboe wrote:
> > Hi,
> > 
> > Updated patchset for page based O_DIRECT. I didn't include the
> > loop bits this time, lets focus on getting these core bits into
> > shape and then loop is easily patchable on top of this.
> > 
> > Changes since last post:
> > 
> > - Changed do_dio() to generic_file_direct_IO() as per Christophs
> >   suggestion.
> > - Split the first patch into two parts. One simply adds dio_args
> >   and maintains the current code, the next has the functional change
> >   but without changing file systems (except NFS).
> > - Add ->rw to dio_args (Christoph).
> > - A locking fixup. Not really related, but should be fixed up anyways.
> > 
> > There are at least two pending things to work on:
> > 
> > 1) NFS is still broken, I get a crash in freeing some data that
> >    is not related to the pages. Will debug this.
> > 2) As Christoph suggested, we need some way to wait for a dio
> >    when all segments are submitted. Currently it waits for each
> >    segment. Not sure how best to solve this issue, will think a
> >    bit more about this. Basically we need to pass down the wait
> >    list to the generic_file_direct_IO() and have that do the
> >    queue kick and wait.
> > 
> 
> Jens hi.
> 
> I please have some basic question on the subject?
> 
> [1]
> So before, the complete iovec from user mode could potentially be
> submitted in a single request, depending on the implementor.
> With new code, each iovec entry is broken to it's few pages and
> is submitted as a separate request. This might not be bad for
> block based devices that could see these segments merged back by the
> IO elevator. But what about the other implementers that see a
> great performance boost in the current scatter-gather nature of the
> iovec API. It's almost as if the application was calling the kernel
> for each segment separately.
> 
> I wish you would use a more generic page carrier then page-* array.
> and submit the complete iovec at once.
> 
> We used to use scatter-lists but these are best only used inside DMA
> engines and Drivers as they are more than 2 times too big. The ideal for
> me is the bio_vec array as used inside a bio. scatter-list has all these
> helpers, iterators, and wrappers, which bio_vec do not, so I don't know
> what the best choice is.
> 
> But your current solution, (from inspection only I have not tested any of
> this), might mean a great performance degradation for some work scenarios.
> For example a user-mode app the gathers lots of small memory sources and
> hopes to write it as a single very large on-the-wire-NFS-write , might find
> itself writing lots of small on-the-wire-NFS-writes.

I fully agree, see also the discussion with Christoph. One way would
indeed be to pass in an array of page map + offset, another would be to
pass something back to enable kicking + waiting on the IO. Haven't
looked in either direction yet, but I hope to do so Very Soon.

> [2]
> Please address linux-fsdevel on these patches. lkml is so crowded and after
> all these files do sit in fs/

Sure, will CC linux-fsdevel next time too.

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 0/4] Page based O_DIRECT v2
  2009-08-18  8:34 [PATCH 0/4] Page based O_DIRECT v2 Jens Axboe
                   ` (4 preceding siblings ...)
  2009-08-19 12:44 ` [PATCH 0/4] Page based O_DIRECT v2 Boaz Harrosh
@ 2009-08-19 19:05 ` Alan D. Brunelle
  2009-08-19 22:06   ` Jens Axboe
  5 siblings, 1 reply; 13+ messages in thread
From: Alan D. Brunelle @ 2009-08-19 19:05 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel, zach.brown, hch

Hi Jens - 

I'm not using loop, but it appears that there may be a regression in
regular asynchronous direct I/O sequential write performance when these
patches are applied. Using my "small" machine (16-way x86_64, 256GB, two
dual-port 4GB FC HBAs connected through switches to 4 HP MSA1000s - one
MSA per port), I'm seeing a small but noticeable drop in performance for
sequential writes on the order of 2 to 6%. Random asynchronous direct
I/O and sequential reads appear to be unaffected.

http://free.linux.hp.com/~adb/2009-08-19/nc.png

has a set of graphs showing the data obtained when utilizing LUNs
exported by the MSAs (increasing the number of MSAs being used along the
X-axis). The critical sequential write graph has numbers like (numbers
expressed in GB/second):

Kernel                    1MSA  2MSAs 3MSAs 4MSAs
------------------------  ----- ----- ----- -----
2.6.31-rc6              :  0.17  0.33  0.50  0.65 
2.6.31-rc6 + loop-direct:  0.15  0.31  0.46  0.61

Using all 4 devices we're seeing a drop of slightly over 6%. 

I also typically do runs utilizing just the caches on the MSAs (getting
rid of physical disk interactions (seeks &c).). Even here we see a small
drop off in sequential write performance (on the order of about 2.5%
when using all 4 MSAs)- but noticeable gains for both random reads and
(especially) random writes. That graph can be seen at:

http://free.linux.hp.com/~adb/2009-08-19/ca.png

BTW: The grace/xmgrace files that generated these can be found at - 

http://free.linux.hp.com/~adb/2009-08-19/nc.agr
http://free.linux.hp.com/~adb/2009-08-19/ca.agr

- as the specifics can be seen better whilst running xmgrace on those
files.

The 2.6.31-rc6 kernel was built using your block git trees master
branch, and the other one has your loop-direct branch at:

commit 806dec7809e1b383a3a1fc328b9d3dae1f633663
Author: Jens Axboe <jens.axboe@oracle.com>
Date:   Tue Aug 18 10:01:34 2009 +0200

At the same time I'm doing this, I'm doing some other testing on my
large machine - but the test program has hung (using the loop-direct
branch kernel). I'm tracking that down...

Alan D. Brunelle
Hewlett-Packard


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 0/4] Page based O_DIRECT v2
  2009-08-19 19:05 ` Alan D. Brunelle
@ 2009-08-19 22:06   ` Jens Axboe
  2009-08-19 22:23     ` Alan D. Brunelle
  0 siblings, 1 reply; 13+ messages in thread
From: Jens Axboe @ 2009-08-19 22:06 UTC (permalink / raw)
  To: Alan D. Brunelle; +Cc: linux-kernel, zach.brown, hch

On Wed, Aug 19 2009, Alan D. Brunelle wrote:
> Hi Jens - 
> 
> I'm not using loop, but it appears that there may be a regression in
> regular asynchronous direct I/O sequential write performance when these
> patches are applied. Using my "small" machine (16-way x86_64, 256GB, two
> dual-port 4GB FC HBAs connected through switches to 4 HP MSA1000s - one
> MSA per port), I'm seeing a small but noticeable drop in performance for
> sequential writes on the order of 2 to 6%. Random asynchronous direct
> I/O and sequential reads appear to be unaffected.
> 
> http://free.linux.hp.com/~adb/2009-08-19/nc.png
> 
> has a set of graphs showing the data obtained when utilizing LUNs
> exported by the MSAs (increasing the number of MSAs being used along the
> X-axis). The critical sequential write graph has numbers like (numbers
> expressed in GB/second):
> 
> Kernel                    1MSA  2MSAs 3MSAs 4MSAs
> ------------------------  ----- ----- ----- -----
> 2.6.31-rc6              :  0.17  0.33  0.50  0.65 
> 2.6.31-rc6 + loop-direct:  0.15  0.31  0.46  0.61
> 
> Using all 4 devices we're seeing a drop of slightly over 6%. 
> 
> I also typically do runs utilizing just the caches on the MSAs (getting
> rid of physical disk interactions (seeks &c).). Even here we see a small
> drop off in sequential write performance (on the order of about 2.5%
> when using all 4 MSAs)- but noticeable gains for both random reads and
> (especially) random writes. That graph can be seen at:
> 
> http://free.linux.hp.com/~adb/2009-08-19/ca.png
> 
> BTW: The grace/xmgrace files that generated these can be found at - 
> 
> http://free.linux.hp.com/~adb/2009-08-19/nc.agr
> http://free.linux.hp.com/~adb/2009-08-19/ca.agr
> 
> - as the specifics can be seen better whilst running xmgrace on those
> files.

Thanks a lot for the test run, Alan. I wonder why writes are down while
reads are up. One possibility could be a WRITE vs WRITE_ODIRECT
difference, though I think they should be the same. The patches I posted
have not been benchmarked at all, it's still very much a work in
progress. I just wanted to show the general direction that I thought
would be interesting. So I have done absolutely zero performance
testing, it's only been tested for whether it still worked or not (to
some degree :-)...

I'll poke a bit at it here, too. I want to finish the unplug/wait
problem first. Is your test case using read/write or readv/writev?

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 0/4] Page based O_DIRECT v2
  2009-08-19 22:06   ` Jens Axboe
@ 2009-08-19 22:23     ` Alan D. Brunelle
  2009-08-20 10:40       ` Jens Axboe
  0 siblings, 1 reply; 13+ messages in thread
From: Alan D. Brunelle @ 2009-08-19 22:23 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel, zach.brown, hch

On Thu, 2009-08-20 at 00:06 +0200, Jens Axboe wrote:

> 
> Thanks a lot for the test run, Alan. I wonder why writes are down while
> reads are up. One possibility could be a WRITE vs WRITE_ODIRECT
> difference, though I think they should be the same. The patches I posted
> have not been benchmarked at all, it's still very much a work in
> progress. I just wanted to show the general direction that I thought
> would be interesting. So I have done absolutely zero performance
> testing, it's only been tested for whether it still worked or not (to
> some degree :-)...
> 
> I'll poke a bit at it here, too. I want to finish the unplug/wait
> problem first. Is your test case using read/write or readv/writev?
> 

Hi Jens - I just had some extra cycles, so figured what the heck... :-)

Actually, this is using asynchronous direct I/Os (libaio/Linux native
AIO). If I get a chance tomorrow, I'll play with read/write (and/or
readv/writev). 

Alan


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 0/4] Page based O_DIRECT v2
  2009-08-19 22:23     ` Alan D. Brunelle
@ 2009-08-20 10:40       ` Jens Axboe
  2009-08-20 23:12         ` Alan D. Brunelle
  0 siblings, 1 reply; 13+ messages in thread
From: Jens Axboe @ 2009-08-20 10:40 UTC (permalink / raw)
  To: Alan D. Brunelle; +Cc: linux-kernel, zach.brown, hch

On Wed, Aug 19 2009, Alan D. Brunelle wrote:
> On Thu, 2009-08-20 at 00:06 +0200, Jens Axboe wrote:
> 
> > 
> > Thanks a lot for the test run, Alan. I wonder why writes are down while
> > reads are up. One possibility could be a WRITE vs WRITE_ODIRECT
> > difference, though I think they should be the same. The patches I posted
> > have not been benchmarked at all, it's still very much a work in
> > progress. I just wanted to show the general direction that I thought
> > would be interesting. So I have done absolutely zero performance
> > testing, it's only been tested for whether it still worked or not (to
> > some degree :-)...
> > 
> > I'll poke a bit at it here, too. I want to finish the unplug/wait
> > problem first. Is your test case using read/write or readv/writev?
> > 
> 
> Hi Jens - I just had some extra cycles, so figured what the heck... :-)
> 
> Actually, this is using asynchronous direct I/Os (libaio/Linux native
> AIO). If I get a chance tomorrow, I'll play with read/write (and/or
> readv/writev). 

OK, then I wonder what the heck is up... Did you catch any io metrics?

-- 
Jens Axboe


^ permalink raw reply	[flat|nested] 13+ messages in thread

* Re: [PATCH 0/4] Page based O_DIRECT v2
  2009-08-20 10:40       ` Jens Axboe
@ 2009-08-20 23:12         ` Alan D. Brunelle
  0 siblings, 0 replies; 13+ messages in thread
From: Alan D. Brunelle @ 2009-08-20 23:12 UTC (permalink / raw)
  To: Jens Axboe; +Cc: linux-kernel, zach.brown, hch

On Thu, 2009-08-20 at 12:40 +0200, Jens Axboe wrote:
> On Wed, Aug 19 2009, Alan D. Brunelle wrote:
> > On Thu, 2009-08-20 at 00:06 +0200, Jens Axboe wrote:
> > 
> > > 
> > > Thanks a lot for the test run, Alan. I wonder why writes are down while
> > > reads are up. One possibility could be a WRITE vs WRITE_ODIRECT
> > > difference, though I think they should be the same. The patches I posted
> > > have not been benchmarked at all, it's still very much a work in
> > > progress. I just wanted to show the general direction that I thought
> > > would be interesting. So I have done absolutely zero performance
> > > testing, it's only been tested for whether it still worked or not (to
> > > some degree :-)...
> > > 
> > > I'll poke a bit at it here, too. I want to finish the unplug/wait
> > > problem first. Is your test case using read/write or readv/writev?
> > > 
> > 
> > Hi Jens - I just had some extra cycles, so figured what the heck... :-)
> > 
> > Actually, this is using asynchronous direct I/Os (libaio/Linux native
> > AIO). If I get a chance tomorrow, I'll play with read/write (and/or
> > readv/writev). 
> 
> OK, then I wonder what the heck is up... Did you catch any io metrics?
> 

Hi Jens - 

Took a different tack: Using FIO, I put it through its paces using the
following variables:

kernels: 2.6.31-rc6 / 2.6.31-rc6 + loop-direct git branch
I/O direction: read / write
Seek behavior: sequential / random
FIO engines (modes): libaio / posixaio / psync / sync / vsync
I/O size: 4K / 16K / 64K / 256K

Up at http://free.linux.hp.com/~adb/2009-08-20/bench_modes.png I have a
(very large) .png with all the data present - left column shows
throughput (as measured by FIO), right column has %user + %system (as
measured by FIO). To view this, download the .png & run 'eog' (or
whatever your favorite .png viewer is), blow it up and scroll down to
see the 20 pairs of graphs. 

The 2.6.31-rc6 kernel data is in red, the loop-direct results are in
blue.

It's showing some strange things at this point - The most scary thing is
certainly the random & sequential writes using posixaio - HUGE drops in
performance with the loop-direct branch. But, for some reason, random
write's using libaio look better with your loop-direct branch. 

In http://free.linux.hp.com/~adb/2009-08-20/data.tar.bz2 I have /all/
the FIO job-files & FIO output files used to generate these graphs.

I've got some scripts to automate doing these runs & generating the
graphs, so I'm all primed to continue testing this with future versions
of this patch sequence. (I can easily automate it to utilize iostat
and/or blktrace if you'd like (need?) that data as well.) (It only takes
about 4 hours (plus reboot time) to do this, so it's not a big deal.)

Alan D. Brunelle
Hewlett-Packard


^ permalink raw reply	[flat|nested] 13+ messages in thread

* [PATCH 4/4] direct-io: get rid of irq flag saving where it isn't needed
  2009-08-20 10:17                 ` [PATCH 5/6] aio: use lazy workqueues Jens Axboe
@ 2009-08-20 10:17                   ` Jens Axboe
  0 siblings, 0 replies; 13+ messages in thread
From: Jens Axboe @ 2009-08-20 10:17 UTC (permalink / raw)
  To: linux-kernel; +Cc: jeff, benh, htejun, bzolnier, alan, Jens Axboe

We use the flags saving variant of the spin lock functions everywhere
in fs/direct-io.c, even in places where we otherwise block. Get rid
of that except for the end_io path, which may indeed be called with
irqs disabled.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
---
 fs/direct-io.c |   24 ++++++++++--------------
 1 files changed, 10 insertions(+), 14 deletions(-)

diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0e923f2..2f73593 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -259,13 +259,12 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
 static void dio_bio_submit(struct dio *dio)
 {
 	struct bio *bio = dio->bio;
-	unsigned long flags;
 
 	bio->bi_private = dio;
 
-	spin_lock_irqsave(&dio->bio_lock, flags);
+	spin_lock_irq(&dio->bio_lock);
 	dio->refcount++;
-	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	spin_unlock_irq(&dio->bio_lock);
 
 	if (dio->is_async && dio->rw == READ)
 		bio_set_pages_dirty(bio);
@@ -295,10 +294,9 @@ static void dio_cleanup(struct dio *dio)
  */
 static struct bio *dio_await_one(struct dio *dio)
 {
-	unsigned long flags;
 	struct bio *bio = NULL;
 
-	spin_lock_irqsave(&dio->bio_lock, flags);
+	spin_lock_irq(&dio->bio_lock);
 
 	/*
 	 * Wait as long as the list is empty and there are bios in flight.  bio
@@ -309,17 +307,17 @@ static struct bio *dio_await_one(struct dio *dio)
 	while (dio->refcount > 1 && dio->bio_list == NULL) {
 		__set_current_state(TASK_UNINTERRUPTIBLE);
 		dio->waiter = current;
-		spin_unlock_irqrestore(&dio->bio_lock, flags);
+		spin_unlock_irq(&dio->bio_lock);
 		io_schedule();
 		/* wake up sets us TASK_RUNNING */
-		spin_lock_irqsave(&dio->bio_lock, flags);
+		spin_lock_irq(&dio->bio_lock);
 		dio->waiter = NULL;
 	}
 	if (dio->bio_list) {
 		bio = dio->bio_list;
 		dio->bio_list = bio->bi_private;
 	}
-	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	spin_unlock_irq(&dio->bio_lock);
 	return bio;
 }
 
@@ -388,14 +386,13 @@ static int dio_bio_reap(struct dio *dio)
 
 	if (dio->reap_counter++ >= 64) {
 		while (dio->bio_list) {
-			unsigned long flags;
 			struct bio *bio;
 			int ret2;
 
-			spin_lock_irqsave(&dio->bio_lock, flags);
+			spin_lock_irq(&dio->bio_lock);
 			bio = dio->bio_list;
 			dio->bio_list = bio->bi_private;
-			spin_unlock_irqrestore(&dio->bio_lock, flags);
+			spin_unlock_irq(&dio->bio_lock);
 			ret2 = dio_bio_complete(dio, bio);
 			if (ret == 0)
 				ret = ret2;
@@ -870,7 +867,6 @@ direct_io_worker(struct kiocb *iocb, struct inode *inode,
 	struct dio_args *args, unsigned blkbits, get_block_t get_block,
 	dio_iodone_t end_io, struct dio *dio)
 {
-	unsigned long flags;
 	int rw = args->rw;
 	ssize_t ret = 0;
 	ssize_t ret2;
@@ -984,9 +980,9 @@ direct_io_worker(struct kiocb *iocb, struct inode *inode,
 	 * completion paths can drop their ref and use the remaining count to
 	 * decide to wake the submission path atomically.
 	 */
-	spin_lock_irqsave(&dio->bio_lock, flags);
+	spin_lock_irq(&dio->bio_lock);
 	ret2 = --dio->refcount;
-	spin_unlock_irqrestore(&dio->bio_lock, flags);
+	spin_unlock_irq(&dio->bio_lock);
 
 	if (ret2 == 0) {
 		ret = dio_complete(dio, args->offset, ret);
-- 
1.6.4.53.g3f55e


^ permalink raw reply related	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2009-08-20 23:12 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2009-08-18  8:34 [PATCH 0/4] Page based O_DIRECT v2 Jens Axboe
2009-08-18  8:34 ` [PATCH 1/4] direct-io: unify argument passing by adding a dio_args structure Jens Axboe
2009-08-18  8:34 ` [PATCH 2/4] direct-io: make O_DIRECT IO path be page based Jens Axboe
2009-08-18  8:35 ` [PATCH 3/4] direct-io: add a "IO for kernel" flag to kiocb Jens Axboe
2009-08-18  8:35 ` [PATCH 4/4] direct-io: get rid of irq flag saving where it isn't needed Jens Axboe
2009-08-19 12:44 ` [PATCH 0/4] Page based O_DIRECT v2 Boaz Harrosh
2009-08-19 13:01   ` Jens Axboe
2009-08-19 19:05 ` Alan D. Brunelle
2009-08-19 22:06   ` Jens Axboe
2009-08-19 22:23     ` Alan D. Brunelle
2009-08-20 10:40       ` Jens Axboe
2009-08-20 23:12         ` Alan D. Brunelle
2009-08-20 10:17 [PATCH 0/6] Lazy workqueues Jens Axboe
2009-08-20 10:17 ` [PATCH 1/4] direct-io: unify argument passing by adding a dio_args structure Jens Axboe
2009-08-20 10:17   ` [PATCH 1/6] workqueue: replace singlethread/freezable/rt parameters and variables with flags Jens Axboe
2009-08-20 10:17     ` [PATCH 2/4] direct-io: make O_DIRECT IO path be page based Jens Axboe
2009-08-20 10:17       ` [PATCH 2/6] workqueue: add support for lazy workqueues Jens Axboe
2009-08-20 10:17         ` [PATCH 3/6] crypto: use " Jens Axboe
2009-08-20 10:17           ` [PATCH 3/4] direct-io: add a "IO for kernel" flag to kiocb Jens Axboe
2009-08-20 10:17             ` [PATCH 4/6] libata: use lazy workqueues for the pio task Jens Axboe
2009-08-20 10:17               ` [PATCH 4/5] loop: support O_DIRECT transfer mode Jens Axboe
2009-08-20 10:17                 ` [PATCH 5/6] aio: use lazy workqueues Jens Axboe
2009-08-20 10:17                   ` [PATCH 4/4] direct-io: get rid of irq flag saving where it isn't needed Jens Axboe

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.