linux-next.vger.kernel.org archive mirror
From: Stephen Rothwell <sfr@canb.auug.org.au>
To: Al Viro <viro@ZenIV.linux.org.uk>,
	Trond Myklebust <trond.myklebust@fys.uio.no>
Cc: linux-next@vger.kernel.org, linux-kernel@vger.kernel.org,
	Christoph Hellwig <hch@lst.de>,
	Weston Andros Adamson <dros@primarydata.com>,
	Anna Schumaker <Anna.Schumaker@netapp.com>
Subject: linux-next: manual merge of the vfs tree with the nfs tree
Date: Thu, 29 May 2014 13:25:35 +1000
Message-ID: <20140529132535.40d6789e@canb.auug.org.au>


Hi Al,

Today's linux-next merge of the vfs tree got a conflict in
fs/nfs/direct.c between commit fab5fc25d230 ("nfs: remove
->read_pageio_init from rpc ops"), and possibly others, from the nfs
tree and commits 619d30b4b8c4 ("convert the guts of nfs_direct_IO() to
iov_iter"), a6cbcd4a4a85 ("get rid of pointless iov_length() in
->direct_IO()") and 91f79c43d1b5 ("new helper:
iov_iter_get_pages_alloc()") from the vfs tree.

I fixed it up (I hope - see below) and can carry the fix as necessary
(no action is required).
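
As a side note, both resolved loops now share the same paging
arithmetic: iov_iter_get_pages_alloc() returns the number of bytes it
pinned plus the offset (pgbase) into the first page, and the loop then
carves that run into per-page requests.  A minimal userspace sketch of
just that carving (the PAGE_SIZE value and the demo inputs are made-up
illustrations, not the kernel code):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096UL	/* assumed page size for the demo */

int main(void)
{
	/* Pretend the helper pinned 10000 bytes starting 300 bytes
	 * into the first page, as its return value and the returned
	 * start offset (pgbase) would report. */
	size_t result = 10000;
	size_t pgbase = 300;
	size_t bytes = result;
	/* Same rounding as the diff: partial first/last pages count. */
	size_t npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;

	for (size_t i = 0; i < npages; i++) {
		/* min(bytes, PAGE_SIZE - pgbase), as in the loops below */
		size_t req_len = bytes < PAGE_SIZE - pgbase ?
				 bytes : PAGE_SIZE - pgbase;
		printf("page %zu: offset %zu, len %zu\n", i, pgbase, req_len);
		pgbase = 0;	/* pages after the first start at offset 0 */
		bytes -= req_len;
	}
	return 0;
}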

-- 
Cheers,
Stephen Rothwell                    sfr@canb.auug.org.au

diff --cc fs/nfs/direct.c
index 4ad7bc388679,b122fe21fea0..000000000000
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@@ -414,60 -322,37 +414,37 @@@ static const struct nfs_pgio_completion
   * handled automatically by nfs_direct_read_result().  Otherwise, if
   * no requests have been sent, just return an error.
   */
- static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
- 						const struct iovec *iov,
- 						loff_t pos, bool uio)
- {
- 	struct nfs_direct_req *dreq = desc->pg_dreq;
- 	struct nfs_open_context *ctx = dreq->ctx;
- 	struct inode *inode = ctx->dentry->d_inode;
- 	unsigned long user_addr = (unsigned long)iov->iov_base;
- 	size_t count = iov->iov_len;
- 	size_t rsize = NFS_SERVER(inode)->rsize;
- 	unsigned int pgbase;
- 	int result;
- 	ssize_t started = 0;
- 	struct page **pagevec = NULL;
- 	unsigned int npages;
- 
- 	do {
- 		size_t bytes;
- 		int i;
  
- 		pgbase = user_addr & ~PAGE_MASK;
- 		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);
+ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
+ 					      struct iov_iter *iter,
+ 					      loff_t pos)
+ {
+ 	struct nfs_pageio_descriptor desc;
+ 	struct inode *inode = dreq->inode;
+ 	ssize_t result = -EINVAL;
+ 	size_t requested_bytes = 0;
+ 	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
  
- 		result = -ENOMEM;
- 		npages = nfs_page_array_len(pgbase, bytes);
- 		if (!pagevec)
- 			pagevec = kmalloc(npages * sizeof(struct page *),
- 					  GFP_KERNEL);
- 		if (!pagevec)
- 			break;
- 		if (uio) {
- 			down_read(&current->mm->mmap_sem);
- 			result = get_user_pages(current, current->mm, user_addr,
- 					npages, 1, 0, pagevec, NULL);
- 			up_read(&current->mm->mmap_sem);
- 			if (result < 0)
- 				break;
- 		} else {
- 			WARN_ON(npages != 1);
- 			result = get_kernel_page(user_addr, 1, pagevec);
- 			if (WARN_ON(result != 1))
- 				break;
- 		}
 -	NFS_PROTO(dreq->inode)->read_pageio_init(&desc, dreq->inode,
++	nfs_pageio_init_read(&desc, dreq->inode, false,
+ 			     &nfs_direct_read_completion_ops);
+ 	get_dreq(dreq);
+ 	desc.pg_dreq = dreq;
+ 	atomic_inc(&inode->i_dio_count);
  
- 		if ((unsigned)result < npages) {
- 			bytes = result * PAGE_SIZE;
- 			if (bytes <= pgbase) {
- 				nfs_direct_release_pages(pagevec, result);
- 				break;
- 			}
- 			bytes -= pgbase;
- 			npages = result;
- 		}
+ 	while (iov_iter_count(iter)) {
+ 		struct page **pagevec;
+ 		size_t bytes;
+ 		size_t pgbase;
+ 		unsigned npages, i;
  
+ 		result = iov_iter_get_pages_alloc(iter, &pagevec, 
+ 						  rsize, &pgbase);
+ 		if (result < 0)
+ 			break;
+ 	
+ 		bytes = result;
+ 		iov_iter_advance(iter, bytes);
+ 		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
  		for (i = 0; i < npages; i++) {
  			struct nfs_page *req;
  			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
@@@ -965,24 -719,58 +813,57 @@@ static ssize_t nfs_direct_write_schedul
  	struct inode *inode = dreq->inode;
  	ssize_t result = 0;
  	size_t requested_bytes = 0;
- 	unsigned long seg;
+ 	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
  
 -	NFS_PROTO(inode)->write_pageio_init(&desc, inode, FLUSH_COND_STABLE,
 +	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
  			      &nfs_direct_write_completion_ops);
  	desc.pg_dreq = dreq;
  	get_dreq(dreq);
  	atomic_inc(&inode->i_dio_count);
  
- 	NFS_I(dreq->inode)->write_io += iov_length(iov, nr_segs);
- 	for (seg = 0; seg < nr_segs; seg++) {
- 		const struct iovec *vec = &iov[seg];
- 		result = nfs_direct_write_schedule_segment(&desc, vec, pos, uio);
+ 	NFS_I(inode)->write_io += iov_iter_count(iter);
+ 	while (iov_iter_count(iter)) {
+ 		struct page **pagevec;
+ 		size_t bytes;
+ 		size_t pgbase;
+ 		unsigned npages, i;
+ 
+ 		result = iov_iter_get_pages_alloc(iter, &pagevec, 
+ 						  wsize, &pgbase);
  		if (result < 0)
  			break;
- 		requested_bytes += result;
- 		if ((size_t)result < vec->iov_len)
+ 
+ 		bytes = result;
+ 		iov_iter_advance(iter, bytes);
+ 		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
+ 		for (i = 0; i < npages; i++) {
+ 			struct nfs_page *req;
+ 			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
+ 
 -			req = nfs_create_request(dreq->ctx, inode,
 -						 pagevec[i],
++			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
+ 						 pgbase, req_len);
+ 			if (IS_ERR(req)) {
+ 				result = PTR_ERR(req);
+ 				break;
+ 			}
+ 			nfs_lock_request(req);
+ 			req->wb_index = pos >> PAGE_SHIFT;
+ 			req->wb_offset = pos & ~PAGE_MASK;
+ 			if (!nfs_pageio_add_request(&desc, req)) {
+ 				result = desc.pg_error;
+ 				nfs_unlock_and_release_request(req);
+ 				break;
+ 			}
+ 			pgbase = 0;
+ 			bytes -= req_len;
+ 			requested_bytes += req_len;
+ 			pos += req_len;
+ 			dreq->bytes_left -= req_len;
+ 		}
+ 		nfs_direct_release_pages(pagevec, npages);
+ 		kvfree(pagevec);
+ 		if (result < 0)
  			break;
- 		pos += vec->iov_len;
  	}
  	nfs_pageio_complete(&desc);
  

