From: john.hubbard@gmail.com
To: Andrew Morton <akpm@linux-foundation.org>
Cc: "Alexander Viro" <viro@zeniv.linux.org.uk>,
	"Anna Schumaker" <anna.schumaker@netapp.com>,
	"David S . Miller" <davem@davemloft.net>,
	"Dominique Martinet" <asmadeus@codewreck.org>,
	"Eric Van Hensbergen" <ericvh@gmail.com>,
	"Jason Gunthorpe" <jgg@ziepe.ca>,
	"Jason Wang" <jasowang@redhat.com>,
	"Jens Axboe" <axboe@kernel.dk>,
	"Latchesar Ionkov" <lucho@ionkov.net>,
	"Michael S . Tsirkin" <mst@redhat.com>,
	"Miklos Szeredi" <miklos@szeredi.hu>,
	"Trond Myklebust" <trond.myklebust@hammerspace.com>,
	"Christoph Hellwig" <hch@lst.de>,
	"Matthew Wilcox" <willy@infradead.org>,
	linux-mm@kvack.org, LKML <linux-kernel@vger.kernel.org>,
	ceph-devel@vger.kernel.org, kvm@vger.kernel.org,
	linux-block@vger.kernel.org, linux-cifs@vger.kernel.org,
	linux-fsdevel@vger.kernel.org, linux-nfs@vger.kernel.org,
	linux-rdma@vger.kernel.org, netdev@vger.kernel.org,
	samba-technical@lists.samba.org,
	v9fs-developer@lists.sourceforge.net,
	virtualization@lists.linux-foundation.org,
	"Jérôme Glisse" <jglisse@redhat.com>,
	"John Hubbard" <jhubbard@nvidia.com>, "Jan Kara" <jack@suse.cz>,
	"Dan Williams" <dan.j.williams@intel.com>,
	"Johannes Thumshirn" <jthumshirn@suse.de>,
	"Ming Lei" <ming.lei@redhat.com>,
	"Dave Chinner" <david@fromorbit.com>,
	"Boaz Harrosh" <boaz@plexistor.com>,
	"Yan, Zheng" <zyan@redhat.com>, "Sage Weil" <sage@redhat.com>,
	"Ilya Dryomov" <idryomov@gmail.com>
Subject: [PATCH 10/12] fs/ceph: convert put_page() to put_user_page*()
Date: Tue, 23 Jul 2019 21:25:16 -0700
Message-ID: <20190724042518.14363-11-jhubbard@nvidia.com>
In-Reply-To: <20190724042518.14363-1-jhubbard@nvidia.com>

From: Jérôme Glisse <jglisse@redhat.com>

For pages that were retained via get_user_pages*(), release those pages
via the new put_user_page*() routines, instead of via put_page().

This is part of a tree-wide conversion, as described in commit fc1d8e7cca2d
("mm: introduce put_user_page*(), placeholder versions").

Changes from Jérôme's original patch:

* Use the enhanced put_user_pages_dirty_lock() (see the sketch below).
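
  For illustration, a minimal sketch of that change, using "page" and
  "should_dirty" as stand-ins for the bvec page and dirty flag handled
  in put_bvecs() in the diff below:

	/* Before: dirty (if requested) and release by hand. */
	if (should_dirty)
		set_page_dirty_lock(page);
	put_page(page);

	/* After: the enhanced helper dirties and releases in one call. */
	put_user_pages_dirty_lock(&page, 1, should_dirty);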

Signed-off-by: Jérôme Glisse <jglisse@redhat.com>
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-block@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: ceph-devel@vger.kernel.org
Cc: Jan Kara <jack@suse.cz>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Johannes Thumshirn <jthumshirn@suse.de>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Boaz Harrosh <boaz@plexistor.com>
Cc: "Yan, Zheng" <zyan@redhat.com>
Cc: Sage Weil <sage@redhat.com>
Cc: Ilya Dryomov <idryomov@gmail.com>
---
 fs/ceph/file.c | 62 ++++++++++++++++++++++++++++++++++++++------------
 1 file changed, 48 insertions(+), 14 deletions(-)

diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 685a03cc4b77..c628a1f96978 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -158,18 +158,26 @@ static ssize_t iter_get_bvecs_alloc(struct iov_iter *iter, size_t maxsize,
 	return bytes;
 }
 
-static void put_bvecs(struct bio_vec *bvecs, int num_bvecs, bool should_dirty)
+static void put_bvecs(struct bio_vec *bv, int num_bvecs, bool should_dirty,
+		      bool from_gup)
 {
 	int i;
 
+
 	for (i = 0; i < num_bvecs; i++) {
-		if (bvecs[i].bv_page) {
+		if (!bv[i].bv_page)
+			continue;
+
+		if (from_gup) {
+			put_user_pages_dirty_lock(&bv[i].bv_page, 1,
+						  should_dirty);
+		} else {
 			if (should_dirty)
-				set_page_dirty_lock(bvecs[i].bv_page);
-			put_page(bvecs[i].bv_page);
+				set_page_dirty_lock(bv[i].bv_page);
+			put_page(bv[i].bv_page);
 		}
 	}
-	kvfree(bvecs);
+	kvfree(bv);
 }
 
 /*
@@ -730,6 +738,7 @@ struct ceph_aio_work {
 };
 
 static void ceph_aio_retry_work(struct work_struct *work);
+static void ceph_aio_from_gup_retry_work(struct work_struct *work);
 
 static void ceph_aio_complete(struct inode *inode,
 			      struct ceph_aio_request *aio_req)
@@ -774,7 +783,7 @@ static void ceph_aio_complete(struct inode *inode,
 	kfree(aio_req);
 }
 
-static void ceph_aio_complete_req(struct ceph_osd_request *req)
+static void _ceph_aio_complete_req(struct ceph_osd_request *req, bool from_gup)
 {
 	int rc = req->r_result;
 	struct inode *inode = req->r_inode;
@@ -793,7 +802,9 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
 
 		aio_work = kmalloc(sizeof(*aio_work), GFP_NOFS);
 		if (aio_work) {
-			INIT_WORK(&aio_work->work, ceph_aio_retry_work);
+			INIT_WORK(&aio_work->work, from_gup ?
+				  ceph_aio_from_gup_retry_work :
+				  ceph_aio_retry_work);
 			aio_work->req = req;
 			queue_work(ceph_inode_to_client(inode)->inode_wq,
 				   &aio_work->work);
@@ -830,7 +841,7 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
 	}
 
 	put_bvecs(osd_data->bvec_pos.bvecs, osd_data->num_bvecs,
-		  aio_req->should_dirty);
+		  aio_req->should_dirty, from_gup);
 	ceph_osdc_put_request(req);
 
 	if (rc < 0)
@@ -840,7 +851,17 @@ static void ceph_aio_complete_req(struct ceph_osd_request *req)
 	return;
 }
 
-static void ceph_aio_retry_work(struct work_struct *work)
+static void ceph_aio_complete_req(struct ceph_osd_request *req)
+{
+	_ceph_aio_complete_req(req, false);
+}
+
+static void ceph_aio_from_gup_complete_req(struct ceph_osd_request *req)
+{
+	_ceph_aio_complete_req(req, true);
+}
+
+static void _ceph_aio_retry_work(struct work_struct *work, bool from_gup)
 {
 	struct ceph_aio_work *aio_work =
 		container_of(work, struct ceph_aio_work, work);
@@ -891,7 +912,8 @@ static void ceph_aio_retry_work(struct work_struct *work)
 
 	ceph_osdc_put_request(orig_req);
 
-	req->r_callback = ceph_aio_complete_req;
+	req->r_callback = from_gup ? ceph_aio_from_gup_complete_req :
+			  ceph_aio_complete_req;
 	req->r_inode = inode;
 	req->r_priv = aio_req;
 
@@ -899,13 +921,23 @@ static void ceph_aio_retry_work(struct work_struct *work)
 out:
 	if (ret < 0) {
 		req->r_result = ret;
-		ceph_aio_complete_req(req);
+		_ceph_aio_complete_req(req, from_gup);
 	}
 
 	ceph_put_snap_context(snapc);
 	kfree(aio_work);
 }
 
+static void ceph_aio_retry_work(struct work_struct *work)
+{
+	_ceph_aio_retry_work(work, false);
+}
+
+static void ceph_aio_from_gup_retry_work(struct work_struct *work)
+{
+	_ceph_aio_retry_work(work, true);
+}
+
 static ssize_t
 ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 		       struct ceph_snap_context *snapc,
@@ -927,6 +959,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 	loff_t pos = iocb->ki_pos;
 	bool write = iov_iter_rw(iter) == WRITE;
 	bool should_dirty = !write && iter_is_iovec(iter);
+	bool from_gup = iov_iter_get_pages_use_gup(iter);
 
 	if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
 		return -EROFS;
@@ -1023,7 +1056,8 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 			aio_req->num_reqs++;
 			atomic_inc(&aio_req->pending_reqs);
 
-			req->r_callback = ceph_aio_complete_req;
+			req->r_callback = !from_gup ? ceph_aio_complete_req :
+					  ceph_aio_from_gup_complete_req;
 			req->r_inode = inode;
 			req->r_priv = aio_req;
 			list_add_tail(&req->r_private_item, &aio_req->osd_reqs);
@@ -1054,7 +1088,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 				len = ret;
 		}
 
-		put_bvecs(bvecs, num_pages, should_dirty);
+		put_bvecs(bvecs, num_pages, should_dirty, from_gup);
 		ceph_osdc_put_request(req);
 		if (ret < 0)
 			break;
@@ -1093,7 +1127,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
 							      req, false);
 			if (ret < 0) {
 				req->r_result = ret;
-				ceph_aio_complete_req(req);
+				_ceph_aio_complete_req(req, from_gup);
 			}
 		}
 		return -EIOCBQUEUED;
-- 
2.22.0

