From: Al Viro <viro@zeniv.linux.org.uk>
To: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	David Sterba <dsterba@suse.com>,
	Miklos Szeredi <miklos@szeredi.hu>,
	Anton Altaparmakov <anton@tuxera.com>,
	David Howells <dhowells@redhat.com>,
	Matthew Wilcox <willy@infradead.org>,
	Pavel Begunkov <asml.silence@gmail.com>
Subject: [RFC PATCH 28/37] iov_iter: make the amount already copied available to iterator callbacks
Date: Sun,  6 Jun 2021 19:10:42 +0000
Message-ID: <20210606191051.1216821-28-viro@zeniv.linux.org.uk>
In-Reply-To: <20210606191051.1216821-1-viro@zeniv.linux.org.uk>

Making the iterator macros keep track of the amount of data copied is
pretty easy, and it has several benefits:
	1) we no longer need the (from += v.iov_len) - v.iov_len mess
in the callbacks - the initial value plus the total amount copied so
far does just fine (a userspace sketch below makes this concrete).
	2) less obviously, we no longer need to remember the initial amount
of data we wanted to copy; the loops in the iterator macros are along the
lines of
	wanted = bytes;
	while (bytes) {
		copy some
		bytes -= copied
		if short copy
			break
	}
	bytes = wanted - bytes;
The replacement is
	offs = 0;
	while (bytes) {
		copy some
		offs += copied
		bytes -= copied
		if short copy
			break
	}
	bytes = offs;
On its own that wouldn't be a win, but unlike the initial value of bytes,
the amount copied so far *is* useful in the callbacks.
	3) in some cases (csum_and_copy_..._iter()) the callbacks already
maintained such an offset by hand; with this change that bookkeeping
can be dropped.

	Less boilerplate and more readable code...
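
	To make the idiom change concrete, here is a minimal userspace
sketch (plain C, not the kernel macros; src, dst1, dst2 and lens[] are
made up for illustration) showing that the old moving-cursor trick and
the new running offset address exactly the same bytes:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char src[] = "abcdefghij";
		char dst1[16] = {0}, dst2[16] = {0};
		size_t lens[] = {3, 4, 3};	/* segment lengths */
		const char *from = src;		/* old style: moving cursor */
		size_t off = 0;			/* new style: running offset */
		char *d1 = dst1;
		int i;

		for (i = 0; i < 3; i++) {
			/* old: bump the cursor past the segment, then
			 * subtract to recover the segment's start */
			memcpy(d1, (from += lens[i]) - lens[i], lens[i]);
			d1 += lens[i];
			/* new: use the total copied so far directly */
			memcpy(dst2 + off, src + off, lens[i]);
			off += lens[i];
		}
		printf("%s\n%s\n", dst1, dst2);	/* both: abcdefghij */
		return 0;
	}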

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
---
 lib/iov_iter.c | 120 ++++++++++++++++++++++++---------------------------------
 1 file changed, 50 insertions(+), 70 deletions(-)

diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 5a871d001e12..14e34f9df490 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -17,15 +17,14 @@
 #define PIPE_PARANOIA /* for now */
 
 /* covers iovec and kvec alike */
-#define iterate_iovec(i, n, __v, __p, skip, STEP) {		\
-	size_t left;						\
-	size_t wanted = n;					\
+#define iterate_iovec(i, n, __v, __off, __p, skip, STEP) {	\
+	size_t __off = 0;					\
 	do {							\
 		__v.iov_len = min(n, __p->iov_len - skip);	\
 		if (likely(__v.iov_len)) {			\
 			__v.iov_base = __p->iov_base + skip;	\
-			left = (STEP);				\
-			__v.iov_len -= left;			\
+			__v.iov_len -= (STEP);			\
+			__off += __v.iov_len;			\
 			skip += __v.iov_len;			\
 			n -= __v.iov_len;			\
 			if (skip < __p->iov_len)		\
@@ -34,11 +33,11 @@
 		__p++;						\
 		skip = 0;					\
 	} while (n);						\
-	n = wanted - n;						\
+	n = __off;						\
 }
 
-#define iterate_bvec(i, n, __v, p, skip, STEP) {		\
-	size_t wanted = n;					\
+#define iterate_bvec(i, n, __v, __off, p, skip, STEP) {		\
+	size_t __off = 0;					\
 	while (n) {						\
 		unsigned offset = p->bv_offset + skip;		\
 		unsigned left;					\
@@ -50,6 +49,7 @@
 		left = (STEP);					\
 		kunmap_local(kaddr);				\
 		__v.iov_len -= left;				\
+		__off += __v.iov_len;				\
 		skip += __v.iov_len;				\
 		if (skip == p->bv_len) {			\
 			skip = 0;				\
@@ -59,13 +59,14 @@
 		if (left)					\
 			break;					\
 	}							\
-	n = wanted - n;						\
+	n = __off;						\
 }
 
-#define iterate_xarray(i, n, __v, skip, STEP) {		\
+#define iterate_xarray(i, n, __v, __off, skip, STEP) {		\
 	__label__ __out;					\
+	size_t __off = 0;					\
 	struct page *head = NULL;				\
-	size_t wanted = n, seg, offset;				\
+	size_t seg, offset;					\
 	loff_t start = i->xarray_start + skip;			\
 	pgoff_t index = start >> PAGE_SHIFT;			\
 	int j;							\
@@ -84,25 +85,26 @@
 		for (j = (head->index < index) ? index - head->index : 0; \
 		     j < thp_nr_pages(head); j++) {			\
 			void *kaddr = kmap_local_page(head + j);	\
-			offset = (i->xarray_start + skip) % PAGE_SIZE;	\
+			offset = (start + __off) % PAGE_SIZE;		\
 			__v.iov_base = kaddr + offset;			\
 			seg = PAGE_SIZE - offset;			\
 			__v.iov_len = min(n, seg);			\
 			left = (STEP);					\
 			kunmap_local(kaddr);				\
 			__v.iov_len -= left;				\
+			__off += __v.iov_len;				\
 			n -= __v.iov_len;				\
-			skip += __v.iov_len;				\
 			if (left || n == 0)				\
 				goto __out;				\
 		}							\
 	}							\
 __out:								\
 	rcu_read_unlock();					\
-	n = wanted - n;						\
+	skip += __off;						\
+	n = __off;						\
 }
 
-#define __iterate_and_advance(i, n, v, I, K) {			\
+#define __iterate_and_advance(i, n, v, off, I, K) {		\
 	if (unlikely(i->count < n))				\
 		n = i->count;					\
 	if (likely(n)) {					\
@@ -110,31 +112,31 @@ __out:								\
 		if (likely(iter_is_iovec(i))) {			\
 			const struct iovec *iov = i->iov;	\
 			struct iovec v;				\
-			iterate_iovec(i, n, v, iov, skip, (I))	\
+			iterate_iovec(i, n, v, off, iov, skip, (I))	\
 			i->nr_segs -= iov - i->iov;		\
 			i->iov = iov;				\
 		} else if (iov_iter_is_bvec(i)) {		\
 			const struct bio_vec *bvec = i->bvec;	\
 			struct kvec v;				\
-			iterate_bvec(i, n, v, bvec, skip, (K))	\
+			iterate_bvec(i, n, v, off, bvec, skip, (K))	\
 			i->nr_segs -= bvec - i->bvec;		\
 			i->bvec = bvec;				\
 		} else if (iov_iter_is_kvec(i)) {		\
 			const struct kvec *kvec = i->kvec;	\
 			struct kvec v;				\
-			iterate_iovec(i, n, v, kvec, skip, (K))	\
+			iterate_iovec(i, n, v, off, kvec, skip, (K))	\
 			i->nr_segs -= kvec - i->kvec;		\
 			i->kvec = kvec;				\
 		} else if (iov_iter_is_xarray(i)) {		\
 			struct kvec v;				\
-			iterate_xarray(i, n, v, skip, (K))	\
+			iterate_xarray(i, n, v, off, skip, (K))	\
 		}						\
 		i->count -= n;					\
 		i->iov_offset = skip;				\
 	}							\
 }
-#define iterate_and_advance(i, n, v, I, K) \
-	__iterate_and_advance(i, n, v, I, ((void)(K),0))
+#define iterate_and_advance(i, n, v, off, I, K) \
+	__iterate_and_advance(i, n, v, off, I, ((void)(K),0))
 
 static int copyout(void __user *to, const void *from, size_t n)
 {
@@ -618,14 +620,13 @@ static size_t csum_and_copy_to_pipe_iter(const void *addr, size_t bytes,
 
 size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
-	const char *from = addr;
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_pipe_to_iter(addr, bytes, i);
 	if (iter_is_iovec(i))
 		might_fault();
-	iterate_and_advance(i, bytes, v,
-		copyout(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
-		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
+	iterate_and_advance(i, bytes, v, off,
+		copyout(v.iov_base, addr + off, v.iov_len),
+		memcpy(v.iov_base, addr + off, v.iov_len)
 	)
 
 	return bytes;
@@ -714,17 +715,13 @@ static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
  */
 size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 {
-	const char *from = addr;
-
 	if (unlikely(iov_iter_is_pipe(i)))
 		return copy_mc_pipe_to_iter(addr, bytes, i);
 	if (iter_is_iovec(i))
 		might_fault();
-	__iterate_and_advance(i, bytes, v,
-		copyout_mc(v.iov_base, (from += v.iov_len) - v.iov_len,
-			   v.iov_len),
-		copy_mc_to_kernel(v.iov_base, (from += v.iov_len)
-					- v.iov_len, v.iov_len)
+	__iterate_and_advance(i, bytes, v, off,
+		copyout_mc(v.iov_base, addr + off, v.iov_len),
+		copy_mc_to_kernel(v.iov_base, addr + off, v.iov_len)
 	)
 
 	return bytes;
@@ -734,16 +731,15 @@ EXPORT_SYMBOL_GPL(_copy_mc_to_iter);
 
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
-	char *to = addr;
 	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
 	if (iter_is_iovec(i))
 		might_fault();
-	iterate_and_advance(i, bytes, v,
-		copyin((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	iterate_and_advance(i, bytes, v, off,
+		copyin(addr + off, v.iov_base, v.iov_len),
+		memcpy(addr + off, v.iov_base, v.iov_len)
 	)
 
 	return bytes;
@@ -752,15 +748,14 @@ EXPORT_SYMBOL(_copy_from_iter);
 
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	char *to = addr;
 	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
-	iterate_and_advance(i, bytes, v,
-		__copy_from_user_inatomic_nocache((to += v.iov_len) - v.iov_len,
+	iterate_and_advance(i, bytes, v, off,
+		__copy_from_user_inatomic_nocache(addr + off,
 					 v.iov_base, v.iov_len),
-		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+		memcpy(addr + off, v.iov_base, v.iov_len)
 	)
 
 	return bytes;
@@ -784,16 +779,13 @@ EXPORT_SYMBOL(_copy_from_iter_nocache);
  */
 size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
-	char *to = addr;
 	if (unlikely(iov_iter_is_pipe(i))) {
 		WARN_ON(1);
 		return 0;
 	}
-	iterate_and_advance(i, bytes, v,
-		__copy_from_user_flushcache((to += v.iov_len) - v.iov_len,
-					 v.iov_base, v.iov_len),
-		memcpy_flushcache((to += v.iov_len) - v.iov_len, v.iov_base,
-			v.iov_len)
+	iterate_and_advance(i, bytes, v, off,
+		__copy_from_user_flushcache(addr + off, v.iov_base, v.iov_len),
+		memcpy_flushcache(addr + off, v.iov_base, v.iov_len)
 	)
 
 	return bytes;
@@ -922,7 +914,7 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 {
 	if (unlikely(iov_iter_is_pipe(i)))
 		return pipe_zero(bytes, i);
-	iterate_and_advance(i, bytes, v,
+	iterate_and_advance(i, bytes, v, count,
 		clear_user(v.iov_base, v.iov_len),
 		memset(v.iov_base, 0, v.iov_len)
 	)
@@ -944,9 +936,9 @@ size_t copy_page_from_iter_atomic(struct page *page, unsigned offset, size_t byt
 		WARN_ON(1);
 		return 0;
 	}
-	iterate_and_advance(i, bytes, v,
-		copyin((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len),
-		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
+	iterate_and_advance(i, bytes, v, off,
+		copyin(p + off, v.iov_base, v.iov_len),
+		memcpy(p + off, v.iov_base, v.iov_len)
 	)
 	kunmap_atomic(kaddr);
 	return bytes;
@@ -1669,28 +1661,22 @@ EXPORT_SYMBOL(iov_iter_get_pages_alloc);
 size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
 			       struct iov_iter *i)
 {
-	char *to = addr;
 	__wsum sum, next;
-	size_t off = 0;
 	sum = *csum;
 	if (unlikely(iov_iter_is_pipe(i) || iov_iter_is_discard(i))) {
 		WARN_ON(1);
 		return 0;
 	}
-	iterate_and_advance(i, bytes, v, ({
+	iterate_and_advance(i, bytes, v, off, ({
 		next = csum_and_copy_from_user(v.iov_base,
-					       (to += v.iov_len) - v.iov_len,
+					       addr + off,
 					       v.iov_len);
-		if (next) {
+		if (next)
 			sum = csum_block_add(sum, next, off);
-			off += v.iov_len;
-		}
 		next ? 0 : v.iov_len;
 	}), ({
-		sum = csum_and_memcpy((to += v.iov_len) - v.iov_len,
-				      v.iov_base, v.iov_len,
+		sum = csum_and_memcpy(addr + off, v.iov_base, v.iov_len,
 				      sum, off);
-		off += v.iov_len;
 	})
 	)
 	*csum = sum;
@@ -1702,33 +1688,27 @@ size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *_csstate,
 			     struct iov_iter *i)
 {
 	struct csum_state *csstate = _csstate;
-	const char *from = addr;
 	__wsum sum, next;
-	size_t off;
 
 	if (unlikely(iov_iter_is_pipe(i)))
 		return csum_and_copy_to_pipe_iter(addr, bytes, _csstate, i);
 
 	sum = csum_shift(csstate->csum, csstate->off);
-	off = 0;
 	if (unlikely(iov_iter_is_discard(i))) {
 		WARN_ON(1);	/* for now */
 		return 0;
 	}
-	iterate_and_advance(i, bytes, v, ({
-		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
+	iterate_and_advance(i, bytes, v, off, ({
+		next = csum_and_copy_to_user(addr + off,
 					     v.iov_base,
 					     v.iov_len);
-		if (next) {
+		if (next)
 			sum = csum_block_add(sum, next, off);
-			off += v.iov_len;
-		}
 		next ? 0 : v.iov_len;
 	}), ({
 		sum = csum_and_memcpy(v.iov_base,
-				     (from += v.iov_len) - v.iov_len,
+				     addr + off,
 				     v.iov_len, sum, off);
-		off += v.iov_len;
 	})
 	)
 	csstate->csum = csum_shift(sum, csstate->off);
-- 
2.11.0

