From: Jeff Layton <jlayton@kernel.org>
To: Al Viro <viro@zeniv.linux.org.uk>, linux-fsdevel@vger.kernel.org
Cc: Linus Torvalds <torvalds@linux-foundation.org>,
Jens Axboe <axboe@kernel.dk>, Christoph Hellwig <hch@lst.de>,
Matthew Wilcox <willy@infradead.org>,
David Howells <dhowells@redhat.com>,
Dominique Martinet <asmadeus@codewreck.org>,
Christian Brauner <brauner@kernel.org>
Subject: Re: [PATCH 09/44] new iov_iter flavour - ITER_UBUF
Date: Mon, 27 Jun 2022 14:47:03 -0400 [thread overview]
Message-ID: <07ad7be25bab03c164bbd1f2d2264c9e6f79b70d.camel@kernel.org> (raw)
In-Reply-To: <20220622041552.737754-9-viro@zeniv.linux.org.uk>
On Wed, 2022-06-22 at 05:15 +0100, Al Viro wrote:
> Equivalent of single-segment iovec. Initialized by iov_iter_ubuf(),
> checked for by iter_is_ubuf(), otherwise behaves like ITER_IOVEC
> ones.
>
> We are going to expose the things like ->write_iter() et.al. to those
> in subsequent commits.
>
> New predicate (user_backed_iter()) that is true for ITER_IOVEC and
> ITER_UBUF; places like direct-IO handling should use that for
> checking that pages we modify after getting them from iov_iter_get_pages()
> would need to be dirtied.
>
> DO NOT assume that replacing iter_is_iovec() with user_backed_iter()
> will solve all problems - there's code that uses iter_is_iovec() to
> decide how to poke around in iov_iter guts and for that the predicate
> replacement obviously won't suffice.
>
> Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
> ---
> block/fops.c | 6 +--
> fs/ceph/file.c | 2 +-
> fs/cifs/file.c | 2 +-
> fs/direct-io.c | 2 +-
> fs/fuse/dev.c | 4 +-
> fs/fuse/file.c | 2 +-
> fs/gfs2/file.c | 2 +-
> fs/iomap/direct-io.c | 2 +-
> fs/nfs/direct.c | 2 +-
> include/linux/uio.h | 26 ++++++++++++
> lib/iov_iter.c | 94 ++++++++++++++++++++++++++++++++++----------
> mm/shmem.c | 2 +-
> 12 files changed, 113 insertions(+), 33 deletions(-)
>
> diff --git a/block/fops.c b/block/fops.c
> index 6e86931ab847..3e68d69e0ee3 100644
> --- a/block/fops.c
> +++ b/block/fops.c
> @@ -69,7 +69,7 @@ static ssize_t __blkdev_direct_IO_simple(struct kiocb *iocb,
>
> if (iov_iter_rw(iter) == READ) {
> bio_init(&bio, bdev, vecs, nr_pages, REQ_OP_READ);
> - if (iter_is_iovec(iter))
> + if (user_backed_iter(iter))
> should_dirty = true;
> } else {
> bio_init(&bio, bdev, vecs, nr_pages, dio_bio_write_op(iocb));
> @@ -199,7 +199,7 @@ static ssize_t __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
> }
>
> dio->size = 0;
> - if (is_read && iter_is_iovec(iter))
> + if (is_read && user_backed_iter(iter))
> dio->flags |= DIO_SHOULD_DIRTY;
>
> blk_start_plug(&plug);
> @@ -331,7 +331,7 @@ static ssize_t __blkdev_direct_IO_async(struct kiocb *iocb,
> dio->size = bio->bi_iter.bi_size;
>
> if (is_read) {
> - if (iter_is_iovec(iter)) {
> + if (user_backed_iter(iter)) {
> dio->flags |= DIO_SHOULD_DIRTY;
> bio_set_pages_dirty(bio);
> }
> diff --git a/fs/ceph/file.c b/fs/ceph/file.c
> index 8c8226c0feac..e132adeeaf16 100644
> --- a/fs/ceph/file.c
> +++ b/fs/ceph/file.c
> @@ -1262,7 +1262,7 @@ ceph_direct_read_write(struct kiocb *iocb, struct iov_iter *iter,
> size_t count = iov_iter_count(iter);
> loff_t pos = iocb->ki_pos;
> bool write = iov_iter_rw(iter) == WRITE;
> - bool should_dirty = !write && iter_is_iovec(iter);
> + bool should_dirty = !write && user_backed_iter(iter);
>
> if (write && ceph_snap(file_inode(file)) != CEPH_NOSNAP)
> return -EROFS;
> diff --git a/fs/cifs/file.c b/fs/cifs/file.c
> index 1618e0537d58..4b4129d9a90c 100644
> --- a/fs/cifs/file.c
> +++ b/fs/cifs/file.c
> @@ -4004,7 +4004,7 @@ static ssize_t __cifs_readv(
> if (!is_sync_kiocb(iocb))
> ctx->iocb = iocb;
>
> - if (iter_is_iovec(to))
> + if (user_backed_iter(to))
> ctx->should_dirty = true;
>
> if (direct) {
> diff --git a/fs/direct-io.c b/fs/direct-io.c
> index 39647eb56904..72237f49ad94 100644
> --- a/fs/direct-io.c
> +++ b/fs/direct-io.c
> @@ -1245,7 +1245,7 @@ ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
> spin_lock_init(&dio->bio_lock);
> dio->refcount = 1;
>
> - dio->should_dirty = iter_is_iovec(iter) && iov_iter_rw(iter) == READ;
> + dio->should_dirty = user_backed_iter(iter) && iov_iter_rw(iter) == READ;
> sdio.iter = iter;
> sdio.final_block_in_request = end >> blkbits;
>
> diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
> index 0e537e580dc1..8d657c2cd6f7 100644
> --- a/fs/fuse/dev.c
> +++ b/fs/fuse/dev.c
> @@ -1356,7 +1356,7 @@ static ssize_t fuse_dev_read(struct kiocb *iocb, struct iov_iter *to)
> if (!fud)
> return -EPERM;
>
> - if (!iter_is_iovec(to))
> + if (!user_backed_iter(to))
> return -EINVAL;
>
> fuse_copy_init(&cs, 1, to);
> @@ -1949,7 +1949,7 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, struct iov_iter *from)
> if (!fud)
> return -EPERM;
>
> - if (!iter_is_iovec(from))
> + if (!user_backed_iter(from))
> return -EINVAL;
>
> fuse_copy_init(&cs, 0, from);
> diff --git a/fs/fuse/file.c b/fs/fuse/file.c
> index 00fa861aeead..c982e3afe3b4 100644
> --- a/fs/fuse/file.c
> +++ b/fs/fuse/file.c
> @@ -1465,7 +1465,7 @@ ssize_t fuse_direct_io(struct fuse_io_priv *io, struct iov_iter *iter,
> inode_unlock(inode);
> }
>
> - io->should_dirty = !write && iter_is_iovec(iter);
> + io->should_dirty = !write && user_backed_iter(iter);
> while (count) {
> ssize_t nres;
> fl_owner_t owner = current->files;
> diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
> index 2cceb193dcd8..48e6cc74fdc1 100644
> --- a/fs/gfs2/file.c
> +++ b/fs/gfs2/file.c
> @@ -780,7 +780,7 @@ static inline bool should_fault_in_pages(struct iov_iter *i,
>
> if (!count)
> return false;
> - if (!iter_is_iovec(i))
> + if (!user_backed_iter(i))
> return false;
>
> size = PAGE_SIZE;
> diff --git a/fs/iomap/direct-io.c b/fs/iomap/direct-io.c
> index 31c7f1035b20..d5c7d019653b 100644
> --- a/fs/iomap/direct-io.c
> +++ b/fs/iomap/direct-io.c
> @@ -533,7 +533,7 @@ __iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
> iomi.flags |= IOMAP_NOWAIT;
> }
>
> - if (iter_is_iovec(iter))
> + if (user_backed_iter(iter))
> dio->flags |= IOMAP_DIO_DIRTY;
> } else {
> iomi.flags |= IOMAP_WRITE;
> diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
> index 4eb2a8380a28..022e1ce63e62 100644
> --- a/fs/nfs/direct.c
> +++ b/fs/nfs/direct.c
> @@ -478,7 +478,7 @@ ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
> if (!is_sync_kiocb(iocb))
> dreq->iocb = iocb;
>
> - if (iter_is_iovec(iter))
> + if (user_backed_iter(iter))
> dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
>
> if (!swap)
> diff --git a/include/linux/uio.h b/include/linux/uio.h
> index 76d305f3d4c2..6ab4260c3d6c 100644
> --- a/include/linux/uio.h
> +++ b/include/linux/uio.h
> @@ -26,6 +26,7 @@ enum iter_type {
> ITER_PIPE,
> ITER_XARRAY,
> ITER_DISCARD,
> + ITER_UBUF,
> };
>
> struct iov_iter_state {
> @@ -38,6 +39,7 @@ struct iov_iter {
> u8 iter_type;
> bool nofault;
> bool data_source;
> + bool user_backed;
> size_t iov_offset;
> size_t count;
> union {
> @@ -46,6 +48,7 @@ struct iov_iter {
> const struct bio_vec *bvec;
> struct xarray *xarray;
> struct pipe_inode_info *pipe;
> + void __user *ubuf;
> };
> union {
> unsigned long nr_segs;
> @@ -70,6 +73,11 @@ static inline void iov_iter_save_state(struct iov_iter *iter,
> state->nr_segs = iter->nr_segs;
> }
>
> +static inline bool iter_is_ubuf(const struct iov_iter *i)
> +{
> + return iov_iter_type(i) == ITER_UBUF;
> +}
> +
> static inline bool iter_is_iovec(const struct iov_iter *i)
> {
> return iov_iter_type(i) == ITER_IOVEC;
> @@ -105,6 +113,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
> return i->data_source ? WRITE : READ;
> }
>
> +static inline bool user_backed_iter(const struct iov_iter *i)
> +{
> + return i->user_backed;
> +}
> +
nit: I wonder whether this new boolean is worth it over just checking
iter_is_iovec() || iter_is_ubuf(). Not a big deal though.
> /*
> * Total number of bytes covered by an iovec.
> *
> @@ -320,4 +333,17 @@ ssize_t __import_iovec(int type, const struct iovec __user *uvec,
> int import_single_range(int type, void __user *buf, size_t len,
> struct iovec *iov, struct iov_iter *i);
>
> +static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
> + void __user *buf, size_t count)
> +{
> + WARN_ON(direction & ~(READ | WRITE));
> + *i = (struct iov_iter) {
> + .iter_type = ITER_UBUF,
> + .user_backed = true,
> + .data_source = direction,
> + .ubuf = buf,
> + .count = count
> + };
> +}
> +
> #endif
> diff --git a/lib/iov_iter.c b/lib/iov_iter.c
> index 4c658a25e29c..8275b28e886b 100644
> --- a/lib/iov_iter.c
> +++ b/lib/iov_iter.c
> @@ -16,6 +16,16 @@
>
> #define PIPE_PARANOIA /* for now */
>
> +/* covers ubuf and kbuf alike */
> +#define iterate_buf(i, n, base, len, off, __p, STEP) { \
> + size_t __maybe_unused off = 0; \
> + len = n; \
> + base = __p + i->iov_offset; \
> + len -= (STEP); \
> + i->iov_offset += len; \
> + n = len; \
> +}
> +
> /* covers iovec and kvec alike */
> #define iterate_iovec(i, n, base, len, off, __p, STEP) { \
> size_t off = 0; \
> @@ -110,7 +120,12 @@ __out: \
> if (unlikely(i->count < n)) \
> n = i->count; \
> if (likely(n)) { \
> - if (likely(iter_is_iovec(i))) { \
> + if (likely(iter_is_ubuf(i))) { \
> + void __user *base; \
> + size_t len; \
> + iterate_buf(i, n, base, len, off, \
> + i->ubuf, (I)) \
> + } else if (likely(iter_is_iovec(i))) { \
> const struct iovec *iov = i->iov; \
> void __user *base; \
> size_t len; \
> @@ -275,7 +290,11 @@ static size_t copy_page_to_iter_pipe(struct page *page, size_t offset, size_t by
> */
> size_t fault_in_iov_iter_readable(const struct iov_iter *i, size_t size)
> {
> - if (iter_is_iovec(i)) {
> + if (iter_is_ubuf(i)) {
> + size_t n = min(size, iov_iter_count(i));
> + n -= fault_in_readable(i->ubuf + i->iov_offset, n);
> + return size - n;
> + } else if (iter_is_iovec(i)) {
> size_t count = min(size, iov_iter_count(i));
> const struct iovec *p;
> size_t skip;
> @@ -314,7 +333,11 @@ EXPORT_SYMBOL(fault_in_iov_iter_readable);
> */
> size_t fault_in_iov_iter_writeable(const struct iov_iter *i, size_t size)
> {
> - if (iter_is_iovec(i)) {
> + if (iter_is_ubuf(i)) {
> + size_t n = min(size, iov_iter_count(i));
> + n -= fault_in_safe_writeable(i->ubuf + i->iov_offset, n);
> + return size - n;
> + } else if (iter_is_iovec(i)) {
> size_t count = min(size, iov_iter_count(i));
> const struct iovec *p;
> size_t skip;
> @@ -345,6 +368,7 @@ void iov_iter_init(struct iov_iter *i, unsigned int direction,
> *i = (struct iov_iter) {
> .iter_type = ITER_IOVEC,
> .nofault = false,
> + .user_backed = true,
> .data_source = direction,
> .iov = iov,
> .nr_segs = nr_segs,
> @@ -494,7 +518,7 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
> {
> if (unlikely(iov_iter_is_pipe(i)))
> return copy_pipe_to_iter(addr, bytes, i);
> - if (iter_is_iovec(i))
> + if (user_backed_iter(i))
> might_fault();
> iterate_and_advance(i, bytes, base, len, off,
> copyout(base, addr + off, len),
> @@ -576,7 +600,7 @@ size_t _copy_mc_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
> {
> if (unlikely(iov_iter_is_pipe(i)))
> return copy_mc_pipe_to_iter(addr, bytes, i);
> - if (iter_is_iovec(i))
> + if (user_backed_iter(i))
> might_fault();
> __iterate_and_advance(i, bytes, base, len, off,
> copyout_mc(base, addr + off, len),
> @@ -594,7 +618,7 @@ size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
> WARN_ON(1);
> return 0;
> }
> - if (iter_is_iovec(i))
> + if (user_backed_iter(i))
> might_fault();
> iterate_and_advance(i, bytes, base, len, off,
> copyin(addr + off, base, len),
> @@ -882,16 +906,16 @@ void iov_iter_advance(struct iov_iter *i, size_t size)
> {
> if (unlikely(i->count < size))
> size = i->count;
> - if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
> + if (likely(iter_is_ubuf(i)) || unlikely(iov_iter_is_xarray(i))) {
> + i->iov_offset += size;
> + i->count -= size;
> + } else if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i))) {
> /* iovec and kvec have identical layouts */
> iov_iter_iovec_advance(i, size);
> } else if (iov_iter_is_bvec(i)) {
> iov_iter_bvec_advance(i, size);
> } else if (iov_iter_is_pipe(i)) {
> pipe_advance(i, size);
> - } else if (unlikely(iov_iter_is_xarray(i))) {
> - i->iov_offset += size;
> - i->count -= size;
> } else if (iov_iter_is_discard(i)) {
> i->count -= size;
> }
> @@ -938,7 +962,7 @@ void iov_iter_revert(struct iov_iter *i, size_t unroll)
> return;
> }
> unroll -= i->iov_offset;
> - if (iov_iter_is_xarray(i)) {
> + if (iov_iter_is_xarray(i) || iter_is_ubuf(i)) {
> BUG(); /* We should never go beyond the start of the specified
> * range since we might then be straying into pages that
> * aren't pinned.
> @@ -1129,6 +1153,13 @@ static unsigned long iov_iter_alignment_bvec(const struct iov_iter *i)
>
> unsigned long iov_iter_alignment(const struct iov_iter *i)
> {
> + if (likely(iter_is_ubuf(i))) {
> + size_t size = i->count;
> + if (size)
> + return ((unsigned long)i->ubuf + i->iov_offset) | size;
> + return 0;
> + }
> +
> /* iovec and kvec have identical layouts */
> if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
> return iov_iter_alignment_iovec(i);
> @@ -1159,6 +1190,9 @@ unsigned long iov_iter_gap_alignment(const struct iov_iter *i)
> size_t size = i->count;
> unsigned k;
>
> + if (iter_is_ubuf(i))
> + return 0;
> +
> if (WARN_ON(!iter_is_iovec(i)))
> return ~0U;
>
> @@ -1287,7 +1321,19 @@ static ssize_t iter_xarray_get_pages(struct iov_iter *i,
> return actual;
> }
>
> -/* must be done on non-empty ITER_IOVEC one */
> +static unsigned long found_ubuf_segment(unsigned long addr,
> + size_t len,
> + size_t *size, size_t *start,
> + unsigned maxpages)
> +{
> + len += (*start = addr % PAGE_SIZE);
> + if (len > maxpages * PAGE_SIZE)
> + len = maxpages * PAGE_SIZE;
> + *size = len;
> + return addr & PAGE_MASK;
> +}
> +
> +/* must be done on non-empty ITER_UBUF or ITER_IOVEC one */
> static unsigned long first_iovec_segment(const struct iov_iter *i,
> size_t *size, size_t *start,
> size_t maxsize, unsigned maxpages)
> @@ -1295,6 +1341,11 @@ static unsigned long first_iovec_segment(const struct iov_iter *i,
> size_t skip;
> long k;
>
> + if (iter_is_ubuf(i)) {
> + unsigned long addr = (unsigned long)i->ubuf + i->iov_offset;
> + return found_ubuf_segment(addr, maxsize, size, start, maxpages);
> + }
> +
> for (k = 0, skip = i->iov_offset; k < i->nr_segs; k++, skip = 0) {
> unsigned long addr = (unsigned long)i->iov[k].iov_base + skip;
> size_t len = i->iov[k].iov_len - skip;
> @@ -1303,11 +1354,7 @@ static unsigned long first_iovec_segment(const struct iov_iter *i,
> continue;
> if (len > maxsize)
> len = maxsize;
> - len += (*start = addr % PAGE_SIZE);
> - if (len > maxpages * PAGE_SIZE)
> - len = maxpages * PAGE_SIZE;
> - *size = len;
> - return addr & PAGE_MASK;
> + return found_ubuf_segment(addr, len, size, start, maxpages);
> }
> BUG(); // if it had been empty, we wouldn't get called
> }
> @@ -1344,7 +1391,7 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
> if (!maxsize)
> return 0;
>
> - if (likely(iter_is_iovec(i))) {
> + if (likely(user_backed_iter(i))) {
> unsigned int gup_flags = 0;
> unsigned long addr;
>
> @@ -1470,7 +1517,7 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
> if (!maxsize)
> return 0;
>
> - if (likely(iter_is_iovec(i))) {
> + if (likely(user_backed_iter(i))) {
> unsigned int gup_flags = 0;
> unsigned long addr;
>
> @@ -1624,6 +1671,11 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
> {
> if (unlikely(!i->count))
> return 0;
> + if (likely(iter_is_ubuf(i))) {
> + unsigned offs = offset_in_page(i->ubuf + i->iov_offset);
> + int npages = DIV_ROUND_UP(offs + i->count, PAGE_SIZE);
> + return min(npages, maxpages);
> + }
> /* iovec and kvec have identical layouts */
> if (likely(iter_is_iovec(i) || iov_iter_is_kvec(i)))
> return iov_npages(i, maxpages);
> @@ -1862,10 +1914,12 @@ EXPORT_SYMBOL(import_single_range);
> void iov_iter_restore(struct iov_iter *i, struct iov_iter_state *state)
> {
> if (WARN_ON_ONCE(!iov_iter_is_bvec(i) && !iter_is_iovec(i)) &&
> - !iov_iter_is_kvec(i))
> + !iov_iter_is_kvec(i) && !iter_is_ubuf(i))
> return;
> i->iov_offset = state->iov_offset;
> i->count = state->count;
> + if (iter_is_ubuf(i))
> + return;
> /*
> * For the *vec iters, nr_segs + iov is constant - if we increment
> * the vec, then we also decrement the nr_segs count. Hence we don't
> diff --git a/mm/shmem.c b/mm/shmem.c
> index a6f565308133..6b83f3971795 100644
> --- a/mm/shmem.c
> +++ b/mm/shmem.c
> @@ -2603,7 +2603,7 @@ static ssize_t shmem_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
> ret = copy_page_to_iter(page, offset, nr, to);
> put_page(page);
>
> - } else if (iter_is_iovec(to)) {
> + } else if (!user_backed_iter(to)) {
> /*
> * Copy to user tends to be so well optimized, but
> * clear_user() not so much, that it is noticeably
The code looks reasonable but is there any real benefit here? It seems
like the only user of it so far is new_sync_{read,write}, and both seem
to just use it to avoid allocating a single iovec on the stack.
--
Jeff Layton <jlayton@kernel.org>
next prev parent reply other threads:[~2022-06-27 18:47 UTC|newest]
Thread overview: 118+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-06-22 4:10 [RFC][CFT][PATCHSET] iov_iter stuff Al Viro
2022-06-22 4:15 ` [PATCH 01/44] 9p: handling Rerror without copy_from_iter_full() Al Viro
2022-06-22 4:15 ` [PATCH 02/44] No need of likely/unlikely on calls of check_copy_size() Al Viro
2022-06-22 4:15 ` [PATCH 03/44] teach iomap_dio_rw() to suppress dsync Al Viro
2022-06-22 4:15 ` [PATCH 04/44] btrfs: use IOMAP_DIO_NOSYNC Al Viro
2022-06-22 4:15 ` [PATCH 05/44] struct file: use anonymous union member for rcuhead and llist Al Viro
2022-06-22 4:15 ` [PATCH 06/44] iocb: delay evaluation of IS_SYNC(...) until we want to check IOCB_DSYNC Al Viro
2022-06-22 4:15 ` [PATCH 07/44] keep iocb_flags() result cached in struct file Al Viro
2022-06-22 4:15 ` [PATCH 08/44] copy_page_{to,from}_iter(): switch iovec variants to generic Al Viro
2022-06-27 18:31 ` Jeff Layton
2022-06-28 12:32 ` Christian Brauner
2022-06-28 18:36 ` Al Viro
2022-06-22 4:15 ` [PATCH 09/44] new iov_iter flavour - ITER_UBUF Al Viro
2022-06-27 18:47 ` Jeff Layton [this message]
2022-06-28 18:41 ` Al Viro
2022-06-28 12:38 ` Christian Brauner
2022-06-28 18:44 ` Al Viro
2022-07-28 9:55 ` [PATCH 9/44] " Alexander Gordeev
2022-07-29 17:21 ` Al Viro
2022-07-29 21:12 ` Alexander Gordeev
2022-07-30 0:03 ` Al Viro
2022-06-22 4:15 ` [PATCH 10/44] switch new_sync_{read,write}() to ITER_UBUF Al Viro
2022-06-22 4:15 ` [PATCH 11/44] iov_iter_bvec_advance(): don't bother with bvec_iter Al Viro
2022-06-27 18:48 ` Jeff Layton
2022-06-28 12:40 ` Christian Brauner
2022-06-22 4:15 ` [PATCH 12/44] fix short copy handling in copy_mc_pipe_to_iter() Al Viro
2022-06-27 19:15 ` Jeff Layton
2022-06-28 12:42 ` Christian Brauner
2022-06-22 4:15 ` [PATCH 13/44] splice: stop abusing iov_iter_advance() to flush a pipe Al Viro
2022-06-27 19:17 ` Jeff Layton
2022-06-28 12:43 ` Christian Brauner
2022-06-22 4:15 ` [PATCH 14/44] ITER_PIPE: helper for getting pipe buffer by index Al Viro
2022-06-28 10:38 ` Jeff Layton
2022-06-28 12:45 ` Christian Brauner
2022-06-22 4:15 ` [PATCH 15/44] ITER_PIPE: helpers for adding pipe buffers Al Viro
2022-06-28 11:32 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 16/44] ITER_PIPE: allocate buffers as we go in copy-to-pipe primitives Al Viro
2022-06-22 4:15 ` [PATCH 17/44] ITER_PIPE: fold push_pipe() into __pipe_get_pages() Al Viro
2022-06-22 4:15 ` [PATCH 18/44] ITER_PIPE: lose iter_head argument of __pipe_get_pages() Al Viro
2022-06-22 4:15 ` [PATCH 19/44] ITER_PIPE: clean pipe_advance() up Al Viro
2022-06-22 4:15 ` [PATCH 20/44] ITER_PIPE: clean iov_iter_revert() Al Viro
2022-06-22 4:15 ` [PATCH 21/44] ITER_PIPE: cache the type of last buffer Al Viro
2022-06-22 4:15 ` [PATCH 22/44] ITER_PIPE: fold data_start() and pipe_space_for_user() together Al Viro
2022-06-22 4:15 ` [PATCH 23/44] iov_iter_get_pages{,_alloc}(): cap the maxsize with MAX_RW_COUNT Al Viro
2022-06-28 11:41 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 24/44] iov_iter_get_pages_alloc(): lift freeing pages array on failure exits into wrapper Al Viro
2022-06-28 11:45 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 25/44] iov_iter_get_pages(): sanity-check arguments Al Viro
2022-06-28 11:47 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 26/44] unify pipe_get_pages() and pipe_get_pages_alloc() Al Viro
2022-06-28 11:49 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 27/44] unify xarray_get_pages() and xarray_get_pages_alloc() Al Viro
2022-06-28 11:50 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 28/44] unify the rest of iov_iter_get_pages()/iov_iter_get_pages_alloc() guts Al Viro
2022-06-28 11:54 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 29/44] ITER_XARRAY: don't open-code DIV_ROUND_UP() Al Viro
2022-06-28 11:54 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 30/44] iov_iter: lift dealing with maxpages out of first_{iovec,bvec}_segment() Al Viro
2022-06-28 11:56 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 31/44] iov_iter: first_{iovec,bvec}_segment() - simplify a bit Al Viro
2022-06-28 11:58 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 32/44] iov_iter: massage calling conventions for first_{iovec,bvec}_segment() Al Viro
2022-06-28 12:06 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 33/44] found_iovec_segment(): just return address Al Viro
2022-06-28 12:09 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 34/44] fold __pipe_get_pages() into pipe_get_pages() Al Viro
2022-06-28 12:11 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 35/44] iov_iter: saner helper for page array allocation Al Viro
2022-06-28 12:12 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 36/44] iov_iter: advancing variants of iov_iter_get_pages{,_alloc}() Al Viro
2022-06-28 12:13 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 37/44] block: convert to " Al Viro
2022-06-28 12:16 ` Jeff Layton
2022-06-30 22:11 ` [block.git conflicts] " Al Viro
2022-06-30 22:39 ` Al Viro
2022-07-01 2:07 ` Keith Busch
2022-07-01 17:40 ` Al Viro
2022-07-01 17:53 ` Keith Busch
2022-07-01 18:07 ` Al Viro
2022-07-01 18:12 ` Al Viro
2022-07-01 18:38 ` Keith Busch
2022-07-01 19:08 ` Al Viro
2022-07-01 19:28 ` Keith Busch
2022-07-01 19:43 ` Al Viro
2022-07-01 19:56 ` Keith Busch
2022-07-02 5:35 ` Al Viro
2022-07-02 21:02 ` Keith Busch
2022-07-01 19:05 ` Keith Busch
2022-07-01 21:30 ` Jens Axboe
2022-06-30 23:07 ` Jens Axboe
2022-07-10 18:04 ` Sedat Dilek
2022-06-22 4:15 ` [PATCH 38/44] iter_to_pipe(): switch to advancing variant of iov_iter_get_pages() Al Viro
2022-06-28 12:18 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 39/44] af_alg_make_sg(): " Al Viro
2022-06-28 12:18 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 40/44] 9p: convert to advancing variant of iov_iter_get_pages_alloc() Al Viro
2022-07-01 9:01 ` Dominique Martinet
2022-07-01 13:47 ` Christian Schoenebeck
2022-07-06 22:06 ` Christian Schoenebeck
2022-06-22 4:15 ` [PATCH 41/44] ceph: switch the last caller " Al Viro
2022-06-28 12:20 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 42/44] get rid of non-advancing variants Al Viro
2022-06-28 12:21 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 43/44] pipe_get_pages(): switch to append_pipe() Al Viro
2022-06-28 12:23 ` Jeff Layton
2022-06-22 4:15 ` [PATCH 44/44] expand those iov_iter_advance() Al Viro
2022-06-28 12:23 ` Jeff Layton
2022-07-01 6:21 ` [PATCH 01/44] 9p: handling Rerror without copy_from_iter_full() Dominique Martinet
2022-07-01 6:25 ` Dominique Martinet
2022-07-01 16:02 ` Christian Schoenebeck
2022-07-01 21:00 ` Dominique Martinet
2022-07-03 13:30 ` Christian Schoenebeck
2022-08-01 12:42 ` [PATCH 09/44] new iov_iter flavour - ITER_UBUF David Howells
2022-08-01 21:14 ` Al Viro
2022-08-01 22:54 ` David Howells
2022-06-23 15:21 ` [RFC][CFT][PATCHSET] iov_iter stuff David Howells
2022-06-23 20:32 ` Al Viro
2022-06-28 12:25 ` Jeff Layton
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=07ad7be25bab03c164bbd1f2d2264c9e6f79b70d.camel@kernel.org \
--to=jlayton@kernel.org \
--cc=asmadeus@codewreck.org \
--cc=axboe@kernel.dk \
--cc=brauner@kernel.org \
--cc=dhowells@redhat.com \
--cc=hch@lst.de \
--cc=linux-fsdevel@vger.kernel.org \
--cc=torvalds@linux-foundation.org \
--cc=viro@zeniv.linux.org.uk \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).