From: Jens Axboe <axboe@kernel.dk>
To: Jann Horn <jannh@google.com>
Cc: linux-aio@kvack.org, linux-block@vger.kernel.org,
	Linux API <linux-api@vger.kernel.org>,
	hch@lst.de, jmoyer@redhat.com, Avi Kivity <avi@scylladb.com>,
	Al Viro <viro@zeniv.linux.org.uk>
Subject: Re: [PATCH 12/19] io_uring: add support for pre-mapped user IO buffers
Date: Fri, 22 Feb 2019 15:29:15 -0700	[thread overview]
Message-ID: <ab313a80-deee-d044-9447-72977110cf2d@kernel.dk> (raw)
In-Reply-To: <CAG48ez2jrdFEATYor3-tRUx=LBpXo9581tPCWUgjWf6atMiGzA@mail.gmail.com>

On 2/19/19 12:08 PM, Jann Horn wrote:
> On Mon, Feb 11, 2019 at 8:01 PM Jens Axboe <axboe@kernel.dk> wrote:
>> If we have fixed user buffers, we can map them into the kernel when we
>> set up the io_uring. That avoids the need to do get_user_pages() for
>> each and every IO.
>>
>> To utilize this feature, the application must call io_uring_register()
>> after having set up an io_uring instance, passing in
>> IORING_REGISTER_BUFFERS as the opcode. The argument must be a pointer to
>> an iovec array, and the nr_args should contain how many iovecs the
>> application wishes to map.
>>
>> If successful, these buffers are now mapped into the kernel, eligible
>> for IO. To use these fixed buffers, the application must use the
>> IORING_OP_READ_FIXED and IORING_OP_WRITE_FIXED opcodes, and then
>> set sqe->index to the desired buffer index. sqe->addr..sqe->addr+sqe->len
>> must point to somewhere inside the indexed buffer.
>>
>> The application may register buffers throughout the lifetime of the
>> io_uring instance. It can call io_uring_register() with
>> IORING_UNREGISTER_BUFFERS as the opcode to unregister the current set of
>> buffers, and then register a new set. The application need not
>> unregister buffers explicitly before shutting down the io_uring
>> instance.
>>
>> It's perfectly valid to set up a larger buffer, and then sometimes only
>> use parts of it for an IO. As long as the range is within the originally
>> mapped region, it will work just fine.
>>
>> For now, buffers must not be file backed. If file backed buffers are
>> passed in, the registration will fail with -1/EOPNOTSUPP. This
>> restriction may be relaxed in the future.
>>
>> RLIMIT_MEMLOCK is used to check how much memory we can pin. A somewhat
>> arbitrary 1G per-buffer size limit is also imposed.
>>
>> Reviewed-by: Hannes Reinecke <hare@suse.com>
>> Signed-off-by: Jens Axboe <axboe@kernel.dk>
>> ---
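
To make the described flow concrete, a minimal, untested userspace sketch
might look like the below. The io_uring_register() declaration is a
hypothetical stand-in for the raw syscall wrapper, sqe->buf_index is the
field this series calls sqe->index, and the ring setup and submission
plumbing is omitted:

#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <linux/io_uring.h>

/* hypothetical thin wrapper around the io_uring_register(2) syscall */
int io_uring_register(int ring_fd, unsigned opcode, void *arg,
		      unsigned nr_args);

/* Pin two 64K buffers for the lifetime of the registration. */
static int setup_fixed_buffers(int ring_fd, struct iovec *iovs)
{
	for (int i = 0; i < 2; i++) {
		iovs[i].iov_len = 65536;
		if (posix_memalign(&iovs[i].iov_base, 4096, iovs[i].iov_len))
			return -1;
	}
	return io_uring_register(ring_fd, IORING_REGISTER_BUFFERS, iovs, 2);
}

/* Fill an SQE for a 4K fixed-buffer read from the start of buffer buf_idx. */
static void prep_read_fixed(struct io_uring_sqe *sqe, int fd,
			    const struct iovec *iovs, unsigned buf_idx)
{
	memset(sqe, 0, sizeof(*sqe));
	sqe->opcode = IORING_OP_READ_FIXED;
	sqe->fd = fd;
	sqe->off = 0;
	/* addr..addr+len must lie inside the registered buffer */
	sqe->addr = (unsigned long) iovs[buf_idx].iov_base;
	sqe->len = 4096;
	sqe->buf_index = buf_idx;	/* sqe->index in this series */
}
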
> [...]
>>  static void io_sq_wq_submit_work(struct work_struct *work)
>>  {
>>         struct io_kiocb *req = container_of(work, struct io_kiocb, work);
>>         struct sqe_submit *s = &req->submit;
>>         const struct io_uring_sqe *sqe = s->sqe;
>>         struct io_ring_ctx *ctx = req->ctx;
>> -       mm_segment_t old_fs = get_fs();
>> +       mm_segment_t old_fs;
>> +       bool needs_user;
>>         int ret;
>>
>>          /* Ensure we clear previously set forced non-block flag */
>>         req->flags &= ~REQ_F_FORCE_NONBLOCK;
>>         req->rw.ki_flags &= ~IOCB_NOWAIT;
>>
>> -       if (!mmget_not_zero(ctx->sqo_mm)) {
>> -               ret = -EFAULT;
>> -               goto err;
>> -       }
>> -
>> -       use_mm(ctx->sqo_mm);
>> -       set_fs(USER_DS);
>> -       s->has_user = true;
>>         s->needs_lock = true;
>> +       s->has_user = false;
>> +
>> +       /*
>> +        * If we're doing IO to fixed buffers, we don't need to get/set
>> +        * user context
>> +        */
>> +       needs_user = io_sqe_needs_user(s->sqe);
>> +       if (needs_user) {
>> +               if (!mmget_not_zero(ctx->sqo_mm)) {
>> +                       ret = -EFAULT;
>> +                       goto err;
>> +               }
>> +               use_mm(ctx->sqo_mm);
>> +               old_fs = get_fs();
>> +               set_fs(USER_DS);
>> +               s->has_user = true;
>> +       }
>>
>>         do {
>>                 ret = __io_submit_sqe(ctx, req, s, false, NULL);
>> @@ -1011,9 +1110,11 @@ static void io_sq_wq_submit_work(struct work_struct *work)
>>                 cond_resched();
>>         } while (1);
>>
>> -       set_fs(old_fs);
>> -       unuse_mm(ctx->sqo_mm);
>> -       mmput(ctx->sqo_mm);
>> +       if (needs_user) {
>> +               set_fs(old_fs);
>> +               unuse_mm(ctx->sqo_mm);
>> +               mmput(ctx->sqo_mm);
>> +       }
>>  err:
>>         if (ret) {
>>                 io_cqring_add_event(ctx, sqe->user_data, ret, 0);
>> @@ -1308,6 +1409,197 @@ static unsigned long ring_pages(unsigned sq_entries, unsigned cq_entries)
>>         return (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
>>  }
>>
>> +static int io_sqe_buffer_unregister(struct io_ring_ctx *ctx)
>> +{
>> +       int i, j;
>> +
>> +       if (!ctx->user_bufs)
>> +               return -ENXIO;
>> +
>> +       for (i = 0; i < ctx->nr_user_bufs; i++) {
>> +               struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
>> +
>> +               for (j = 0; j < imu->nr_bvecs; j++)
>> +                       put_page(imu->bvec[j].bv_page);
>> +
>> +               if (ctx->account_mem)
>> +                       io_unaccount_mem(ctx->user, imu->nr_bvecs);
>> +               kfree(imu->bvec);
>> +               imu->nr_bvecs = 0;
>> +       }
>> +
>> +       kfree(ctx->user_bufs);
>> +       ctx->user_bufs = NULL;
>> +       ctx->nr_user_bufs = 0;
>> +       return 0;
>> +}
> [...]
>> +static int io_sqe_buffer_register(struct io_ring_ctx *ctx, void __user *arg,
>> +                                 unsigned nr_args)
>> +{
>> +       struct vm_area_struct **vmas = NULL;
>> +       struct page **pages = NULL;
>> +       int i, j, got_pages = 0;
>> +       int ret = -EINVAL;
>> +
>> +       if (ctx->user_bufs)
>> +               return -EBUSY;
>> +       if (!nr_args || nr_args > UIO_MAXIOV)
>> +               return -EINVAL;
>> +
>> +       ctx->user_bufs = kcalloc(nr_args, sizeof(struct io_mapped_ubuf),
>> +                                       GFP_KERNEL);
>> +       if (!ctx->user_bufs)
>> +               return -ENOMEM;
>> +
>> +       for (i = 0; i < nr_args; i++) {
>> +               struct io_mapped_ubuf *imu = &ctx->user_bufs[i];
>> +               unsigned long off, start, end, ubuf;
>> +               int pret, nr_pages;
>> +               struct iovec iov;
>> +               size_t size;
>> +
>> +               ret = io_copy_iov(ctx, &iov, arg, i);
>> +               if (ret)
>> +                       break;
>> +
>> +               /*
>> +                * Don't impose further limits on the size and buffer
>> +                * constraints here, we'll -EINVAL later when IO is
>> +                * submitted if they are wrong.
>> +                */
>> +               ret = -EFAULT;
>> +               if (!iov.iov_base || !iov.iov_len)
>> +                       goto err;
>> +
>> +               /* arbitrary limit, but we need something */
>> +               if (iov.iov_len > SZ_1G)
>> +                       goto err;
>> +
>> +               ubuf = (unsigned long) iov.iov_base;
>> +               end = (ubuf + iov.iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
>> +               start = ubuf >> PAGE_SHIFT;
>> +               nr_pages = end - start;
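
As a worked example of the page-count math above, with 4K pages: ubuf =
0x1080 and iov_len = 0x2000 gives start = 1 and end = 4, so nr_pages = 3;
the buffer straddles pages 1, 2 and 3 even though it is only two pages long.
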
>> +
>> +               if (ctx->account_mem) {
>> +                       ret = io_account_mem(ctx->user, nr_pages);
>> +                       if (ret)
>> +                               goto err;
>> +               }
>> +
>> +               ret = 0;
>> +               if (!pages || nr_pages > got_pages) {
> 
> Nit: No need to check for `!pages` as long as `pages` and `got_pages`
> are synchronized (which guarantees that `!pages` implies
> `got_pages==0`).

I just prefer it that way; it's less confusing, and past history says this
kind of thing always ends up confusing the compiler, and then we have to
deal with a bogus warning.
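
Presumably the bogus warning in question is the usual correlated-state
false positive: when a pointer's validity is only implied by a separate
counter or flag, the compiler cannot prove the correlation and may warn
anyway. A contrived sketch of that pattern (not from this patch):

#include <stdlib.h>

void fill(char *p, size_t n);

void demo(size_t want)
{
	char *buf;		/* only assigned when len becomes non-zero */
	size_t len = 0;

	if (want) {
		buf = malloc(want);
		if (!buf)
			return;
		len = want;
	}
	if (len)		/* implies buf was set, but gcc may still warn */
		fill(buf, len);
	if (len)
		free(buf);
}
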

>> +                       kfree(vmas);
>> +                       kfree(pages);
>> +                       pages = kmalloc_array(nr_pages, sizeof(struct page *),
>> +                                               GFP_KERNEL);
>> +                       vmas = kmalloc_array(nr_pages,
>> +                                       sizeof(struct vma_area_struct *),
> 
> typo: s/vma_area_struct/vm_area_struct/

Fixed, thanks.

>> +                                       GFP_KERNEL);
>> +                       if (!pages || !vmas) {
>> +                               ret = -ENOMEM;
>> +                               if (ctx->account_mem)
>> +                                       io_unaccount_mem(ctx->user, nr_pages);
>> +                               goto err;
>> +                       }
>> +                       got_pages = nr_pages;
>> +               }
>> +
>> +               imu->bvec = kmalloc_array(nr_pages, sizeof(struct bio_vec),
>> +                                               GFP_KERNEL);
>> +               ret = -ENOMEM;
>> +               if (!imu->bvec) {
>> +                       if (ctx->account_mem)
>> +                               io_unaccount_mem(ctx->user, nr_pages);
>> +                       goto err;
>> +               }
>> +
>> +               ret = 0;
>> +               down_read(&current->mm->mmap_sem);
>> +               pret = get_user_pages_longterm(ubuf, nr_pages, FOLL_WRITE,
>> +                                               pages, vmas);
>> +               if (pret == nr_pages) {
>> +                       /* don't support file backed memory */
>> +                       for (j = 0; j < nr_pages; j++) {
>> +                               struct vm_area_struct *vma = vmas[j];
>> +
>> +                               if (vma->vm_file &&
>> +                                   !is_file_hugepages(vma->vm_file)) {
>> +                                       ret = -EOPNOTSUPP;
>> +                                       break;
>> +                               }
>> +                       }
>> +               } else {
>> +                       ret = pret < 0 ? pret : -EFAULT;
>> +               }
>> +               up_read(&current->mm->mmap_sem);
>> +               if (ret) {
>> +                       /*
>> +                        * if we did partial map, or found file backed vmas,
>> +                        * release any pages we did get
>> +                        */
>> +                       if (pret > 0) {
>> +                               for (j = 0; j < pret; j++)
>> +                                       put_page(pages[j]);
>> +                       }
>> +                       if (ctx->account_mem)
>> +                               io_unaccount_mem(ctx->user, nr_pages);
>> +                       goto err;
>> +               }
>> +
>> +               off = ubuf & ~PAGE_MASK;
>> +               size = iov.iov_len;
>> +               for (j = 0; j < nr_pages; j++) {
>> +                       size_t vec_len;
>> +
>> +                       vec_len = min_t(size_t, size, PAGE_SIZE - off);
>> +                       imu->bvec[j].bv_page = pages[j];
>> +                       imu->bvec[j].bv_len = vec_len;
>> +                       imu->bvec[j].bv_offset = off;
>> +                       off = 0;
>> +                       size -= vec_len;
>> +               }
>> +               /* store original address for later verification */
>> +               imu->ubuf = ubuf;
>> +               imu->len = iov.iov_len;
>> +               imu->nr_bvecs = nr_pages;
>> +       }
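
Continuing the worked example from above: with off = ubuf & ~PAGE_MASK =
0x80 and iov_len = 0x2000, this loop produces three bvecs of 0xf80, 0x1000
and 0x80 bytes, which sum back to the full 0x2000-byte buffer.
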
>> +       kfree(pages);
>> +       kfree(vmas);
>> +       ctx->nr_user_bufs = nr_args;
>> +       return 0;
>> +err:
>> +       kfree(pages);
>> +       kfree(vmas);
>> +       io_sqe_buffer_unregister(ctx);
> 
> io_sqe_buffer_unregister() gets rid of elements up to
> ctx->nr_user_bufs, but as far as I can tell, ctx->nr_user_bufs is
> always zero here. I think that's going to cause a reference leak.

Fixed, thanks.
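
Presumably the fix amounts to counting buffers as they complete
registration, rather than assigning nr_args only on success, so that the
error-path unregister unwinds exactly what has been set up so far. A rough
sketch of how the tail of the function might then look (a fragment, not
the actual follow-up patch):

		/* ... end of the per-iovec loop body ... */
		imu->ubuf = ubuf;
		imu->len = iov.iov_len;
		imu->nr_bvecs = nr_pages;
		ctx->nr_user_bufs++;	/* counted only once fully mapped */
	}
	kfree(pages);
	kfree(vmas);
	return 0;
err:
	kfree(pages);
	kfree(vmas);
	/* now tears down exactly the buffers counted above */
	io_sqe_buffer_unregister(ctx);
	return ret;
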

-- 
Jens Axboe


Thread overview: 121+ messages
2019-02-11 19:00 [PATCHSET v15] io_uring IO interface Jens Axboe
2019-02-11 19:00 ` Jens Axboe
2019-02-11 19:00 ` [PATCH 01/19] fs: add an iopoll method to struct file_operations Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH] io_uring: add io_uring_event cache hit information Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 02/19] block: wire up block device iopoll method Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 03/19] block: add bio_set_polled() helper Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 04/19] iomap: wire up the iopoll method Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 05/19] Add io_uring IO interface Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 06/19] io_uring: add fsync support Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 07/19] io_uring: support for IO polling Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 08/19] fs: add fget_many() and fput_many() Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 09/19] io_uring: use fget/fput_many() for file references Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 10/19] io_uring: batch io_kiocb allocation Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 11/19] block: implement bio helper to add iter bvec pages to bio Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-20 22:58   ` Ming Lei
2019-02-20 22:58     ` Ming Lei
2019-02-21 17:45     ` Jens Axboe
2019-02-21 17:45       ` Jens Axboe
2019-02-26  3:46       ` Eric Biggers
2019-02-26  3:46         ` Eric Biggers
2019-02-26  4:34         ` Jens Axboe
2019-02-26  4:34           ` Jens Axboe
2019-02-26 15:54           ` Jens Axboe
2019-02-26 15:54             ` Jens Axboe
2019-02-27  1:21             ` Ming Lei
2019-02-27  1:21               ` Ming Lei
2019-02-27  1:47               ` Jens Axboe
2019-02-27  1:47                 ` Jens Axboe
2019-02-27  1:53                 ` Ming Lei
2019-02-27  1:53                   ` Ming Lei
2019-02-27  1:57                   ` Jens Axboe
2019-02-27  1:57                     ` Jens Axboe
2019-02-27  2:03                     ` Jens Axboe
2019-02-27  2:21                     ` Ming Lei
2019-02-27  2:21                       ` Ming Lei
2019-02-27  2:28                       ` Jens Axboe
2019-02-27  2:28                         ` Jens Axboe
2019-02-27  2:37                         ` Ming Lei
2019-02-27  2:37                           ` Ming Lei
2019-02-27  2:43                           ` Jens Axboe
2019-02-27  2:43                             ` Jens Axboe
2019-02-27  3:09                             ` Ming Lei
2019-02-27  3:09                               ` Ming Lei
2019-02-27  3:37                               ` Jens Axboe
2019-02-27  3:37                                 ` Jens Axboe
2019-02-27  3:43                                 ` Jens Axboe
2019-02-27  3:43                                   ` Jens Axboe
2019-02-27  3:44                                 ` Ming Lei
2019-02-27  3:44                                   ` Ming Lei
2019-02-27  4:05                                   ` Jens Axboe
2019-02-27  4:05                                     ` Jens Axboe
2019-02-27  4:06                                     ` Jens Axboe
2019-02-27  4:06                                       ` Jens Axboe
2019-02-27 19:42                                       ` Christoph Hellwig
2019-02-27 19:42                                         ` Christoph Hellwig
2019-02-28  8:37                                         ` Ming Lei
2019-02-28  8:37                                           ` Ming Lei
2019-02-27 23:35                         ` Ming Lei
2019-02-27 23:35                           ` Ming Lei
2019-03-08  7:55                         ` Christoph Hellwig
2019-03-08  7:55                           ` Christoph Hellwig
2019-03-08  9:12                           ` Ming Lei
2019-03-08  9:12                             ` Ming Lei
2019-03-08  8:18                     ` Christoph Hellwig
2019-03-08  8:18                       ` Christoph Hellwig
2019-02-11 19:00 ` [PATCH 12/19] io_uring: add support for pre-mapped user IO buffers Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-19 19:08   ` Jann Horn
2019-02-19 19:08     ` Jann Horn
2019-02-22 22:29     ` Jens Axboe [this message]
2019-02-22 22:29       ` Jens Axboe
2019-02-11 19:00 ` [PATCH 13/19] net: split out functions related to registering inflight socket files Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 14/19] io_uring: add file set registration Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-19 16:12   ` Jann Horn
2019-02-19 16:12     ` Jann Horn
2019-02-22 22:29     ` Jens Axboe
2019-02-22 22:29       ` Jens Axboe
2019-02-11 19:00 ` [PATCH 15/19] io_uring: add submission polling Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 16/19] io_uring: add io_kiocb ref count Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 17/19] io_uring: add support for IORING_OP_POLL Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 18/19] io_uring: allow workqueue item to handle multiple buffered requests Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-11 19:00 ` [PATCH 19/19] io_uring: add io_uring_event cache hit information Jens Axboe
2019-02-11 19:00   ` Jens Axboe
2019-02-21 12:10 ` [PATCHSET v15] io_uring IO interface Marek Majkowski
2019-02-21 12:10   ` Marek Majkowski
2019-02-21 17:48   ` Jens Axboe
2019-02-21 17:48     ` Jens Axboe
2019-02-22 15:01     ` Marek Majkowski
2019-02-22 15:01       ` Marek Majkowski
2019-02-22 22:32       ` Jens Axboe
2019-02-22 22:32         ` Jens Axboe
  -- strict thread matches above, loose matches on Subject: below --
2019-02-09 21:13 [PATCHSET v14] " Jens Axboe
2019-02-09 21:13 ` [PATCH 12/19] io_uring: add support for pre-mapped user IO buffers Jens Axboe
2019-02-09 21:13   ` Jens Axboe
2019-02-08 17:34 [PATCHSET v13] io_uring IO interface Jens Axboe
2019-02-08 17:34 ` [PATCH 12/19] io_uring: add support for pre-mapped user IO buffers Jens Axboe
2019-02-08 17:34   ` Jens Axboe
2019-02-08 22:54   ` Jann Horn
2019-02-08 22:54     ` Jann Horn
2019-02-08 23:38     ` Jens Axboe
2019-02-08 23:38       ` Jens Axboe
2019-02-09 16:50       ` Jens Axboe
2019-02-09 16:50         ` Jens Axboe
2019-02-09  9:48   ` Hannes Reinecke
2019-02-09  9:48     ` Hannes Reinecke
