* [patch,rfc] aio: allocate kiocbs in batches
@ 2011-07-05 20:35 Jeff Moyer
  2011-09-01  0:13 ` Andrew Morton
  0 siblings, 1 reply; 5+ messages in thread
From: Jeff Moyer @ 2011-07-05 20:35 UTC (permalink / raw)
  To: linux-aio, linux-fsdevel

Hi,

When looking at perf data for my aio-stress test runs against some
relatively fast storage, I noticed that __aio_get_req was taking up more
CPU than I expected.  The command line for aio-stress was:
  aio-stress -O -r 8 -d 256 -b 32 -l -s 16384 /dev/sdk
which means we're sending 32 iocbs to io_submit at a time.  Here's the
perf report -g output:

Events: 41K cycles
-      7.33%  aio-stress  [kernel.kallsyms]             [k] _raw_spin_lock_irq
   - _raw_spin_lock_irq
      + 75.38% scsi_request_fn
      - 10.67% __aio_get_req
           io_submit_one

The spinlock is the ctx_lock, taken when allocating the request.  This
seemed like pretty low-hanging fruit to me, so I cooked up a patch to
allocate 32 kiocbs at a time.  This is the result:

Events: 50K cycles
-      5.54%  aio-stress  [kernel.kallsyms]             [k] _raw_spin_lock_irq
   - _raw_spin_lock_irq
      + 83.23% scsi_request_fn
      - 5.14% io_submit_one

So, aio-stress now takes up ~2% less CPU.  This seems like a worthwhile
thing to do, but I don't have a system handy that would show any real
performance gains from it.  The higher the number of iocbs passed to
io_submit, the more of a win this will be.

FWIW, I tested this with multiple different batch sizes (1, 32, 33, 64,
256) and it didn't blow up.
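
In case it's useful, the I/O pattern that benefits looks roughly like
this as a standalone libaio program (device path, sizes and queue depth
are placeholders, error handling omitted -- a sketch, not a benchmark):

	#define _GNU_SOURCE
	#include <libaio.h>
	#include <fcntl.h>
	#include <stdlib.h>

	#define BATCH	32	/* iocbs per io_submit call, as with -b 32 */
	#define RECSZ	8192

	int main(void)
	{
		io_context_t ctx = 0;
		struct iocb cbs[BATCH], *cbp[BATCH];
		struct io_event events[BATCH];
		char *buf;
		int fd, i;

		fd = open("/dev/sdk", O_RDONLY | O_DIRECT);
		io_setup(256, &ctx);	/* completion ring sized for the depth */
		posix_memalign((void **)&buf, 4096, BATCH * RECSZ);

		for (i = 0; i < BATCH; i++) {
			io_prep_pread(&cbs[i], fd, buf + i * RECSZ, RECSZ,
				      (long long)i * RECSZ);
			cbp[i] = &cbs[i];
		}

		/* One system call carrying 32 iocbs.  Today the kernel
		 * takes ctx_lock once per iocb in here; with the patch
		 * it's once per batch of 32. */
		io_submit(ctx, BATCH, cbp);
		io_getevents(ctx, BATCH, BATCH, events, NULL);

		io_destroy(ctx);
		return 0;
	}

(Build with -laio.)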

Comments?

Cheers,
Jeff

Signed-off-by: Jeff Moyer <jmoyer@redhat.com>

diff --git a/fs/aio.c b/fs/aio.c
index e29ec48..ae6066f 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -440,8 +440,6 @@ void exit_aio(struct mm_struct *mm)
 static struct kiocb *__aio_get_req(struct kioctx *ctx)
 {
 	struct kiocb *req = NULL;
-	struct aio_ring *ring;
-	int okay = 0;
 
 	req = kmem_cache_alloc(kiocb_cachep, GFP_KERNEL);
 	if (unlikely(!req))
@@ -459,40 +457,107 @@ static struct kiocb *__aio_get_req(struct kioctx *ctx)
 	INIT_LIST_HEAD(&req->ki_run_list);
 	req->ki_eventfd = NULL;
 
-	/* Check if the completion queue has enough free space to
-	 * accept an event from this io.
+	return req;
+}
+
+#define KIOCB_STASH_SIZE	32
+struct kiocb_stash {
+	unsigned head;
+	unsigned tail;
+	unsigned total;
+	unsigned used;
+	struct kiocb *kiocbs[KIOCB_STASH_SIZE];
+};
+static void kiocb_stash_init(struct kiocb_stash *stash, int nr)
+{
+	stash->head = stash->tail = 0;
+	stash->total = nr;
+	stash->used = 0;
+}
+static void kiocb_stash_free(struct kiocb_stash *stash, int used)
+{
+	int i;
+
+	for (i = used; i < stash->total; i++)
+		kmem_cache_free(kiocb_cachep, stash->kiocbs[i]);
+}
+static inline unsigned kiocb_stash_avail(struct kiocb_stash *stash)
+{
+	return stash->tail - stash->head;
+}
+
+/*
+ * Allocate nr kiocbs.  This aovids taking and dropping the lock a
+ * lot during setup.
+ */
+static int kiocb_stash_refill(struct kioctx *ctx, struct kiocb_stash *stash)
+{
+	int i, nr = KIOCB_STASH_SIZE;
+	int avail;
+	int called_fput = 0;
+	struct aio_ring *ring;
+
+	/*
+	 * Allocate the requests up-front.  Later, we'll trim the list
+	 * if there isn't enough room in the completion ring.
 	 */
+	stash->head = 0;
+	if ((stash->total - stash->used) % KIOCB_STASH_SIZE)
+		/* don't need to allocate a full batch */
+		nr = stash->total - stash->used;
+
+	for (i = 0; i < nr; i++) {
+		stash->kiocbs[i] = __aio_get_req(ctx);
+		if (!stash->kiocbs[i] && !called_fput) {
+			/* Handle a potential starvation case --
+			 * should be exceedingly rare as requests will
+			 * be stuck on fput_head only if the
+			 * aio_fput_routine is delayed and the
+			 * requests were the last user of the struct
+			 * file.
+			 */
+			aio_fput_routine(NULL);
+			called_fput = 1;
+			i--;
+			continue;
+		}
+	}
+	stash->tail = i;
+
 	spin_lock_irq(&ctx->ctx_lock);
 	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
-	if (ctx->reqs_active < aio_ring_avail(&ctx->ring_info, ring)) {
-		list_add(&req->ki_list, &ctx->active_reqs);
+
+	avail = aio_ring_avail(&ctx->ring_info, ring) + ctx->reqs_active;
+	if (avail < stash->tail) {
+		/* Trim back the number of requests. */
+		for (i = avail; i < stash->tail; i++)
+			kmem_cache_free(kiocb_cachep, stash->kiocbs[i]);
+		stash->tail = avail;
+	}
+	for (i = 0; i < stash->tail; i++) {
+		list_add(&stash->kiocbs[i]->ki_list, &ctx->active_reqs);
 		ctx->reqs_active++;
-		okay = 1;
 	}
 	kunmap_atomic(ring, KM_USER0);
 	spin_unlock_irq(&ctx->ctx_lock);
 
-	if (!okay) {
-		kmem_cache_free(kiocb_cachep, req);
-		req = NULL;
-	}
-
-	return req;
+	if (stash->tail == 0)
+		return -1;
+	return 0;
 }
 
-static inline struct kiocb *aio_get_req(struct kioctx *ctx)
+static inline struct kiocb *aio_get_req(struct kioctx *ctx,
+					struct kiocb_stash *stash)
 {
-	struct kiocb *req;
-	/* Handle a potential starvation case -- should be exceedingly rare as 
-	 * requests will be stuck on fput_head only if the aio_fput_routine is 
-	 * delayed and the requests were the last user of the struct file.
-	 */
-	req = __aio_get_req(ctx);
-	if (unlikely(NULL == req)) {
-		aio_fput_routine(NULL);
-		req = __aio_get_req(ctx);
+	int ret;
+
+	if (kiocb_stash_avail(stash) == 0) {
+		ret = kiocb_stash_refill(ctx, stash);
+		if (ret)
+			return NULL;
 	}
-	return req;
+	stash->used++;
+	return stash->kiocbs[stash->head++];
 }
 
 static inline void really_put_req(struct kioctx *ctx, struct kiocb *req)
@@ -1515,7 +1580,8 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 }
 
 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
-			 struct iocb *iocb, bool compat)
+			 struct iocb *iocb, struct kiocb_stash *stash,
+			 bool compat)
 {
 	struct kiocb *req;
 	struct file *file;
@@ -1541,7 +1607,7 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
 	if (unlikely(!file))
 		return -EBADF;
 
-	req = aio_get_req(ctx);		/* returns with 2 references to req */
+	req = aio_get_req(ctx, stash);	/* returns with 2 references to req */
 	if (unlikely(!req)) {
 		fput(file);
 		return -EAGAIN;
@@ -1621,8 +1687,9 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 {
 	struct kioctx *ctx;
 	long ret = 0;
-	int i;
+	int i = 0;
 	struct blk_plug plug;
+	struct kiocb_stash kiocbs;
 
 	if (unlikely(nr < 0))
 		return -EINVAL;
@@ -1639,6 +1706,8 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 		return -EINVAL;
 	}
 
+	kiocb_stash_init(&kiocbs, nr);
+
 	blk_start_plug(&plug);
 
 	/*
@@ -1659,12 +1728,13 @@ long do_io_submit(aio_context_t ctx_id, long nr,
 			break;
 		}
 
-		ret = io_submit_one(ctx, user_iocb, &tmp, compat);
+		ret = io_submit_one(ctx, user_iocb, &tmp, &kiocbs, compat);
 		if (ret)
 			break;
 	}
 	blk_finish_plug(&plug);
 
+	kiocb_stash_free(&kiocbs, i);
 	put_ioctx(ctx);
 	return i ? i : ret;
 }


* Re: [patch,rfc] aio: allocate kiocbs in batches
  2011-07-05 20:35 [patch,rfc] aio: allocate kiocbs in batches Jeff Moyer
@ 2011-09-01  0:13 ` Andrew Morton
  2011-09-01 13:56   ` Jeff Moyer
  0 siblings, 1 reply; 5+ messages in thread
From: Andrew Morton @ 2011-09-01  0:13 UTC (permalink / raw)
  To: Jeff Moyer; +Cc: linux-aio, linux-fsdevel

On Tue, 05 Jul 2011 16:35:21 -0400
Jeff Moyer <jmoyer@redhat.com> wrote:

> Hi,
> 
> When looking at perf data for my aio-stress test runs against some
> relatively fast storage, I noticed that __aio_get_req was taking up more
> CPU than I expected.  The command line for aio-stress was:
>   aio-stress -O -r 8 -d 256 -b 32 -l -s 16384 /dev/sdk
> which means we're sending 32 iocbs to io_submit at a time.  Here's the
> perf report -g output:
> 
> Events: 41K cycles
> -      7.33%  aio-stress  [kernel.kallsyms]             [k] _raw_spin_lock_irq
>    - _raw_spin_lock_irq
>       + 75.38% scsi_request_fn
>       - 10.67% __aio_get_req
>            io_submit_one
> 
> The spinlock is the ctx_lock, taken when allocating the request.  This
> seemed like pretty low-hanging fruit to me, so I cooked up a patch to
> allocate 32 kiocbs at a time.  This is the result:
> 
> Events: 50K cycles
> -      5.54%  aio-stress  [kernel.kallsyms]             [k] _raw_spin_lock_irq
>    - _raw_spin_lock_irq
>       + 83.23% scsi_request_fn
>       - 5.14% io_submit_one
> 
> So, aio-stress now takes up ~2% less CPU.  This seems like a worthwhile
> thing to do, but I don't have a system handy that would show any real
> performance gains from it.  The higher the number of iocbs passed to
> io_submit, the more of a win this will be.
> 
> FWIW, I tested this with multiple different batch sizes (1, 32, 33, 64,
> 256) and it didn't blow up.
> 

So the benefit comes from reducing the frequency with which ctx_lock is
taken, rather than from reducing its per-acquisition hold times.

>
> ...
>
> +#define KIOCB_STASH_SIZE	32
> +struct kiocb_stash {
> +	unsigned head;
> +	unsigned tail;
> +	unsigned total;
> +	unsigned used;
> +	struct kiocb *kiocbs[KIOCB_STASH_SIZE];
> +};

Laziness begets laziness: you didn't bother documenting how this ring
thing works and why it exists, so I didn't bother reverse-engineering
that information.

Once all this has been fixed, I might be wondering why it was done with
a yukky fixed-size array rather than with a list.



A pox on you for telling us there's a "potential starvation case" but
not telling us what it is!  It's a kmalloc failure - why is this
described as "starvation"?

Once all this has been fixed, I might be wondering how we test the
"potential starvation case".




Various trivial fixies:



From: Andrew Morton <akpm@linux-foundation.org>

- coding-style fixes

- improve variable name(s)

- fix typo(s)

- use new kmap_atomic() interface

- change comment to use the number of columns God gave us

- use "bool" for a boolean

Cc: Jeff Moyer <jmoyer@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---

 fs/aio.c |   30 ++++++++++++++++--------------
 1 file changed, 16 insertions(+), 14 deletions(-)

diff -puN fs/aio.c~aio-allocate-kiocbs-in-batches-fix fs/aio.c
--- a/fs/aio.c~aio-allocate-kiocbs-in-batches-fix
+++ a/fs/aio.c
@@ -468,12 +468,14 @@ struct kiocb_stash {
 	unsigned used;
 	struct kiocb *kiocbs[KIOCB_STASH_SIZE];
 };
-static void kiocb_stash_init(struct kiocb_stash *stash, int nr)
+
+static void kiocb_stash_init(struct kiocb_stash *stash, int total)
 {
 	stash->head = stash->tail = 0;
-	stash->total = nr;
+	stash->total = total;
 	stash->used = 0;
 }
+
 static void kiocb_stash_free(struct kiocb_stash *stash, int used)
 {
 	int i;
@@ -481,24 +483,25 @@ static void kiocb_stash_free(struct kioc
 	for (i = used; i < stash->total; i++)
 		kmem_cache_free(kiocb_cachep, stash->kiocbs[i]);
 }
+
 static inline unsigned kiocb_stash_avail(struct kiocb_stash *stash)
 {
 	return stash->tail - stash->head;
 }
 
 /*
- * Allocate nr kiocbs.  This aovids taking and dropping the lock a
+ * Allocate nr kiocbs.  This avoids taking and dropping the lock a
  * lot during setup.
  */
 static int kiocb_stash_refill(struct kioctx *ctx, struct kiocb_stash *stash)
 {
 	int i, nr = KIOCB_STASH_SIZE;
 	int avail;
-	int called_fput = 0;
+	bool called_fput;
 	struct aio_ring *ring;
 
 	/*
-	 * Allocate the requests up-front.  Later, we'll trim the list
+	 * Allocate the requests up front.  Later, we'll trim the list
 	 * if there isn't enough room in the completion ring.
 	 */
 	stash->head = 0;
@@ -509,15 +512,14 @@ static int kiocb_stash_refill(struct kio
 	for (i = 0; i < nr; i++) {
 		stash->kiocbs[i] = __aio_get_req(ctx);
 		if (!stash->kiocbs[i] && !called_fput) {
-			/* Handle a potential starvation case --
-			 * should be exceedingly rare as requests will
-			 * be stuck on fput_head only if the
-			 * aio_fput_routine is delayed and the
-			 * requests were the last user of the struct
-			 * file.
+			/*
+			 * Handle a potential starvation case.  Should be
+			 * exceedingly rare as requests will be stuck on
+			 * fput_head only if the aio_fput_routine is delayed and
+			 * the requests were the last user of the struct file.
 			 */
 			aio_fput_routine(NULL);
-			called_fput = 1;
+			called_fput = true;
 			i--;
 			continue;
 		}
@@ -525,7 +527,7 @@ static int kiocb_stash_refill(struct kio
 	stash->tail = i;
 
 	spin_lock_irq(&ctx->ctx_lock);
-	ring = kmap_atomic(ctx->ring_info.ring_pages[0], KM_USER0);
+	ring = kmap_atomic(ctx->ring_info.ring_pages[0]);
 
 	avail = aio_ring_avail(&ctx->ring_info, ring) + ctx->reqs_active;
 	if (avail < stash->tail) {
@@ -538,7 +540,7 @@ static int kiocb_stash_refill(struct kio
 		list_add(&stash->kiocbs[i]->ki_list, &ctx->active_reqs);
 		ctx->reqs_active++;
 	}
-	kunmap_atomic(ring, KM_USER0);
+	kunmap_atomic(ring);
 	spin_unlock_irq(&ctx->ctx_lock);
 
 	if (stash->tail == 0)
_


* Re: [patch,rfc] aio: allocate kiocbs in batches
  2011-09-01  0:13 ` Andrew Morton
@ 2011-09-01 13:56   ` Jeff Moyer
  2011-09-01 22:35     ` Andrew Morton
  0 siblings, 1 reply; 5+ messages in thread
From: Jeff Moyer @ 2011-09-01 13:56 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-aio, linux-fsdevel

Andrew Morton <akpm@linux-foundation.org> writes:

> On Tue, 05 Jul 2011 16:35:21 -0400
> Jeff Moyer <jmoyer@redhat.com> wrote:
>
>> Hi,
>> 
>> When looking at perf data for my aio-stress test runs against some
>> relatively fast storage, I noticed that __aio_get_req was taking up more
>> CPU than I expected.  The command line for aio-stress was:
>>   aio-stress -O -r 8 -d 256 -b 32 -l -s 16384 /dev/sdk
>> which means we're sending 32 iocbs to io_submit at a time.  Here's the
>> perf report -g output:
>> 
>> Events: 41K cycles
>> -      7.33%  aio-stress  [kernel.kallsyms]             [k] _raw_spin_lock_irq
>>    - _raw_spin_lock_irq
>>       + 75.38% scsi_request_fn
>>       - 10.67% __aio_get_req
>>            io_submit_one
>> 
>> The spinlock is the ctx_lock, taken when allocating the request.  This
>> seemed like pretty low-hanging fruit to me, so I cooked up a patch to
>> allocate 32 kiocbs at a time.  This is the result:
>> 
>> Events: 50K cycles
>> -      5.54%  aio-stress  [kernel.kallsyms]             [k] _raw_spin_lock_irq
>>    - _raw_spin_lock_irq
>>       + 83.23% scsi_request_fn
>>       - 5.14% io_submit_one
>> 
>> So, aio-stress now takes up ~2% less CPU.  This seems like a worthwhile
>> thing to do, but I don't have a system handy that would show any real
>> performance gains from it.  The higher the number of iocbs passed to
>> io_submit, the more of a win this will be.
>> 
>> FWIW, I tested this with multiple different batch sizes (1, 32, 33, 64,
>> 256) and it didn't blow up.
>> 
>
> So the benefit comes from reducing the frequency with which ctx_lock is
> taken, rather than from reducing its per-acquisition hold times.

That's right.  The lock is taken in the submission and completion paths.

>> ...
>>
>> +#define KIOCB_STASH_SIZE	32
>> +struct kiocb_stash {
>> +	unsigned head;
>> +	unsigned tail;
>> +	unsigned total;
>> +	unsigned used;
>> +	struct kiocb *kiocbs[KIOCB_STASH_SIZE];
>> +};
>
> Laziness begets laziness: you didn't bother documenting how this ring
> thing works and why it exists, so I didn't bother reverse-engineering
> that information.

Sorry.  I used a ring as I didn't want to introduce more allocations in
the submission path (though I have no real data on which to base this
premature optimization).  As for documenting how it works, I thought it
was pretty straightforward.
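
To spell it out, here is the same struct with the invariants written in
(comments only, no functional change):

	struct kiocb_stash {
		unsigned head;	/* kiocbs[head] is the next request to
				 * hand out; avail == tail - head */
		unsigned tail;	/* one past the last request allocated
				 * in the current batch */
		unsigned total;	/* the nr passed to io_submit(); we
				 * never hand out more than this */
		unsigned used;	/* requests handed out so far, counted
				 * across all refills */
		struct kiocb *kiocbs[KIOCB_STASH_SIZE];
	};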

> Once all this has been fixed, I might be wondering why it was done with
> a yukky fixed-size array rather than with a list.

I could change it to a list, if that's preferred.  Remember, this was an
RFC.

> A pox on you for telling us there's a "potential starvation case" but
> not telling us what it is!  It's a kmalloc failure - why is this
> described as "starvation"?

Perhaps you missed that I simply moved that comment in the code;  I
didn't add it.  When allocating an I/O context, the user supplies a
maximum number of events.  That number is used to size the ring buffer
that holds completion data.  It is possible that there is a completed
request waiting for a final fput that is keeping a new allocation from
succeeding.  Calling the aio_fput_routine can free that up, allowing the
allocation to succeed.

> Once all this has been fixed, I might be wondering how we test the
> "potential starvation case".

Well, probably a close on the fd prior to event completion, coupled with
submitting new I/O (on a different fd) when the event completion is
delivered to userspace but before the fput on the fd.

> Various trivial fixies:
>
>
> From: Andrew Morton <akpm@linux-foundation.org>
>
> - coding-style fixes
>
> - improve variable name(s)
>
> - fix typo(s)
>
> - use new kmap_atomic() interface
>
> - change comment to use the number of columns God gave us
>
> - use "bool" for a boolean
>
> Cc: Jeff Moyer <jmoyer@redhat.com>
> Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

This was an RFC.  I'm happy to have it merged, but I would have been
equally happy to have addressed your review comments first.  In the end,
whatever works best for you is fine with me.

Thanks for taking time to look at it!

Cheers,
Jeff


* Re: [patch,rfc] aio: allocate kiocbs in batches
  2011-09-01 13:56   ` Jeff Moyer
@ 2011-09-01 22:35     ` Andrew Morton
  2011-09-08 17:54       ` Jeff Moyer
  0 siblings, 1 reply; 5+ messages in thread
From: Andrew Morton @ 2011-09-01 22:35 UTC (permalink / raw)
  To: Jeff Moyer; +Cc: linux-aio, linux-fsdevel

On Thu, 01 Sep 2011 09:56:38 -0400
Jeff Moyer <jmoyer@redhat.com> wrote:

> >> ...
> >>
> >> +#define KIOCB_STASH_SIZE	32
> >> +struct kiocb_stash {
> >> +	unsigned head;
> >> +	unsigned tail;
> >> +	unsigned total;
> >> +	unsigned used;
> >> +	struct kiocb *kiocbs[KIOCB_STASH_SIZE];
> >> +};
> >
> > Laziness begets laziness: you didn't bother documenting how this ring
> > thing works and why it exists, so I didn't bother reverse-engineering
> > that information.
> 
> Sorry.  I used a ring as I didn't want to introduce more allocations in
> the submission path (though I have no real data on which to base this
> premature optimization).

A list_head in the kiocb wouldn't require additional allocation
operations.

>  As for documenting how it works, I thought it
> was pretty straight-forward.

Well you would, you wrote it!  Others must go backwards from the
implementation and arrive at the model you had in your mind at the
time.

That isn't super-hard of course, but when I find myself scratching my
head even a small amount over what was in your mind, that's when I
stop.  Because I know that everyone else who reads this code will also
end up scratching their head.
 
> > Once all this has been fixed, I might be wondering why it was done with
> > a yukky fixed-size array rather than with a list.
> 
> I could change it to a list, if that's preferred.

To prefer that I'd need to go back to head-scratching reverse-engineering ;)

afaict and afair, we don't do any random indexing into that array or
anything like that - we always access just the head element and the
tail.  It seems like a good fit for a list.
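
Something like this, say (a wholly untested sketch: it assumes a new
ki_batch list_head in the kiocb, plus a kiocb_batch_refill() that does
what kiocb_stash_refill() does now, building a list and returning how
many kiocbs it added):

	struct kiocb_batch {
		struct list_head head;	/* allocated, unconsumed kiocbs */
		long count;		/* iocbs still to be handed out */
	};

	static void kiocb_batch_init(struct kiocb_batch *batch, long total)
	{
		INIT_LIST_HEAD(&batch->head);
		batch->count = total;
	}

	static struct kiocb *kiocb_batch_get(struct kioctx *ctx,
					     struct kiocb_batch *batch)
	{
		struct kiocb *req;

		if (list_empty(&batch->head) &&
		    kiocb_batch_refill(ctx, batch) == 0)
			return NULL;
		req = list_first_entry(&batch->head, struct kiocb, ki_batch);
		list_del(&req->ki_batch);
		batch->count--;
		return req;
	}

No fixed-size array, no head/tail arithmetic, and the trim-back in
refill becomes a plain walk-and-free of the list tail.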

>  Remember, this was an RFC.

Yes, I C'ed ;)

When a decent-looking patch has been lying fallow for over a month it's
time to kick it along a bit.

> 
> > A pox on you for telling us there's a "potential starvation case" but
> > not telling us what it is!  It's a kmalloc failure - why is this
> > described as "starvation"?
> 
> Perhaps you missed that I simply moved that comment in the code;  I
> didn't add it.

Oh, OK.  A pox on you for not fixing it ;)

>  When allocating an I/O context, the user supplies a
> maximum number of events.  That number is used to size the ring buffer
> that holds completion data.  It is possible that there is a completed
> request waiting for a final fput that is keeping a new allocation from
> succeeding.  Calling the aio_fput_routine can free that up, allowing the
> allocation to succeed.

Oh well, if you can think of a way of clarifying and completing that
comment, please do so.

> > Once all this has been fixed, I might be wondering how we test the
> > "potential starvation case".
> 
> Well, probably a close on the fd prior to event completion, coupled with
> submitting new I/O (on a different fd) when the event completion is
> delivered to userspace but before the fput on the fd.

Please put a printk in there and see if you can get it to go off?

> > Various trivial fixies:
> >
> >
> > From: Andrew Morton <akpm@linux-foundation.org>
> >
> > - coding-style fixes
> >
> > - improve variable name(s)
> >
> > - fix typo(s)
> >
> > - use new kmap_atomic() interface
> >
> > - change comment to use the number of columns God gave us
> >
> > - use "bool" for a boolean
> >
> > Cc: Jeff Moyer <jmoyer@redhat.com>
> > Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
> 
> This was an RFC.  I'm happy to have it merged, but I would have been
> equally happy to have addressed your review comments first.  In the end,
> whatever works best for you is fine with me.
> 

I do think the patch (and the surrounding code) can be improved in
several ways.



* Re: [patch,rfc] aio: allocate kiocbs in batches
  2011-09-01 22:35     ` Andrew Morton
@ 2011-09-08 17:54       ` Jeff Moyer
  0 siblings, 0 replies; 5+ messages in thread
From: Jeff Moyer @ 2011-09-08 17:54 UTC (permalink / raw)
  To: Andrew Morton; +Cc: linux-aio, linux-fsdevel

Andrew Morton <akpm@linux-foundation.org> writes:

> I do think the patch (and the surrounding code) can be improved in
> several ways.

Would you like a new patch that is a roll-up of the two you have in
your tree + any changes I'm making, or would you prefer a 3rd patch?

Btw, when changing 'called_fput' from an int to a bool, you forgot to
initialize it.  I'll fix it in the next round.
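
That is, kiocb_stash_refill() wants:

	bool called_fput = false;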

Cheers,
Jeff

