* Re: [PATCH]: block: try-2 (modified): Initialize bi_rw in mpage so bio_add can make use of it.
2011-07-18 18:54 ` Muthu Kumar
@ 2011-07-18 19:13 ` Muthu Kumar
2011-07-25 17:22 ` Muthu Kumar
0 siblings, 1 reply; 8+ messages in thread
From: Muthu Kumar @ 2011-07-18 19:13 UTC (permalink / raw)
To: Jens Axboe; +Cc: linux-kernel
[-- Attachment #1: Type: text/plain, Size: 4139 bytes --]
Patch attached (as one big file) for review.
On Mon, Jul 18, 2011 at 11:54 AM, Muthu Kumar <muthu.lkml@gmail.com> wrote:
> Jens,
> Here are the diff stats. It's changing more files than I expected. Let
> me know what you think... (If this is OK, I can also split the patch and
> send the pieces for review).
>
> FYI:
> [muthu@sakthi linux-2.6-head]$ wc -l blk-bio-init-rw-before-add-page.patch
> 1610 blk-bio-init-rw-before-add-page.patch
>
> ---------------------------
> block/blk-core.c | 2 +-
> block/blk-flush.c | 2 +-
> block/blk-lib.c | 4 +-
> block/blk-map.c | 12 ++++-----
> drivers/block/drbd/drbd_actlog.c | 3 +-
> drivers/block/drbd/drbd_bitmap.c | 3 +-
> drivers/block/drbd/drbd_receiver.c | 3 +-
> drivers/block/floppy.c | 2 +-
> drivers/block/loop.c | 2 +-
> drivers/block/osdblk.c | 2 +-
> drivers/block/pktcdvd.c | 15 +++++------
> drivers/block/virtio_blk.c | 2 +-
> drivers/block/xen-blkback/blkback.c | 4 +-
> drivers/md/dm-crypt.c | 5 +--
> drivers/md/dm-io.c | 2 +-
> drivers/md/dm.c | 10 +++-----
> drivers/md/md.c | 17 +++++++------
> drivers/md/md.h | 2 +-
> drivers/md/raid1.c | 7 +++++-
> drivers/md/raid10.c | 7 +++++-
> drivers/md/raid5.c | 2 +-
> drivers/scsi/osd/osd_initiator.c | 21 ++++++----------
> drivers/target/target_core_iblock.c | 3 +-
> fs/bio.c | 43 ++++++++++++++++++-----------------
> fs/btrfs/compression.c | 12 +++++-----
> fs/btrfs/extent_io.c | 8 +++---
> fs/btrfs/extent_io.h | 2 +-
> fs/btrfs/inode.c | 21 +++++++++--------
> fs/btrfs/scrub.c | 4 +-
> fs/buffer.c | 2 +-
> fs/direct-io.c | 12 +++++-----
> fs/exofs/ios.c | 6 +++-
> fs/ext4/page-io.c | 6 +++-
> fs/gfs2/ops_fstype.c | 6 +++-
> fs/hfsplus/wrapper.c | 2 +-
> fs/jfs/jfs_logmgr.c | 12 ++++++---
> fs/jfs/jfs_metapage.c | 4 +-
> fs/logfs/dev_bdev.c | 10 ++++----
> fs/mpage.c | 11 +++++----
> fs/nfs/objlayout/objio_osd.c | 8 +++---
> fs/nilfs2/segbuf.c | 9 ++++---
> fs/ocfs2/cluster/heartbeat.c | 9 ++++---
> fs/xfs/linux-2.6/xfs_aops.c | 8 ++++--
> fs/xfs/linux-2.6/xfs_buf.c | 2 +-
> include/linux/bio.h | 10 ++++----
> kernel/power/block_io.c | 2 +-
> mm/bounce.c | 2 +-
> mm/page_io.c | 14 ++++++-----
> 48 files changed, 187 insertions(+), 170 deletions(-)
> -----------------------------------
>
>
> On Mon, Jul 11, 2011 at 10:59 AM, Jens Axboe <jaxboe@fusionio.com> wrote:
>> On 2011-07-11 19:52, Muthu Kumar wrote:
>>>> For this particular case, doing it when the bio is allocated makes more
>>>> sense. That will avoid a similar error in there in the future.
>>>>
>>>
>>> Sounds good. Thanks. How about for other cases that alloc a new bio
>>> and do bio_add_page() - like blkdev_issue_zeroout() and similar.
>>> Should we add there too?
>>
>> Good question, ideally the allocator should be passed in the rw argument
>> since we need it before even submitting it for the merge cases. We don't
>> have _that_ many callers of bio_alloc() or bio_kmalloc(), so probably
>> the best option is just to bite the bullet and change the prototypes to
>> take the 'rw' argument there. Oh, and the bioset variants, too.
>> bio_init() should be passed the 'rw' argument from the allocators, so we
>> catch any private use of that, too.
>>
>> --
>> Jens Axboe
>>
>>
>
[-- Attachment #2: blk-bio-init-rw-before-add-page.patch --]
[-- Type: application/octet-stream, Size: 54789 bytes --]
diff --git a/block/blk-core.c b/block/blk-core.c
index d2f8f40..92bfafc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2570,7 +2570,7 @@ int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
blk_rq_init(NULL, rq);
__rq_for_each_bio(bio_src, rq_src) {
- bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bs);
+ bio = bio_alloc_bioset(gfp_mask, bio_src->bi_max_vecs, bio_src->bi_rw, bs);
if (!bio)
goto free_and_out;
diff --git a/block/blk-flush.c b/block/blk-flush.c
index bb21e4c..a87e1f0 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -417,7 +417,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
if (!q->make_request_fn)
return -ENXIO;
- bio = bio_alloc(gfp_mask, 0);
+ bio = bio_alloc(gfp_mask, 0, WRITE_FLUSH);
bio->bi_end_io = bio_end_flush;
bio->bi_bdev = bdev;
bio->bi_private = &wait;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 78e627e..9fa1a0e 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -76,7 +76,7 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
bb.wait = &wait;
while (nr_sects) {
- bio = bio_alloc(gfp_mask, 1);
+ bio = bio_alloc(gfp_mask, 1, type);
if (!bio) {
ret = -ENOMEM;
break;
@@ -138,7 +138,7 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
ret = 0;
while (nr_sects != 0) {
bio = bio_alloc(gfp_mask,
- min(nr_sects, (sector_t)BIO_MAX_PAGES));
+ min(nr_sects, (sector_t)BIO_MAX_PAGES), WRITE);
if (!bio) {
ret = -ENOMEM;
break;
diff --git a/block/blk-map.c b/block/blk-map.c
index e663ac2..4c657a9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -290,11 +290,10 @@ EXPORT_SYMBOL(blk_rq_unmap_user);
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
unsigned int len, gfp_t gfp_mask)
{
- int reading = rq_data_dir(rq) == READ;
unsigned long addr = (unsigned long) kbuf;
int do_copy = 0;
struct bio *bio;
- int ret;
+ int ret, rw;
if (len > (queue_max_hw_sectors(q) << 9))
return -EINVAL;
@@ -302,17 +301,16 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
return -EINVAL;
do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
+
+ rw = (rq_data_dir(rq) == WRITE) ? WRITE : READ;
if (do_copy)
- bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
+ bio = bio_copy_kern(q, kbuf, len, gfp_mask, rw == READ);
else
- bio = bio_map_kern(q, kbuf, len, gfp_mask);
+ bio = bio_map_kern(q, kbuf, len, rw, gfp_mask);
if (IS_ERR(bio))
return PTR_ERR(bio);
- if (rq_data_dir(rq) == WRITE)
- bio->bi_rw |= REQ_WRITE;
-
if (do_copy)
rq->cmd_flags |= REQ_COPY_USER;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index cf0e63d..5e19e53 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -82,7 +82,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
rw |= REQ_FUA | REQ_FLUSH;
rw |= REQ_SYNC;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(GFP_NOIO, 1, rw);
bio->bi_bdev = bdev->md_bdev;
bio->bi_sector = sector;
ok = (bio_add_page(bio, page, size, 0) == size);
@@ -90,7 +90,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
goto out;
bio->bi_private = &md_io;
bio->bi_end_io = drbd_md_io_complete;
- bio->bi_rw = rw;
if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD))
bio_endio(bio, -EIO);
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 7b97629..c62eba4 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -944,7 +944,7 @@ static void bm_async_io_complete(struct bio *bio, int error)
static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must_hold(local)
{
/* we are process context. we always get a bio */
- struct bio *bio = bio_alloc(GFP_KERNEL, 1);
+ struct bio *bio = bio_alloc(GFP_KERNEL, 1, rw);
struct drbd_conf *mdev = ctx->mdev;
struct drbd_bitmap *b = mdev->bitmap;
struct page *page;
@@ -987,7 +987,6 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
bio->bi_end_io = bm_async_io_complete;
if (drbd_insert_fault(mdev, (rw & WRITE) ? DRBD_FAULT_MD_WR : DRBD_FAULT_MD_RD)) {
- bio->bi_rw |= rw;
bio_endio(bio, -EIO);
} else {
submit_bio(rw, bio);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 43beaca..b3b0813 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1098,7 +1098,7 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
* side than those of the sending peer, we may need to submit the
* request in more than one bio. */
next_bio:
- bio = bio_alloc(GFP_NOIO, nr_pages);
+ bio = bio_alloc(GFP_NOIO, nr_pages, rw);
if (!bio) {
dev_err(DEV, "submit_ee: Allocation of a bio failed\n");
goto fail;
@@ -1106,7 +1106,6 @@ next_bio:
/* > e->sector, unless this is the first bio */
bio->bi_sector = sector;
bio->bi_bdev = mdev->ldev->backing_bdev;
- bio->bi_rw = rw;
bio->bi_private = e;
bio->bi_end_io = drbd_endio_sec;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 98de8f4..b527946 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3823,7 +3823,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
if (!size)
size = 1024;
- bio_init(&bio);
+ bio_init(&bio, READ);
bio.bi_io_vec = &bio_vec;
bio_vec.bv_page = page;
bio_vec.bv_len = size;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 76c8da7..f0b7d4b 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -604,7 +604,7 @@ static int loop_thread(void *data)
static int loop_switch(struct loop_device *lo, struct file *file)
{
struct switch_request w;
- struct bio *bio = bio_alloc(GFP_KERNEL, 0);
+ struct bio *bio = bio_alloc(GFP_KERNEL, 0, READ);
if (!bio)
return -ENOMEM;
init_completion(&w.wait);
diff --git a/drivers/block/osdblk.c b/drivers/block/osdblk.c
index 87311eb..244a48b 100644
--- a/drivers/block/osdblk.c
+++ b/drivers/block/osdblk.c
@@ -266,7 +266,7 @@ static struct bio *bio_chain_clone(struct bio *old_chain, gfp_t gfpmask)
struct bio *tmp, *new_chain = NULL, *tail = NULL;
while (old_chain) {
- tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs);
+ tmp = bio_kmalloc(gfpmask, old_chain->bi_max_vecs, old_chain->bi_rw);
if (!tmp)
goto err_out;
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index 07a382e..199479e 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -528,7 +528,7 @@ static void pkt_bio_destructor(struct bio *bio)
kfree(bio);
}
-static struct bio *pkt_bio_alloc(int nr_iovecs)
+static struct bio *pkt_bio_alloc(int nr_iovecs, int rw)
{
struct bio_vec *bvl = NULL;
struct bio *bio;
@@ -536,7 +536,7 @@ static struct bio *pkt_bio_alloc(int nr_iovecs)
bio = kmalloc(sizeof(struct bio), GFP_KERNEL);
if (!bio)
goto no_bio;
- bio_init(bio);
+ bio_init(bio, rw);
bvl = kcalloc(nr_iovecs, sizeof(struct bio_vec), GFP_KERNEL);
if (!bvl)
@@ -567,7 +567,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
goto no_pkt;
pkt->frames = frames;
- pkt->w_bio = pkt_bio_alloc(frames);
+ pkt->w_bio = pkt_bio_alloc(frames, WRITE);
if (!pkt->w_bio)
goto no_bio;
@@ -581,7 +581,7 @@ static struct packet_data *pkt_alloc_packet_data(int frames)
bio_list_init(&pkt->orig_bios);
for (i = 0; i < frames; i++) {
- struct bio *bio = pkt_bio_alloc(1);
+ struct bio *bio = pkt_bio_alloc(1, READ);
if (!bio)
goto no_rd_bio;
pkt->r_bios[i] = bio;
@@ -1118,7 +1118,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
continue;
bio = pkt->r_bios[f];
vec = bio->bi_io_vec;
- bio_init(bio);
+ bio_init(bio, READ);
bio->bi_max_vecs = 1;
bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
bio->bi_bdev = pd->bdev;
@@ -1135,7 +1135,6 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
BUG();
atomic_inc(&pkt->io_wait);
- bio->bi_rw = READ;
pkt_queue_bio(pd, bio);
frames_read++;
}
@@ -1418,7 +1417,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
}
/* Start the write request */
- bio_init(pkt->w_bio);
+ bio_init(pkt->w_bio, WRITE);
pkt->w_bio->bi_max_vecs = PACKET_MAX_SIZE;
pkt->w_bio->bi_sector = pkt->sector;
pkt->w_bio->bi_bdev = pd->bdev;
@@ -1426,13 +1425,13 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
pkt->w_bio->bi_private = pkt;
pkt->w_bio->bi_io_vec = bvec;
pkt->w_bio->bi_destructor = pkt_bio_destructor;
+
for (f = 0; f < pkt->frames; f++)
if (!bio_add_page(pkt->w_bio, bvec[f].bv_page, CD_FRAMESIZE, bvec[f].bv_offset))
BUG();
VPRINTK(DRIVER_NAME": vcnt=%d\n", pkt->w_bio->bi_vcnt);
atomic_set(&pkt->io_wait, 1);
- pkt->w_bio->bi_rw = WRITE;
pkt_queue_bio(pd, pkt->w_bio);
}
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 079c088..dfa8988 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -207,7 +207,7 @@ static int virtblk_get_id(struct gendisk *disk, char *id_str)
int err;
bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
- GFP_KERNEL);
+ READ, GFP_KERNEL);
if (IS_ERR(bio))
return PTR_ERR(bio);
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 5cf2993..70de6b8 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -622,7 +622,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
seg[i].nsec << 9,
seg[i].buf & ~PAGE_MASK) == 0)) {
- bio = bio_alloc(GFP_KERNEL, nseg-i);
+ bio = bio_alloc(GFP_KERNEL, nseg-i, operation);
if (unlikely(bio == NULL))
goto fail_put_bio;
@@ -640,7 +640,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
if (!bio) {
BUG_ON(operation != WRITE_FLUSH);
- bio = bio_alloc(GFP_KERNEL, 0);
+ bio = bio_alloc(GFP_KERNEL, 0, operation);
if (unlikely(bio == NULL))
goto fail_put_bio;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index c8827ff..5355998 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -834,7 +834,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
unsigned i, len;
struct page *page;
- clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, cc->bs);
+ clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, io->base_bio->bi_rw, cc->bs);
if (!clone)
return NULL;
@@ -987,7 +987,6 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone)
clone->bi_private = io;
clone->bi_end_io = crypt_endio;
clone->bi_bdev = cc->dev->bdev;
- clone->bi_rw = io->base_bio->bi_rw;
clone->bi_destructor = dm_crypt_bio_destructor;
}
@@ -1002,7 +1001,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
* copy the required bvecs because we need the original
* one in order to decrypt the whole bio data *afterwards*.
*/
- clone = bio_alloc_bioset(gfp, bio_segments(base_bio), cc->bs);
+ clone = bio_alloc_bioset(gfp, bio_segments(base_bio), base_bio->bi_rw, cc->bs);
if (!clone)
return 1;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2067288..5e60218 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -299,7 +299,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
num_bvecs = dm_sector_div_up(remaining,
(PAGE_SIZE >> SECTOR_SHIFT));
num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
- bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
+ bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, rw, io->client->bios);
bio->bi_sector = where->sector + (where->count - remaining);
bio->bi_bdev = where->bdev;
bio->bi_end_io = endio;
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0cf68b4..0f150e4 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1035,13 +1035,12 @@ static struct bio *split_bvec(struct bio *bio, sector_t sector,
struct bio *clone;
struct bio_vec *bv = bio->bi_io_vec + idx;
- clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
+ clone = bio_alloc_bioset(GFP_NOIO, 1, bio->bi_rw, bs);
clone->bi_destructor = dm_bio_destructor;
*clone->bi_io_vec = *bv;
clone->bi_sector = sector;
clone->bi_bdev = bio->bi_bdev;
- clone->bi_rw = bio->bi_rw;
clone->bi_vcnt = 1;
clone->bi_size = to_bytes(len);
clone->bi_io_vec->bv_offset = offset;
@@ -1066,7 +1065,7 @@ static struct bio *clone_bio(struct bio *bio, sector_t sector,
{
struct bio *clone;
- clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
+ clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bio->bi_rw, bs);
__bio_clone(clone, bio);
clone->bi_destructor = dm_bio_destructor;
clone->bi_sector = sector;
@@ -1111,7 +1110,7 @@ static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
* ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
* and discard, so no need for concern about wasted bvec allocations.
*/
- clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
+ clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->bio->bi_rw, ci->md->bs);
__bio_clone(clone, ci->bio);
clone->bi_destructor = dm_bio_destructor;
if (len) {
@@ -1875,9 +1874,8 @@ static struct mapped_device *alloc_dev(int minor)
if (!md->bdev)
goto bad_bdev;
- bio_init(&md->flush_bio);
+ bio_init(&md->flush_bio, WRITE_FLUSH);
md->flush_bio.bi_bdev = md->bdev;
- md->flush_bio.bi_rw = WRITE_FLUSH;
/* Populate the mapping, nobody knows we exist yet */
spin_lock(&_minor_lock);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 91e31e2..7024cf4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -163,16 +163,16 @@ static void mddev_bio_destructor(struct bio *bio)
bio_free(bio, mddev->bio_set);
}
-struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
+struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, int rw,
mddev_t *mddev)
{
struct bio *b;
mddev_t **mddevp;
if (!mddev || !mddev->bio_set)
- return bio_alloc(gfp_mask, nr_iovecs);
+ return bio_alloc(gfp_mask, nr_iovecs, rw);
- b = bio_alloc_bioset(gfp_mask, nr_iovecs,
+ b = bio_alloc_bioset(gfp_mask, nr_iovecs, rw,
mddev->bio_set);
if (!b)
return NULL;
@@ -192,7 +192,7 @@ struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
if (!mddev || !mddev->bio_set)
return bio_clone(bio, gfp_mask);
- b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
+ b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bio->bi_rw,
mddev->bio_set);
if (!b)
return NULL;
@@ -402,7 +402,7 @@ static void submit_flushes(struct work_struct *ws)
atomic_inc(&rdev->nr_pending);
atomic_inc(&rdev->nr_pending);
rcu_read_unlock();
- bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
+ bi = bio_alloc_mddev(GFP_KERNEL, 0, WRITE_FLUSH, mddev);
bi->bi_end_io = md_end_flush;
bi->bi_private = rdev;
bi->bi_bdev = rdev->bdev;
@@ -786,7 +786,8 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
* if zero is reached.
* If an error occurred, call md_error
*/
- struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
+ int rw = REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA;
+ struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rw, mddev);
bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
bio->bi_sector = sector;
@@ -795,7 +796,7 @@ void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
bio->bi_end_io = super_written;
atomic_inc(&mddev->pending_writes);
- submit_bio(REQ_WRITE | REQ_SYNC | REQ_FLUSH | REQ_FUA, bio);
+ submit_bio(rw, bio);
}
void md_super_wait(mddev_t *mddev)
@@ -819,7 +820,7 @@ static void bi_complete(struct bio *bio, int error)
int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
struct page *page, int rw, bool metadata_op)
{
- struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
+ struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rw, rdev->mddev);
struct completion event;
int ret;
diff --git a/drivers/md/md.h b/drivers/md/md.h
index 1c26c7a..b691123 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -511,7 +511,7 @@ extern void mddev_suspend(mddev_t *mddev);
extern void mddev_resume(mddev_t *mddev);
extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
mddev_t *mddev);
-extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
+extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, int rw,
mddev_t *mddev);
extern int mddev_check_plugged(mddev_t *mddev);
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f7431b6..d9ee5e8 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -91,7 +91,12 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios : 1 for reading, n-1 for writing
*/
for (j = pi->raid_disks ; j-- ; ) {
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ /*
+ * For bio_kmalloc we set RW=0, but it will be set properly
+ * before we use the bio when we know the bdev and operation.
+ * NOTE: It will be done before we bio_add_page().
+ */
+ bio = bio_kmalloc(gfp_flags, RESYNC_PAGES, 0);
if (!bio)
goto out_free_bio;
r1_bio->bios[j] = bio;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6e84668..8592cd0 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -111,7 +111,12 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
* Allocate bios.
*/
for (j = nalloc ; j-- ; ) {
- bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
+ /*
+ * For bio_kmalloc we set RW=0, but it will be set properly
+ * before we use the bio when we know the bdev and operation.
+ * NOTE: It will be done before we bio_add_page().
+ */
+ bio = bio_kmalloc(gfp_flags, RESYNC_PAGES, 0);
if (!bio)
goto out_free_bio;
r10_bio->devs[j].bio = bio;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index b72edf3..e72d70d 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -1679,7 +1679,7 @@ static void raid5_build_block(struct stripe_head *sh, int i, int previous)
{
struct r5dev *dev = &sh->dev[i];
- bio_init(&dev->req);
+ bio_init(&dev->req, 0);
dev->req.bi_io_vec = &dev->vec;
dev->req.bi_vcnt++;
dev->req.bi_max_vecs++;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 86afb13f..45806be 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -718,13 +718,12 @@ static int _osd_req_list_objects(struct osd_request *or,
_osd_req_encode_olist(or, list);
WARN_ON(or->in.bio);
- bio = bio_map_kern(q, list, len, or->alloc_flags);
+ bio = bio_map_kern(q, list, len, READ, or->alloc_flags);
if (IS_ERR(bio)) {
OSD_ERR("!!! Failed to allocate list_objects BIO\n");
return PTR_ERR(bio);
}
- bio->bi_rw &= ~REQ_WRITE;
or->in.bio = bio;
or->in.total_bytes = bio->bi_size;
return 0;
@@ -832,12 +831,11 @@ int osd_req_write_kern(struct osd_request *or,
const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
{
struct request_queue *req_q = osd_request_queue(or->osd_dev);
- struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
+ struct bio *bio = bio_map_kern(req_q, buff, len, REQ_WRITE, GFP_KERNEL);
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= REQ_WRITE; /* FIXME: bio_set_dir() */
osd_req_write(or, obj, offset, bio, len);
return 0;
}
@@ -883,7 +881,7 @@ int osd_req_read_kern(struct osd_request *or,
const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
{
struct request_queue *req_q = osd_request_queue(or->osd_dev);
- struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
+ struct bio *bio = bio_map_kern(req_q, buff, len, READ, GFP_KERNEL);
if (IS_ERR(bio))
return PTR_ERR(bio);
@@ -950,12 +948,10 @@ static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
/* create a bio for continuation segment */
bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
- GFP_KERNEL);
+ REQ_WRITE, GFP_KERNEL);
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= REQ_WRITE;
-
/* integrity check the continuation before the bio is linked
* with the other data segments since the continuation
* integrity is separate from the other data segments.
@@ -1036,13 +1032,13 @@ EXPORT_SYMBOL(osd_req_read_sg);
* NOTE: Each buffer + len should not cross a page boundary.
*/
static struct bio *_create_sg_bios(struct osd_request *or,
- void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
+ void **buff, const struct osd_sg_entry *sglist, unsigned numentries, int rw)
{
struct request_queue *q = osd_request_queue(or->osd_dev);
struct bio *bio;
unsigned i;
- bio = bio_kmalloc(GFP_KERNEL, numentries);
+ bio = bio_kmalloc(GFP_KERNEL, numentries, rw);
if (unlikely(!bio)) {
OSD_DEBUG("Faild to allocate BIO size=%u\n", numentries);
return ERR_PTR(-ENOMEM);
@@ -1071,11 +1067,10 @@ int osd_req_write_sg_kern(struct osd_request *or,
const struct osd_obj_id *obj, void **buff,
const struct osd_sg_entry *sglist, unsigned numentries)
{
- struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+ struct bio *bio = _create_sg_bios(or, buff, sglist, numentries, REQ_WRITE);
if (IS_ERR(bio))
return PTR_ERR(bio);
- bio->bi_rw |= REQ_WRITE;
osd_req_write_sg(or, obj, bio, sglist, numentries);
return 0;
@@ -1086,7 +1081,7 @@ int osd_req_read_sg_kern(struct osd_request *or,
const struct osd_obj_id *obj, void **buff,
const struct osd_sg_entry *sglist, unsigned numentries)
{
- struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+ struct bio *bio = _create_sg_bios(or, buff, sglist, numentries, READ);
if (IS_ERR(bio))
return PTR_ERR(bio);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 8663900..de360a0 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -584,7 +584,8 @@ static struct bio *iblock_get_bio(
{
struct bio *bio;
- bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
+ /* FIXME: Set the bio direction (rw) properly */
+ bio = bio_alloc_bioset(GFP_NOIO, sg_num, 0, ib_dev->ibd_bio_set);
if (!(bio)) {
printk(KERN_ERR "Unable to allocate memory for bio\n");
*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
diff --git a/fs/bio.c b/fs/bio.c
index 9bfade8..5e20db7 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -251,10 +251,11 @@ void bio_free(struct bio *bio, struct bio_set *bs)
}
EXPORT_SYMBOL(bio_free);
-void bio_init(struct bio *bio)
+void bio_init(struct bio *bio, int rw)
{
memset(bio, 0, sizeof(*bio));
bio->bi_flags = 1 << BIO_UPTODATE;
+ bio->bi_rw = rw;
bio->bi_comp_cpu = -1;
atomic_set(&bio->bi_cnt, 1);
}
@@ -264,6 +265,7 @@ EXPORT_SYMBOL(bio_init);
* bio_alloc_bioset - allocate a bio for I/O
* @gfp_mask: the GFP_ mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
+ * @rw: direction of IO (read/write)
* @bs: the bio_set to allocate from.
*
* Description:
@@ -275,7 +277,7 @@ EXPORT_SYMBOL(bio_init);
* of a bio, to do the appropriate freeing of the bio once the reference
* count drops to zero.
**/
-struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
+struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, int rw, struct bio_set *bs)
{
unsigned long idx = BIO_POOL_NONE;
struct bio_vec *bvl = NULL;
@@ -287,7 +289,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
return NULL;
bio = p + bs->front_pad;
- bio_init(bio);
+ bio_init(bio, rw);
if (unlikely(!nr_iovecs))
goto out_set;
@@ -323,6 +325,7 @@ static void bio_fs_destructor(struct bio *bio)
* bio_alloc - allocate a new bio, memory pool backed
* @gfp_mask: allocation mask to use
* @nr_iovecs: number of iovecs
+ * @rw: direction of IO (read/write)
*
* bio_alloc will allocate a bio and associated bio_vec array that can hold
* at least @nr_iovecs entries. Allocations will be done from the
@@ -338,9 +341,9 @@ static void bio_fs_destructor(struct bio *bio)
* RETURNS:
* Pointer to new bio on success, NULL on failure.
*/
-struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs)
+struct bio *bio_alloc(gfp_t gfp_mask, int nr_iovecs, int rw)
{
- struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, fs_bio_set);
+ struct bio *bio = bio_alloc_bioset(gfp_mask, nr_iovecs, rw, fs_bio_set);
if (bio)
bio->bi_destructor = bio_fs_destructor;
@@ -360,13 +363,14 @@ static void bio_kmalloc_destructor(struct bio *bio)
* bio_kmalloc - allocate a bio for I/O using kmalloc()
* @gfp_mask: the GFP_ mask given to the slab allocator
* @nr_iovecs: number of iovecs to pre-allocate
+ * @rw: direction of IO (read/write)
*
* Description:
* Allocate a new bio with @nr_iovecs bvecs. If @gfp_mask contains
* %__GFP_WAIT, the allocation is guaranteed to succeed.
*
**/
-struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
+struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs, int rw)
{
struct bio *bio;
@@ -378,7 +382,7 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
if (unlikely(!bio))
return NULL;
- bio_init(bio);
+ bio_init(bio, rw);
bio->bi_flags |= BIO_POOL_NONE << BIO_POOL_OFFSET;
bio->bi_max_vecs = nr_iovecs;
bio->bi_io_vec = bio->bi_inline_vecs;
@@ -471,7 +475,7 @@ EXPORT_SYMBOL(__bio_clone);
*/
struct bio *bio_clone(struct bio *bio, gfp_t gfp_mask)
{
- struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, fs_bio_set);
+ struct bio *b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bio->bi_rw, fs_bio_set);
if (!b)
return NULL;
@@ -852,13 +856,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
return ERR_PTR(-ENOMEM);
ret = -ENOMEM;
- bio = bio_kmalloc(gfp_mask, nr_pages);
+
+ /* write_to_vm is reading from device and writing to pages */
+ bio = bio_kmalloc(gfp_mask, nr_pages, write_to_vm ? READ: WRITE);
if (!bio)
goto out_bmd;
- if (!write_to_vm)
- bio->bi_rw |= REQ_WRITE;
-
ret = 0;
if (map_data) {
@@ -985,7 +988,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
if (!nr_pages)
return ERR_PTR(-EINVAL);
- bio = bio_kmalloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(gfp_mask, nr_pages, write_to_vm ? READ: WRITE);
if (!bio)
return ERR_PTR(-ENOMEM);
@@ -1041,11 +1044,8 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
kfree(pages);
/*
- * set data direction, and check if mapped pages need bouncing
+ * check if mapped pages need bouncing
*/
- if (!write_to_vm)
- bio->bi_rw |= REQ_WRITE;
-
bio->bi_bdev = bdev;
bio->bi_flags |= (1 << BIO_USER_MAPPED);
return bio;
@@ -1161,7 +1161,7 @@ static void bio_map_kern_endio(struct bio *bio, int err)
}
static struct bio *__bio_map_kern(struct request_queue *q, void *data,
- unsigned int len, gfp_t gfp_mask)
+ unsigned int len, int rw, gfp_t gfp_mask)
{
unsigned long kaddr = (unsigned long)data;
unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1170,7 +1170,7 @@ static struct bio *__bio_map_kern(struct request_queue *q, void *data,
int offset, i;
struct bio *bio;
- bio = bio_kmalloc(gfp_mask, nr_pages);
+ bio = bio_kmalloc(gfp_mask, nr_pages, rw);
if (!bio)
return ERR_PTR(-ENOMEM);
@@ -1202,17 +1202,18 @@ static struct bio *__bio_map_kern(struct request_queue *q, void *data,
* @q: the struct request_queue for the bio
* @data: pointer to buffer to map
* @len: length in bytes
+ * @rw: direction of io
* @gfp_mask: allocation flags for bio allocation
*
* Map the kernel address into a bio suitable for io to a block
* device. Returns an error pointer in case of error.
*/
struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
- gfp_t gfp_mask)
+ int rw, gfp_t gfp_mask)
{
struct bio *bio;
- bio = __bio_map_kern(q, data, len, gfp_mask);
+ bio = __bio_map_kern(q, data, len, rw, gfp_mask);
if (IS_ERR(bio))
return bio;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index bfe42b0..7f811f8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -92,12 +92,12 @@ static inline int compressed_bio_size(struct btrfs_root *root,
}
static struct bio *compressed_bio_alloc(struct block_device *bdev,
- u64 first_byte, gfp_t gfp_flags)
+ u64 first_byte, int rw, gfp_t gfp_flags)
{
int nr_vecs;
nr_vecs = bio_get_nr_vecs(bdev);
- return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, gfp_flags);
+ return btrfs_bio_alloc(bdev, first_byte >> 9, nr_vecs, rw, gfp_flags);
}
static int check_compressed_csum(struct inode *inode,
@@ -356,7 +356,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
- bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+ bio = compressed_bio_alloc(bdev, first_byte, WRITE, GFP_NOFS);
if(!bio) {
kfree(cb);
return -ENOMEM;
@@ -400,7 +400,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
bio_put(bio);
- bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+ bio = compressed_bio_alloc(bdev, first_byte, WRITE, GFP_NOFS);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -628,7 +628,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
cb->len = uncompressed_len;
- comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
+ comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, READ, GFP_NOFS);
if (!comp_bio)
goto fail2;
comp_bio->bi_private = cb;
@@ -678,7 +678,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
bio_put(comp_bio);
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
- GFP_NOFS);
+ READ, GFP_NOFS);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 7055d11..fbe2d4b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1817,15 +1817,15 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
- gfp_t gfp_flags)
+ int rw, gfp_t gfp_flags)
{
struct bio *bio;
- bio = bio_alloc(gfp_flags, nr_vecs);
+ bio = bio_alloc(gfp_flags, nr_vecs, rw);
if (bio == NULL && (current->flags & PF_MEMALLOC)) {
while (!bio && (nr_vecs /= 2))
- bio = bio_alloc(gfp_flags, nr_vecs);
+ bio = bio_alloc(gfp_flags, nr_vecs, rw);
}
if (bio) {
@@ -1906,7 +1906,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
else
nr = bio_get_nr_vecs(bdev);
- bio = btrfs_bio_alloc(bdev, sector, nr, GFP_NOFS | __GFP_HIGH);
+ bio = btrfs_bio_alloc(bdev, sector, nr, rw, GFP_NOFS | __GFP_HIGH);
if (!bio)
return -ENOMEM;
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index a11a92e..6644886 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -296,5 +296,5 @@ int extent_clear_unlock_delalloc(struct inode *inode,
unsigned long op);
struct bio *
btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
- gfp_t gfp_flags);
+ int rw, gfp_t gfp_flags);
#endif
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 3601f0a..5a0fa9d 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1912,7 +1912,13 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
kfree(failrec);
return -EIO;
}
- bio = bio_alloc(GFP_NOFS, 1);
+
+ if (failed_bio->bi_rw & REQ_WRITE)
+ rw = WRITE;
+ else
+ rw = READ;
+
+ bio = bio_alloc(GFP_NOFS, 1, rw);
bio->bi_private = state;
bio->bi_end_io = failed_bio->bi_end_io;
bio->bi_sector = failrec->logical >> 9;
@@ -1920,11 +1926,6 @@ static int btrfs_io_failed_hook(struct bio *failed_bio,
bio->bi_size = 0;
bio_add_page(bio, page, failrec->len, start - page_offset(page));
- if (failed_bio->bi_rw & REQ_WRITE)
- rw = WRITE;
- else
- rw = READ;
-
ret = BTRFS_I(inode)->io_tree.ops->submit_bio_hook(inode, rw, bio,
failrec->last_mirror,
failrec->bio_flags, 0);
@@ -5874,10 +5875,10 @@ out:
}
static struct bio *btrfs_dio_bio_alloc(struct block_device *bdev,
- u64 first_sector, gfp_t gfp_flags)
+ u64 first_sector, int rw, gfp_t gfp_flags)
{
int nr_vecs = bio_get_nr_vecs(bdev);
- return btrfs_bio_alloc(bdev, first_sector, nr_vecs, gfp_flags);
+ return btrfs_bio_alloc(bdev, first_sector, nr_vecs, rw, gfp_flags);
}
static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
@@ -5958,7 +5959,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
}
async_submit = 1;
- bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, GFP_NOFS);
+ bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev, start_sector, rw, GFP_NOFS);
if (!bio)
return -ENOMEM;
bio->bi_private = dip;
@@ -5995,7 +5996,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
nr_pages = 0;
bio = btrfs_dio_bio_alloc(orig_bio->bi_bdev,
- start_sector, GFP_NOFS);
+ start_sector, rw, GFP_NOFS);
if (!bio)
goto out_err;
bio->bi_private = dip;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index a8d03d5..571f467 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -342,7 +342,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
int ret;
DECLARE_COMPLETION_ONSTACK(complete);
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = bio_alloc(GFP_NOFS, 1, rw);
bio->bi_bdev = bdev;
bio->bi_sector = sector;
bio_add_page(bio, page, PAGE_SIZE, 0);
@@ -565,7 +565,7 @@ static int scrub_submit(struct scrub_dev *sdev)
sbio = sdev->bios[sdev->curr];
- bio = bio_alloc(GFP_NOFS, sbio->count);
+ bio = bio_alloc(GFP_NOFS, sbio->count, READ);
if (!bio)
goto nomem;
diff --git a/fs/buffer.c b/fs/buffer.c
index 1a80b04..3679738 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2927,7 +2927,7 @@ int submit_bh(int rw, struct buffer_head * bh)
* from here on down, it's all bio -- do the initial mapping,
* submit_bio -> generic_make_request may further map this bio around
*/
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(GFP_NOIO, 1, rw);
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index ac5f164..310fa4d 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -327,7 +327,7 @@ EXPORT_SYMBOL_GPL(dio_end_io);
static void
dio_bio_alloc(struct dio *dio, struct block_device *bdev,
- sector_t first_sector, int nr_vecs)
+ sector_t first_sector, int nr_vecs, int rw)
{
struct bio *bio;
@@ -335,7 +335,7 @@ dio_bio_alloc(struct dio *dio, struct block_device *bdev,
* bio_alloc() is guaranteed to return a bio when called with
* __GFP_WAIT and we request a valid number of vectors.
*/
- bio = bio_alloc(GFP_KERNEL, nr_vecs);
+ bio = bio_alloc(GFP_KERNEL, nr_vecs, rw);
bio->bi_bdev = bdev;
bio->bi_sector = first_sector;
@@ -576,7 +576,7 @@ static int get_more_blocks(struct dio *dio)
/*
* There is no bio. Make one now.
*/
-static int dio_new_bio(struct dio *dio, sector_t start_sector)
+static int dio_new_bio(struct dio *dio, sector_t start_sector, int rw)
{
sector_t sector;
int ret, nr_pages;
@@ -588,7 +588,7 @@ static int dio_new_bio(struct dio *dio, sector_t start_sector)
nr_pages = min(dio->pages_in_io, bio_get_nr_vecs(dio->map_bh.b_bdev));
nr_pages = min(nr_pages, BIO_MAX_PAGES);
BUG_ON(nr_pages <= 0);
- dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages);
+ dio_bio_alloc(dio, dio->map_bh.b_bdev, sector, nr_pages, rw);
dio->boundary = 0;
out:
return ret;
@@ -668,14 +668,14 @@ static int dio_send_cur_page(struct dio *dio)
}
if (dio->bio == NULL) {
- ret = dio_new_bio(dio, dio->cur_page_block);
+ ret = dio_new_bio(dio, dio->cur_page_block, dio->rw);
if (ret)
goto out;
}
if (dio_bio_add_page(dio) != 0) {
dio_bio_submit(dio);
- ret = dio_new_bio(dio, dio->cur_page_block);
+ ret = dio_new_bio(dio, dio->cur_page_block, dio->rw);
if (ret == 0) {
ret = dio_bio_add_page(dio);
BUG_ON(ret != 0);
diff --git a/fs/exofs/ios.c b/fs/exofs/ios.c
index f74a2ec..da14994 100644
--- a/fs/exofs/ios.c
+++ b/fs/exofs/ios.c
@@ -359,7 +359,8 @@ static int _add_stripe_unit(struct exofs_io_state *ios, unsigned *cur_pg,
unsigned bio_size = (ios->nr_pages + pages_in_stripe) /
ios->layout->group_width;
- per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size);
+ /* FIXME: Set the bio rw properly */
+ per_dev->bio = bio_kmalloc(GFP_KERNEL, bio_size, 0);
if (unlikely(!per_dev->bio)) {
EXOFS_DBGMSG("Failed to allocate BIO size=%u\n",
bio_size);
@@ -560,8 +561,9 @@ static int _sbi_write_mirror(struct exofs_io_state *ios, int cur_comp)
struct bio *bio;
if (per_dev != master_dev) {
+ /* FIXME: Set the bio rw properly */
bio = bio_kmalloc(GFP_KERNEL,
- master_dev->bio->bi_max_vecs);
+ master_dev->bio->bi_max_vecs, 0);
if (unlikely(!bio)) {
EXOFS_DBGMSG(
"Failed to allocate BIO size=%u\n",
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 7bb8f76..42df62b 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -281,12 +281,14 @@ static int io_submit_init(struct ext4_io_submit *io,
struct page *page = bh->b_page;
int nvecs = bio_get_nr_vecs(bh->b_bdev);
struct bio *bio;
+ int wop;
io_end = ext4_init_io_end(inode, GFP_NOFS);
if (!io_end)
return -ENOMEM;
+ wop = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
do {
- bio = bio_alloc(GFP_NOIO, nvecs);
+ bio = bio_alloc(GFP_NOIO, nvecs, wop);
nvecs >>= 1;
} while (bio == NULL);
@@ -298,7 +300,7 @@ static int io_submit_init(struct ext4_io_submit *io,
io_end->offset = (page->index << PAGE_CACHE_SHIFT) + bh_offset(bh);
io->io_bio = bio;
- io->io_op = (wbc->sync_mode == WB_SYNC_ALL ? WRITE_SYNC : WRITE);
+ io->io_op = wop;
io->io_next_block = bh->b_blocknr;
return 0;
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 8ac9ae1..0ddb401 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -207,6 +207,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
struct gfs2_sb *p;
struct page *page;
struct bio *bio;
+ int rw;
page = alloc_page(GFP_NOFS);
if (unlikely(!page))
@@ -216,14 +217,15 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
ClearPageDirty(page);
lock_page(page);
- bio = bio_alloc(GFP_NOFS, 1);
+ rw = READ_SYNC | REQ_META;
+ bio = bio_alloc(GFP_NOFS, 1, rw);
bio->bi_sector = sector * (sb->s_blocksize >> 9);
bio->bi_bdev = sb->s_bdev;
bio_add_page(bio, page, PAGE_SIZE, 0);
bio->bi_end_io = end_bio_io_page;
bio->bi_private = page;
- submit_bio(READ_SYNC | REQ_META, bio);
+ submit_bio(rw, bio);
wait_on_page_locked(page);
bio_put(bio);
if (!PageUptodate(page)) {
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 4ac88ff..aead946 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -38,7 +38,7 @@ int hfsplus_submit_bio(struct block_device *bdev, sector_t sector,
struct bio *bio;
int ret = 0;
- bio = bio_alloc(GFP_NOIO, 1);
+ bio = bio_alloc(GFP_NOIO, 1, rw);
bio->bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = hfsplus_end_io_sync;
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 583636f..e78b772 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1985,6 +1985,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
{
struct bio *bio;
struct lbuf *bp;
+ int rw;
/*
* allocate a log buffer
@@ -1994,7 +1995,8 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bp->l_flag |= lbmREAD;
- bio = bio_alloc(GFP_NOFS, 1);
+ rw = READ_SYNC;
+ bio = bio_alloc(GFP_NOFS, 1, rw);
bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio->bi_bdev = log->bdev;
@@ -2008,7 +2010,7 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
bio->bi_end_io = lbmIODone;
bio->bi_private = bp;
- submit_bio(READ_SYNC, bio);
+ submit_bio(rw, bio);
wait_event(bp->l_ioevent, (bp->l_flag != lbmREAD));
@@ -2133,10 +2135,12 @@ static void lbmStartIO(struct lbuf * bp)
{
struct bio *bio;
struct jfs_log *log = bp->l_log;
+ int rw;
jfs_info("lbmStartIO\n");
- bio = bio_alloc(GFP_NOFS, 1);
+ rw = WRITE_SYNC;
+ bio = bio_alloc(GFP_NOFS, 1, rw);
bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio->bi_bdev = log->bdev;
bio->bi_io_vec[0].bv_page = bp->l_page;
@@ -2155,7 +2159,7 @@ static void lbmStartIO(struct lbuf * bp)
bio->bi_size = 0;
lbmIODone(bio, 0);
} else {
- submit_bio(WRITE_SYNC, bio);
+ submit_bio(rw, bio);
INCREMENT(lmStat.submitted);
}
}
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index 6740d34..89ab0f4 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -436,7 +436,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
}
len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = bio_alloc(GFP_NOFS, 1, WRITE);
bio->bi_bdev = inode->i_sb->s_bdev;
bio->bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_write_end_io;
@@ -515,7 +515,7 @@ static int metapage_readpage(struct file *fp, struct page *page)
if (bio)
submit_bio(READ, bio);
- bio = bio_alloc(GFP_NOFS, 1);
+ bio = bio_alloc(GFP_NOFS, 1, READ);
bio->bi_bdev = inode->i_sb->s_bdev;
bio->bi_sector = pblock << (inode->i_blkbits - 9);
bio->bi_end_io = metapage_read_end_io;
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index df0de27..cd91acd 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -25,7 +25,7 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
struct bio_vec bio_vec;
struct completion complete;
- bio_init(&bio);
+ bio_init(&bio, rw);
bio.bi_io_vec = &bio_vec;
bio_vec.bv_page = page;
bio_vec.bv_len = PAGE_SIZE;
@@ -101,7 +101,7 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
if (max_pages > BIO_MAX_PAGES)
max_pages = BIO_MAX_PAGES;
- bio = bio_alloc(GFP_NOFS, max_pages);
+ bio = bio_alloc(GFP_NOFS, max_pages, WRITE);
BUG_ON(!bio);
for (i = 0; i < nr_pages; i++) {
@@ -122,7 +122,7 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
nr_pages -= i;
i = 0;
- bio = bio_alloc(GFP_NOFS, max_pages);
+ bio = bio_alloc(GFP_NOFS, max_pages, WRITE);
BUG_ON(!bio);
}
page = find_lock_page(mapping, index + i);
@@ -196,7 +196,7 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
if (max_pages > BIO_MAX_PAGES)
max_pages = BIO_MAX_PAGES;
- bio = bio_alloc(GFP_NOFS, max_pages);
+ bio = bio_alloc(GFP_NOFS, max_pages, WRITE);
BUG_ON(!bio);
for (i = 0; i < nr_pages; i++) {
@@ -217,7 +217,7 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
nr_pages -= i;
i = 0;
- bio = bio_alloc(GFP_NOFS, max_pages);
+ bio = bio_alloc(GFP_NOFS, max_pages, WRITE);
BUG_ON(!bio);
}
bio->bi_io_vec[i].bv_page = super->s_erase_page;
diff --git a/fs/mpage.c b/fs/mpage.c
index fdfae9f..45e6121 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -80,16 +80,16 @@ static struct bio *mpage_bio_submit(int rw, struct bio *bio)
static struct bio *
mpage_alloc(struct block_device *bdev,
- sector_t first_sector, int nr_vecs,
+ sector_t first_sector, int nr_vecs, int rw,
gfp_t gfp_flags)
{
struct bio *bio;
- bio = bio_alloc(gfp_flags, nr_vecs);
+ bio = bio_alloc(gfp_flags, nr_vecs, rw);
if (bio == NULL && (current->flags & PF_MEMALLOC)) {
while (!bio && (nr_vecs /= 2))
- bio = bio_alloc(gfp_flags, nr_vecs);
+ bio = bio_alloc(gfp_flags, nr_vecs, rw);
}
if (bio) {
@@ -287,7 +287,7 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
alloc_new:
if (bio == NULL) {
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
+ min_t(int, nr_pages, bio_get_nr_vecs(bdev)), READ,
GFP_KERNEL);
if (bio == NULL)
goto confused;
@@ -580,7 +580,8 @@ page_is_mapped:
alloc_new:
if (bio == NULL) {
bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
- bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
+ bio_get_nr_vecs(bdev), WRITE,
+ GFP_NOFS|__GFP_HIGH);
if (bio == NULL)
goto confused;
}
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 8ff2ea3..f841869 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -695,7 +695,7 @@ out:
return ret;
}
-static int _io_rw_pagelist(struct objio_state *ios, gfp_t gfp_flags)
+static int _io_rw_pagelist(struct objio_state *ios, int rw, gfp_t gfp_flags)
{
u64 length = ios->ol_state.count;
u64 offset = ios->ol_state.offset;
@@ -709,7 +709,7 @@ static int _io_rw_pagelist(struct objio_state *ios, gfp_t gfp_flags)
if (length < si.group_length)
si.group_length = length;
- ret = _prepare_one_group(ios, si.group_length, &si, &last_pg, gfp_flags);
+ ret = _prepare_one_group(ios, si.group_length, &si, &last_pg, rw, gfp_flags);
if (unlikely(ret))
goto out;
@@ -864,7 +864,7 @@ ssize_t objio_read_pagelist(struct objlayout_io_state *ol_state)
ol_state);
int ret;
- ret = _io_rw_pagelist(ios, GFP_KERNEL);
+ ret = _io_rw_pagelist(ios, READ, GFP_KERNEL);
if (unlikely(ret))
return ret;
@@ -987,7 +987,7 @@ ssize_t objio_write_pagelist(struct objlayout_io_state *ol_state, bool stable)
int ret;
/* TODO: ios->stable = stable; */
- ret = _io_rw_pagelist(ios, GFP_NOFS);
+ ret = _io_rw_pagelist(ios, WRITE, GFP_NOFS);
if (unlikely(ret))
return ret;
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 850a7c0..a2dabfa 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -401,19 +401,20 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf,
* @nilfs: nilfs object
* @start: start block number of the bio
* @nr_vecs: request size of page vector.
+ * @rw: direction of IO
*
* Return Value: On success, pointer to the struct bio is returned.
* On error, NULL is returned.
*/
static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
- int nr_vecs)
+ int nr_vecs, int rw)
{
struct bio *bio;
- bio = bio_alloc(GFP_NOIO, nr_vecs);
+ bio = bio_alloc(GFP_NOIO, nr_vecs, rw);
if (bio == NULL) {
while (!bio && (nr_vecs >>= 1))
- bio = bio_alloc(GFP_NOIO, nr_vecs);
+ bio = bio_alloc(GFP_NOIO, nr_vecs, rw);
}
if (likely(bio)) {
bio->bi_bdev = nilfs->ns_bdev;
@@ -443,7 +444,7 @@ static int nilfs_segbuf_submit_bh(struct nilfs_segment_buffer *segbuf,
repeat:
if (!wi->bio) {
wi->bio = nilfs_alloc_seg_bio(wi->nilfs, wi->blocknr + wi->end,
- wi->nr_vecs);
+ wi->nr_vecs, mode);
if (unlikely(!wi->bio))
return -ENOMEM;
}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 9a3e6bb..94d8f05 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -390,7 +390,8 @@ static void o2hb_bio_end_io(struct bio *bio,
static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
struct o2hb_bio_wait_ctxt *wc,
unsigned int *current_slot,
- unsigned int max_slots)
+ unsigned int max_slots,
+ int rw)
{
int len, current_page;
unsigned int vec_len, vec_start;
@@ -404,7 +405,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
* GFP_KERNEL that the local node can get fenced. It would be
* nicest if we could pre-allocate these bios and avoid this
* all together. */
- bio = bio_alloc(GFP_ATOMIC, 16);
+ bio = bio_alloc(GFP_ATOMIC, 16, rw);
if (!bio) {
mlog(ML_ERROR, "Could not alloc slots BIO!\n");
bio = ERR_PTR(-ENOMEM);
@@ -451,7 +452,7 @@ static int o2hb_read_slots(struct o2hb_region *reg,
o2hb_bio_wait_init(&wc);
while(current_slot < max_slots) {
- bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots);
+ bio = o2hb_setup_one_bio(reg, &wc, &current_slot, max_slots, READ);
if (IS_ERR(bio)) {
status = PTR_ERR(bio);
mlog_errno(status);
@@ -483,7 +484,7 @@ static int o2hb_issue_node_write(struct o2hb_region *reg,
slot = o2nm_this_node();
- bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1);
+ bio = o2hb_setup_one_bio(reg, write_wc, &slot, slot+1, WRITE);
if (IS_ERR(bio)) {
status = PTR_ERR(bio);
mlog_errno(status);
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index 79ce38b..292ee05 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -418,10 +418,10 @@ xfs_submit_ioend_bio(
STATIC struct bio *
xfs_alloc_ioend_bio(
- struct buffer_head *bh)
+ struct buffer_head *bh, int rw)
{
int nvecs = bio_get_nr_vecs(bh->b_bdev);
- struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
+ struct bio *bio = bio_alloc(GFP_NOIO, nvecs, rw);
ASSERT(bio->bi_private == NULL);
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
@@ -492,6 +492,7 @@ xfs_submit_ioend(
struct buffer_head *bh;
struct bio *bio;
sector_t lastblock = 0;
+ int rw;
/* Pass 1 - start writeback */
do {
@@ -501,6 +502,7 @@ xfs_submit_ioend(
} while ((ioend = next) != NULL);
/* Pass 2 - submit I/O */
+ rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
ioend = head;
do {
next = ioend->io_list;
@@ -510,7 +512,7 @@ xfs_submit_ioend(
if (!bio) {
retry:
- bio = xfs_alloc_ioend_bio(bh);
+ bio = xfs_alloc_ioend_bio(bh, rw);
} else if (bh->b_blocknr != lastblock + 1) {
xfs_submit_ioend_bio(wbc, ioend, bio);
goto retry;
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 5e68099..4143da5 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -1246,7 +1246,7 @@ next_chunk:
if (nr_pages > total_nr_pages)
nr_pages = total_nr_pages;
- bio = bio_alloc(GFP_NOIO, nr_pages);
+ bio = bio_alloc(GFP_NOIO, nr_pages, rw);
bio->bi_bdev = bp->b_target->bt_bdev;
bio->bi_sector = sector;
bio->bi_end_io = xfs_buf_bio_end_io;
diff --git a/include/linux/bio.h b/include/linux/bio.h
index ce33e68..c6a4798 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -211,9 +211,9 @@ extern void bio_pair_release(struct bio_pair *dbio);
extern struct bio_set *bioset_create(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
-extern struct bio *bio_alloc(gfp_t, int);
-extern struct bio *bio_kmalloc(gfp_t, int);
-extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
+extern struct bio *bio_alloc(gfp_t, int, int);
+extern struct bio *bio_kmalloc(gfp_t, int, int);
+extern struct bio *bio_alloc_bioset(gfp_t, int, int, struct bio_set *);
extern void bio_put(struct bio *);
extern void bio_free(struct bio *, struct bio_set *);
@@ -224,7 +224,7 @@ extern int bio_phys_segments(struct request_queue *, struct bio *);
extern void __bio_clone(struct bio *, struct bio *);
extern struct bio *bio_clone(struct bio *, gfp_t);
-extern void bio_init(struct bio *);
+extern void bio_init(struct bio *, int rw);
extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
@@ -240,7 +240,7 @@ extern struct bio *bio_map_user_iov(struct request_queue *,
struct sg_iovec *, int, int, gfp_t);
extern void bio_unmap_user(struct bio *);
extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
- gfp_t);
+ int, gfp_t);
extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index d09dd10..fbfd851 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -31,7 +31,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector,
const int bio_rw = rw | REQ_SYNC;
struct bio *bio;
- bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
+ bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1, bio_rw);
bio->bi_sector = sector;
bio->bi_bdev = bdev;
bio->bi_end_io = end_swap_bio_read;
diff --git a/mm/bounce.c b/mm/bounce.c
index 1481de6..8541301 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -200,7 +200,7 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
if (!bio) {
unsigned int cnt = (*bio_orig)->bi_vcnt;
- bio = bio_alloc(GFP_NOIO, cnt);
+ bio = bio_alloc(GFP_NOIO, cnt, rw);
memset(bio->bi_io_vec, 0, cnt * sizeof(struct bio_vec));
}
diff --git a/mm/page_io.c b/mm/page_io.c
index dc76b4d..d5ce321 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -21,11 +21,12 @@
#include <asm/pgtable.h>
static struct bio *get_swap_bio(gfp_t gfp_flags,
- struct page *page, bio_end_io_t end_io)
+ struct page *page, int rw,
+ bio_end_io_t end_io)
{
struct bio *bio;
- bio = bio_alloc(gfp_flags, 1);
+ bio = bio_alloc(gfp_flags, 1, rw);
if (bio) {
bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
bio->bi_sector <<= PAGE_SHIFT - 9;
@@ -98,15 +99,16 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
unlock_page(page);
goto out;
}
- bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
+ if (wbc->sync_mode == WB_SYNC_ALL)
+ rw |= REQ_SYNC;
+
+ bio = get_swap_bio(GFP_NOIO, page, rw, end_swap_bio_write);
if (bio == NULL) {
set_page_dirty(page);
unlock_page(page);
ret = -ENOMEM;
goto out;
}
- if (wbc->sync_mode == WB_SYNC_ALL)
- rw |= REQ_SYNC;
count_vm_event(PSWPOUT);
set_page_writeback(page);
unlock_page(page);
@@ -122,7 +124,7 @@ int swap_readpage(struct page *page)
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageUptodate(page));
- bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
+ bio = get_swap_bio(GFP_KERNEL, page, READ, end_swap_bio_read);
if (bio == NULL) {
unlock_page(page);
ret = -ENOMEM;
^ permalink raw reply related [flat|nested] 8+ messages in thread