From: mchristi@redhat.com
To: linux-f2fs-devel@lists.sourceforge.net,
	linux-ext4@vger.kernel.org, konrad.wilk@oracle.com,
	drbd-dev@lists.linbit.com, philipp.reisner@linbit.com,
	lars.ellenberg@linbit.com, linux-raid@vger.kernel.org,
	dm-devel@redhat.com, linux-fsdevel@vger.kernel.org,
	linux-bcache@vger.kernel.org, linux-block@vger.kernel.org,
	linux-kernel@vger.kernel.org, linux-scsi@vger.kernel.org,
	linux-mtd@lists.infradead.org, target-devel@vger.kernel.org,
	linux-btrfs@vger.kernel.org, osd-dev@open-osd.org,
	xfs@oss.sgi.com, ocfs2-devel@oss.oracle.com
Cc: Mike Christie <mchristi@redhat.com>
Subject: [PATCH 27/42] block: prepare request creation/destruction code to use REQ_OPs
Date: Wed, 13 Apr 2016 14:36:13 -0500
Message-ID: <1460576188-5751-28-git-send-email-mchristi@redhat.com>
In-Reply-To: <1460576188-5751-1-git-send-email-mchristi@redhat.com>

From: Mike Christie <mchristi@redhat.com>

This patch prepares *_get_request/*_put_request and freed_request
to use separate variables for the operation and the flags. In the
next patches the struct request users will be converted, as was
done for bios: request->op will carry the REQ_OP and
request->cmd_flags the rq_flag_bits.
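
To illustrate (a sketch of the calling convention taken from the
hunks below, not extra code in the patch), callers stop packing the
direction into the flags and pass the op separately, while helpers
that have not yet been converted, such as rw_is_sync() and
elv_may_queue(), are handed the OR of the two:

	/* before: direction and flags mixed into one bitmask */
	rw_flags = bio_data_dir(bio);
	if (sync)
		rw_flags |= REQ_SYNC;
	req = get_request(q, rw_flags, bio, GFP_NOIO);

	/* after: the op and the rq_flag_bits travel separately */
	int op_flags = 0;
	if (sync)
		op_flags |= REQ_SYNC;
	req = get_request(q, bio_data_dir(bio), op_flags, bio, GFP_NOIO);

	/* unconverted helpers still expect the combined mask */
	const bool is_sync = rw_is_sync(op | op_flags) != 0;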

There is some temporary compat code in __get_request that lets
users keep reading the operation from cmd_flags. It will be deleted
in one of the last patches, once all drivers have been converted.
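
For example (a hypothetical sketch, not part of the patch; the
do_write_accounting() helper is made up, and REQ_OP_WRITE/REQ_WRITE
are the names used elsewhere in this series), both styles of reader
keep working while the compat code is in place:

	/* tmp compat in __get_request(): set both fields */
	rq->cmd_flags = op | op_flags | REQ_ALLOCED;
	rq->op = op;

	/* converted code reads the new field ... */
	if (rq->op == REQ_OP_WRITE)
		do_write_accounting(rq);	/* placeholder helper */

	/* ... while unconverted code still checks cmd_flags */
	if (rq->cmd_flags & REQ_WRITE)
		do_write_accounting(rq);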

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
---
 block/blk-core.c | 56 +++++++++++++++++++++++++++++++-------------------------
 1 file changed, 31 insertions(+), 25 deletions(-)

diff --git a/block/blk-core.c b/block/blk-core.c
index 4224775..f1545d1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -959,10 +959,10 @@ static void __freed_request(struct request_list *rl, int sync)
  * A request has just been released.  Account for it, update the full and
  * congestion status, wake up any waiters.   Called under q->queue_lock.
  */
-static void freed_request(struct request_list *rl, unsigned int flags)
+static void freed_request(struct request_list *rl, int op, unsigned int flags)
 {
 	struct request_queue *q = rl->q;
-	int sync = rw_is_sync(flags);
+	int sync = rw_is_sync(op | flags);
 
 	q->nr_rqs[sync]--;
 	rl->count[sync]--;
@@ -1054,7 +1054,8 @@ static struct io_context *rq_ioc(struct bio *bio)
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1065,21 +1066,22 @@ static struct io_context *rq_ioc(struct bio *bio)
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *__get_request(struct request_list *rl, int rw_flags,
-				     struct bio *bio, gfp_t gfp_mask)
+static struct request *__get_request(struct request_list *rl, int op,
+				     int op_flags, struct bio *bio,
+				     gfp_t gfp_mask)
 {
 	struct request_queue *q = rl->q;
 	struct request *rq;
 	struct elevator_type *et = q->elevator->type;
 	struct io_context *ioc = rq_ioc(bio);
 	struct io_cq *icq = NULL;
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const bool is_sync = rw_is_sync(op | op_flags) != 0;
 	int may_queue;
 
 	if (unlikely(blk_queue_dying(q)))
 		return ERR_PTR(-ENODEV);
 
-	may_queue = elv_may_queue(q, rw_flags);
+	may_queue = elv_may_queue(q, op | op_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -1123,7 +1125,7 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
 	/*
 	 * Decide whether the new request will be managed by elevator.  If
-	 * so, mark @rw_flags and increment elvpriv.  Non-zero elvpriv will
+	 * so, mark @op_flags and increment elvpriv.  Non-zero elvpriv will
 	 * prevent the current elevator from being destroyed until the new
 	 * request is freed.  This guarantees icq's won't be destroyed and
 	 * makes creating new ones safe.
@@ -1132,14 +1134,14 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 	 * it will be created after releasing queue_lock.
 	 */
 	if (blk_rq_should_init_elevator(bio) && !blk_queue_bypass(q)) {
-		rw_flags |= REQ_ELVPRIV;
+		op_flags |= REQ_ELVPRIV;
 		q->nr_rqs_elvpriv++;
 		if (et->icq_cache && ioc)
 			icq = ioc_lookup_icq(ioc, q);
 	}
 
 	if (blk_queue_io_stat(q))
-		rw_flags |= REQ_IO_STAT;
+		op_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
 	/* allocate and init request */
@@ -1149,10 +1151,12 @@ static struct request *__get_request(struct request_list *rl, int rw_flags,
 
 	blk_rq_init(q, rq);
 	blk_rq_set_rl(rq, rl);
-	rq->cmd_flags = rw_flags | REQ_ALLOCED;
+	/* tmp compat - allow users to check either one for the op */
+	rq->cmd_flags = op | op_flags | REQ_ALLOCED;
+	rq->op = op;
 
 	/* init elvpriv */
-	if (rw_flags & REQ_ELVPRIV) {
+	if (op_flags & REQ_ELVPRIV) {
 		if (unlikely(et->icq_cache && !icq)) {
 			if (ioc)
 				icq = ioc_create_icq(ioc, q, gfp_mask);
@@ -1178,7 +1182,7 @@ out:
 	if (ioc_batching(q, ioc))
 		ioc->nr_batch_requests--;
 
-	trace_block_getrq(q, bio, rw_flags & 1);
+	trace_block_getrq(q, bio, op);
 	return rq;
 
 fail_elvpriv:
@@ -1208,7 +1212,7 @@ fail_alloc:
 	 * queue, but this is pretty rare.
 	 */
 	spin_lock_irq(q->queue_lock);
-	freed_request(rl, rw_flags);
+	freed_request(rl, op, op_flags);
 
 	/*
 	 * in the very unlikely event that allocation failed and no
@@ -1226,7 +1230,8 @@ rq_starved:
 /**
  * get_request - get a free request
  * @q: request_queue to allocate request from
- * @rw_flags: RW and SYNC flags
+ * @op: REQ_OP_READ/REQ_OP_WRITE
+ * @op_flags: rq_flag_bits
  * @bio: bio to allocate request for (can be %NULL)
  * @gfp_mask: allocation mask
  *
@@ -1237,17 +1242,18 @@ rq_starved:
  * Returns ERR_PTR on failure, with @q->queue_lock held.
  * Returns request pointer on success, with @q->queue_lock *not held*.
  */
-static struct request *get_request(struct request_queue *q, int rw_flags,
-				   struct bio *bio, gfp_t gfp_mask)
+static struct request *get_request(struct request_queue *q, int op,
+				   int op_flags, struct bio *bio,
+				   gfp_t gfp_mask)
 {
-	const bool is_sync = rw_is_sync(rw_flags) != 0;
+	const bool is_sync = rw_is_sync(op | op_flags) != 0;
 	DEFINE_WAIT(wait);
 	struct request_list *rl;
 	struct request *rq;
 
 	rl = blk_get_rl(q, bio);	/* transferred to @rq on success */
 retry:
-	rq = __get_request(rl, rw_flags, bio, gfp_mask);
+	rq = __get_request(rl, op, op_flags, bio, gfp_mask);
 	if (!IS_ERR(rq))
 		return rq;
 
@@ -1260,7 +1266,7 @@ retry:
 	prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
 				  TASK_UNINTERRUPTIBLE);
 
-	trace_block_sleeprq(q, bio, rw_flags & 1);
+	trace_block_sleeprq(q, bio, op);
 
 	spin_unlock_irq(q->queue_lock);
 	io_schedule();
@@ -1289,7 +1295,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
 	create_io_context(gfp_mask, q->node);
 
 	spin_lock_irq(q->queue_lock);
-	rq = get_request(q, rw, NULL, gfp_mask);
+	rq = get_request(q, rw, 0, NULL, gfp_mask);
 	if (IS_ERR(rq))
 		spin_unlock_irq(q->queue_lock);
 	/* q->queue_lock is unlocked at this point */
@@ -1491,13 +1497,14 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
 		unsigned int flags = req->cmd_flags;
+		int op = req->op;
 		struct request_list *rl = blk_rq_rl(req);
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(ELV_ON_HASH(req));
 
 		blk_free_request(rl, req);
-		freed_request(rl, flags);
+		freed_request(rl, op, flags);
 		blk_put_rl(rl);
 	}
 }
@@ -1713,7 +1720,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
-	int el_ret, rw_flags, where = ELEVATOR_INSERT_SORT;
+	int el_ret, rw_flags = 0, where = ELEVATOR_INSERT_SORT;
 	struct request *req;
 	unsigned int request_count = 0;
 
@@ -1773,7 +1780,6 @@ get_rq:
 	 * but we need to set it earlier to expose the sync flag to the
 	 * rq allocator and io schedulers.
 	 */
-	rw_flags = bio_data_dir(bio);
 	if (sync)
 		rw_flags |= REQ_SYNC;
 
@@ -1781,7 +1787,7 @@ get_rq:
 	 * Grab a free request. This might sleep but cannot fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request(q, rw_flags, bio, GFP_NOIO);
+	req = get_request(q, bio_data_dir(bio), rw_flags, bio, GFP_NOIO);
 	if (IS_ERR(req)) {
 		bio->bi_error = PTR_ERR(req);
 		bio_endio(bio);
-- 
2.7.2

