From: Ming Lei <ming.lei@redhat.com>
To: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org,
	Jeffle Xu <jefflexu@linux.alibaba.com>,
	Mike Snitzer <snitzer@redhat.com>,
	dm-devel@redhat.com, Hannes Reinecke <hare@suse.de>,
	Ming Lei <ming.lei@redhat.com>
Subject: [PATCH V5 03/12] block: create io poll context for submission and poll task
Date: Thu,  1 Apr 2021 10:19:18 +0800	[thread overview]
Message-ID: <20210401021927.343727-4-ming.lei@redhat.com> (raw)
In-Reply-To: <20210401021927.343727-1-ming.lei@redhat.com>

Create a per-task io poll context for both the IO submission task and
the poll task if the queue is bio based and supports polling.

This io poll context includes two queues:

1) a submission queue (sq) for storing HIPRI bios, written by the
   submission task and read by the poll task.
2) a polling queue (pq) for holding bios moved over from the sq; it is
   used only in the poll context for running bio polling.

Following patches will implement bio based io polling on top of this
context.
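
As an illustration of the intended flow (a rough sketch only;
bio_grp_list_add() and bio_grp_list_move() are hypothetical stand-ins
for helpers that later patches in this series introduce):

	static void poll_ctx_submit_sketch(struct blk_bio_poll_ctx *pc,
					   struct bio *bio)
	{
		/* submission task: stage a HIPRI bio on the sq */
		spin_lock(&pc->sq_lock);
		bio_grp_list_add(pc->sq, bio);	/* hypothetical helper */
		spin_unlock(&pc->sq_lock);
	}

	static void poll_ctx_reap_sketch(struct blk_bio_poll_ctx *pc)
	{
		/* poll task: move staged bios from the sq to the pq */
		spin_lock(&pc->pq_lock);
		spin_lock(&pc->sq_lock);
		bio_grp_list_move(pc->pq, pc->sq);	/* hypothetical helper */
		spin_unlock(&pc->sq_lock);
		spin_unlock(&pc->pq_lock);

		/* ... then poll bios on pc->pq without touching sq_lock ... */
	}

Keeping the two queues separate means the submission side only ever
contends on sq_lock, while the actual polling work runs against the pq.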

Reviewed-by: Jeffle Xu <jefflexu@linux.alibaba.com>
Reviewed-by: Hannes Reinecke <hare@suse.de>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
---
 block/blk-core.c          | 79 +++++++++++++++++++++++++++++++++------
 block/blk-ioc.c           |  1 +
 block/blk-mq.c            | 14 +++++++
 block/blk.h               | 38 +++++++++++++++++++
 include/linux/iocontext.h |  2 +
 5 files changed, 123 insertions(+), 11 deletions(-)
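
For orientation, bio_poll_ctx_init() below carves the context and both
queues out of a single allocation; a sketch of the resulting layout
(group counts are the defaults: 16 sq groups, 32 pq groups):

	+-------------------------+--------------------+--------------------+
	| struct blk_bio_poll_ctx | sq: bio_grp_list   | pq: bio_grp_list   |
	|                         |  + 16 group slots  |  + 32 group slots  |
	+-------------------------+--------------------+--------------------+
	^ pc                      ^ pc->sq             ^ pc->pq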

diff --git a/block/blk-core.c b/block/blk-core.c
index a31371d55b9d..8a21a8c010a6 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -792,6 +792,61 @@ static inline blk_status_t blk_check_zone_append(struct request_queue *q,
 	return BLK_STS_OK;
 }
 
+static inline struct blk_bio_poll_ctx *blk_get_bio_poll_ctx(void)
+{
+	struct io_context *ioc = current->io_context;
+
+	return ioc ? ioc->data : NULL;
+}
+
+static inline unsigned int bio_grp_list_size(unsigned int nr_grps)
+{
+	return sizeof(struct bio_grp_list) + nr_grps *
+		sizeof(struct bio_grp_list_data);
+}
+
+static void bio_poll_ctx_init(struct blk_bio_poll_ctx *pc)
+{
+	pc->sq = (void *)pc + sizeof(*pc);
+	pc->sq->max_nr_grps = BLK_BIO_POLL_SQ_SZ;
+
+	pc->pq = (void *)pc->sq + bio_grp_list_size(BLK_BIO_POLL_SQ_SZ);
+	pc->pq->max_nr_grps = BLK_BIO_POLL_PQ_SZ;
+
+	spin_lock_init(&pc->sq_lock);
+	spin_lock_init(&pc->pq_lock);
+}
+
+void bio_poll_ctx_alloc(struct io_context *ioc)
+{
+	struct blk_bio_poll_ctx *pc;
+	unsigned int size = sizeof(*pc) +
+		bio_grp_list_size(BLK_BIO_POLL_SQ_SZ) +
+		bio_grp_list_size(BLK_BIO_POLL_PQ_SZ);
+
+	pc = kzalloc(size, GFP_ATOMIC);
+	if (pc) {
+		bio_poll_ctx_init(pc);
+		if (cmpxchg(&ioc->data, NULL, (void *)pc))
+			kfree(pc);
+	}
+}
+
+static inline bool blk_queue_support_bio_poll(struct request_queue *q)
+{
+	return !queue_is_mq(q) && blk_queue_poll(q);
+}
+
+static inline void blk_bio_poll_preprocess(struct request_queue *q,
+		struct bio *bio)
+{
+	if (!(bio->bi_opf & REQ_HIPRI))
+		return;
+
+	if (!blk_queue_poll(q) || (!queue_is_mq(q) && !blk_get_bio_poll_ctx()))
+		bio->bi_opf &= ~REQ_HIPRI;
+}
+
 static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 {
 	struct block_device *bdev = bio->bi_bdev;
@@ -836,8 +891,19 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 		}
 	}
 
-	if (!blk_queue_poll(q))
-		bio->bi_opf &= ~REQ_HIPRI;
+	/*
+	 * Various block parts want %current->io_context, so allocate it up
+	 * front rather than dealing with lots of pain to allocate it only
+	 * where needed. This may fail and the block layer knows how to live
+	 * with it.
+	 */
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, GFP_ATOMIC, q->node);
+
+	if (blk_queue_support_bio_poll(q) && (bio->bi_opf & REQ_HIPRI))
+		blk_create_io_poll_context(q);
+
+	blk_bio_poll_preprocess(q, bio);
 
 	switch (bio_op(bio)) {
 	case REQ_OP_DISCARD:
@@ -876,15 +942,6 @@ static noinline_for_stack bool submit_bio_checks(struct bio *bio)
 		break;
 	}
 
-	/*
-	 * Various block parts want %current->io_context, so allocate it up
-	 * front rather than dealing with lots of pain to allocate it only
-	 * where needed. This may fail and the block layer knows how to live
-	 * with it.
-	 */
-	if (unlikely(!current->io_context))
-		create_task_io_context(current, GFP_ATOMIC, q->node);
-
 	if (blk_throtl_bio(bio)) {
 		blkcg_bio_issue_init(bio);
 		return false;
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index b0cde18c4b8c..5574c398eff6 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -19,6 +19,7 @@ static struct kmem_cache *iocontext_cachep;
 
 static inline void free_io_context(struct io_context *ioc)
 {
+	kfree(ioc->data);
 	kmem_cache_free(iocontext_cachep, ioc);
 }
 
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 63c81df3b8b5..1ada2c0e76b1 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -3852,6 +3852,17 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
 	return blk_mq_poll_hybrid_sleep(q, rq);
 }
 
+static int blk_bio_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
+{
+	/*
+	 * Make sure the io poll context exists, so the poll queue can
+	 * hold poll bios and cookies moved from the submission queue.
+	 */
+	blk_create_io_poll_context(q);
+
+	return 0;
+}
+
 /**
  * blk_poll - poll for IO completions
  * @q:  the queue
@@ -3875,6 +3886,9 @@ int blk_poll(struct request_queue *q, blk_qc_t cookie, bool spin)
 	if (current->plug)
 		blk_flush_plug_list(current->plug, false);
 
+	if (!queue_is_mq(q))
+		return blk_bio_poll(q, cookie, spin);
+
 	hctx = q->queue_hw_ctx[blk_qc_t_to_queue_num(cookie)];
 
 	/*
diff --git a/block/blk.h b/block/blk.h
index 3b53e44b967e..35901cee709d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -357,4 +357,42 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
 		struct page *page, unsigned int len, unsigned int offset,
 		unsigned int max_sectors, bool *same_page);
 
+/* Grouping bios that share the same data into one list */
+struct bio_grp_list_data {
+	void *grp_data;
+
+	/* all bios in this list share the same 'grp_data' */
+	struct bio_list list;
+};
+
+struct bio_grp_list {
+	unsigned int max_nr_grps, nr_grps;
+	struct bio_grp_list_data head[];
+};
+
+struct blk_bio_poll_ctx {
+	spinlock_t sq_lock;
+	struct bio_grp_list *sq;
+
+	spinlock_t pq_lock;
+	struct bio_grp_list *pq;
+};
+
+#define BLK_BIO_POLL_SQ_SZ		16U
+#define BLK_BIO_POLL_PQ_SZ		(BLK_BIO_POLL_SQ_SZ * 2)
+
+void bio_poll_ctx_alloc(struct io_context *ioc);
+
+static inline void blk_create_io_poll_context(struct request_queue *q)
+{
+	struct io_context *ioc;
+
+	if (unlikely(!current->io_context))
+		create_task_io_context(current, GFP_ATOMIC, q->node);
+
+	ioc = current->io_context;
+	if (unlikely(ioc && !ioc->data))
+		bio_poll_ctx_alloc(ioc);
+}
+
 #endif /* BLK_INTERNAL_H */
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 0a9dc40b7be8..f9a467571356 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -110,6 +110,8 @@ struct io_context {
 	struct io_cq __rcu	*icq_hint;
 	struct hlist_head	icq_list;
 
+	void			*data;
+
 	struct work_struct release_work;
 };
 
-- 
2.29.2


Thread overview: 92+ messages

2021-04-01  2:19 [PATCH V5 00/12] block: support bio based io polling Ming Lei
2021-04-01  2:19 ` [PATCH V5 01/12] block: add helper of blk_queue_poll Ming Lei
2021-04-01  2:19 ` [PATCH V5 02/12] block: add one helper to free io_context Ming Lei
2021-04-01  2:19 ` [PATCH V5 03/12] block: create io poll context for submission and poll task Ming Lei [this message]
2021-04-12 10:19   ` Christoph Hellwig
2021-04-01  2:19 ` [PATCH V5 04/12] block: add req flag of REQ_POLL_CTX Ming Lei
2021-04-01  2:19 ` [PATCH V5 05/12] block: add new field into 'struct bvec_iter' Ming Lei
2021-04-12  9:26   ` Christoph Hellwig
2021-04-13  9:36     ` Ming Lei
2021-04-01  2:19 ` [PATCH V5 06/12] block/mq: extract one helper function polling hw queue Ming Lei
2021-04-12  9:29   ` Christoph Hellwig
2021-04-01  2:19 ` [PATCH V5 07/12] block: prepare for supporting bio_list via other link Ming Lei
2021-04-12 10:18   ` Christoph Hellwig
2021-04-12 11:37     ` Ming Lei
2021-04-01  2:19 ` [PATCH V5 08/12] block: use per-task poll context to implement bio based io polling Ming Lei
2021-04-12  9:54   ` Christoph Hellwig
2021-04-12 10:20     ` Ming Lei
2021-04-12 10:29       ` Christoph Hellwig
2021-04-12 11:42         ` Ming Lei
2021-04-12 10:16   ` Christoph Hellwig
2021-04-12 10:37     ` Ming Lei
2021-04-01  2:19 ` [PATCH V5 09/12] blk-mq: limit hw queues to be polled in each blk_poll() Ming Lei
2021-04-01  2:19 ` [PATCH V5 10/12] block: add queue_to_disk() to get gendisk from request_queue Ming Lei
2021-04-12 12:52   ` Jens Axboe
2021-04-01  2:19 ` [PATCH V5 11/12] block: add poll_capable method to support bio-based IO polling Ming Lei
2021-04-12  9:38   ` Christoph Hellwig
2021-04-14  8:38     ` JeffleXu
2021-04-14 11:24       ` Ming Lei
2021-04-15  1:34         ` JeffleXu
2021-04-15  7:43           ` Ming Lei
2021-04-15  9:21             ` JeffleXu
2021-04-15 10:06               ` Ming Lei
2021-04-15 11:21                 ` JeffleXu
2021-04-15 13:08                   ` Ming Lei
2021-04-16  8:00   ` [PATCH] block: introduce QUEUE_FLAG_POLL_CAP flag Jeffle Xu
2021-04-16  8:42     ` JeffleXu
2021-04-16  9:07     ` Ming Lei
2021-04-16 10:20       ` JeffleXu
2021-04-17 14:06       ` JeffleXu
2021-04-19  2:21         ` Ming Lei
2021-04-19  5:40           ` JeffleXu
2021-04-19 13:36             ` Ming Lei
2021-04-20  7:25               ` JeffleXu
2021-04-01  2:19 ` [PATCH V5 12/12] dm: support IO polling for bio-based dm device Ming Lei
2021-04-09 15:39 ` [PATCH V5 00/12] block: support bio based io polling Ming Lei
2021-04-12  9:46 ` Christoph Hellwig