All of lore.kernel.org
 help / color / mirror / Atom feed
From: Javier Gonzalez <javier@cnexlabs.com>
To: Kent Overstreet <kent.overstreet@gmail.com>
Cc: "linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	"linux-block@vger.kernel.org" <linux-block@vger.kernel.org>,
	Jens Axboe <axboe@kernel.dk>,
	Christoph Hellwig <hch@infradead.org>,
	"colyli@suse.de" <colyli@suse.de>,
	"snitzer@redhat.com" <snitzer@redhat.com>,
	"darrick.wong@oracle.com" <darrick.wong@oracle.com>,
	"clm@fb.com" <clm@fb.com>, "bacik@fb.com" <bacik@fb.com>,
	"linux-xfs@vger.kernel.org" <linux-xfs@vger.kernel.org>,
	"drbd-dev@lists.linbit.com" <drbd-dev@lists.linbit.com>,
	"linux-btrfs@vger.kernel.org" <linux-btrfs@vger.kernel.org>,
	"linux-raid@vger.kernel.org" <linux-raid@vger.kernel.org>,
	NeilBrown <neilb@suse.com>
Subject: Re: [PATCH 04/12] lightnvm: convert to bioset_init()/mempool_init()
Date: Tue, 22 May 2018 10:10:13 +0000	[thread overview]
Message-ID: <84EF687A-078F-4807-A667-39A8A91D48D0@cnexlabs.com> (raw)
In-Reply-To: <20180520222558.7053-5-kent.overstreet@gmail.com>

[-- Attachment #1: Type: text/plain, Size: 13221 bytes --]

> On 21 May 2018, at 00.25, Kent Overstreet <kent.overstreet@gmail.com> wrote:
> 
> Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
> ---
> drivers/lightnvm/pblk-core.c     | 30 ++++++-------
> drivers/lightnvm/pblk-init.c     | 72 ++++++++++++++++----------------
> drivers/lightnvm/pblk-read.c     |  4 +-
> drivers/lightnvm/pblk-recovery.c |  2 +-
> drivers/lightnvm/pblk-write.c    |  8 ++--
> drivers/lightnvm/pblk.h          | 14 +++----
> 6 files changed, 65 insertions(+), 65 deletions(-)
> 
> diff --git a/drivers/lightnvm/pblk-core.c b/drivers/lightnvm/pblk-core.c
> index 94d5d97c9d..934341b104 100644
> --- a/drivers/lightnvm/pblk-core.c
> +++ b/drivers/lightnvm/pblk-core.c
> @@ -40,7 +40,7 @@ static void pblk_line_mark_bb(struct work_struct *work)
> 	}
> 
> 	kfree(ppa);
> -	mempool_free(line_ws, pblk->gen_ws_pool);
> +	mempool_free(line_ws, &pblk->gen_ws_pool);
> }
> 
> static void pblk_mark_bb(struct pblk *pblk, struct pblk_line *line,
> @@ -102,7 +102,7 @@ static void pblk_end_io_erase(struct nvm_rq *rqd)
> 	struct pblk *pblk = rqd->private;
> 
> 	__pblk_end_io_erase(pblk, rqd);
> -	mempool_free(rqd, pblk->e_rq_pool);
> +	mempool_free(rqd, &pblk->e_rq_pool);
> }
> 
> /*
> @@ -237,15 +237,15 @@ struct nvm_rq *pblk_alloc_rqd(struct pblk *pblk, int type)
> 	switch (type) {
> 	case PBLK_WRITE:
> 	case PBLK_WRITE_INT:
> -		pool = pblk->w_rq_pool;
> +		pool = &pblk->w_rq_pool;
> 		rq_size = pblk_w_rq_size;
> 		break;
> 	case PBLK_READ:
> -		pool = pblk->r_rq_pool;
> +		pool = &pblk->r_rq_pool;
> 		rq_size = pblk_g_rq_size;
> 		break;
> 	default:
> -		pool = pblk->e_rq_pool;
> +		pool = &pblk->e_rq_pool;
> 		rq_size = pblk_g_rq_size;
> 	}
> 
> @@ -265,13 +265,13 @@ void pblk_free_rqd(struct pblk *pblk, struct nvm_rq *rqd, int type)
> 	case PBLK_WRITE:
> 		kfree(((struct pblk_c_ctx *)nvm_rq_to_pdu(rqd))->lun_bitmap);
> 	case PBLK_WRITE_INT:
> -		pool = pblk->w_rq_pool;
> +		pool = &pblk->w_rq_pool;
> 		break;
> 	case PBLK_READ:
> -		pool = pblk->r_rq_pool;
> +		pool = &pblk->r_rq_pool;
> 		break;
> 	case PBLK_ERASE:
> -		pool = pblk->e_rq_pool;
> +		pool = &pblk->e_rq_pool;
> 		break;
> 	default:
> 		pr_err("pblk: trying to free unknown rqd type\n");
> @@ -292,7 +292,7 @@ void pblk_bio_free_pages(struct pblk *pblk, struct bio *bio, int off,
> 
> 	for (i = off; i < nr_pages + off; i++) {
> 		bv = bio->bi_io_vec[i];
> -		mempool_free(bv.bv_page, pblk->page_bio_pool);
> +		mempool_free(bv.bv_page, &pblk->page_bio_pool);
> 	}
> }
> 
> @@ -304,12 +304,12 @@ int pblk_bio_add_pages(struct pblk *pblk, struct bio *bio, gfp_t flags,
> 	int i, ret;
> 
> 	for (i = 0; i < nr_pages; i++) {
> -		page = mempool_alloc(pblk->page_bio_pool, flags);
> +		page = mempool_alloc(&pblk->page_bio_pool, flags);
> 
> 		ret = bio_add_pc_page(q, bio, page, PBLK_EXPOSED_PAGE_SIZE, 0);
> 		if (ret != PBLK_EXPOSED_PAGE_SIZE) {
> 			pr_err("pblk: could not add page to bio\n");
> -			mempool_free(page, pblk->page_bio_pool);
> +			mempool_free(page, &pblk->page_bio_pool);
> 			goto err;
> 		}
> 	}
> @@ -1593,7 +1593,7 @@ static void pblk_line_put_ws(struct work_struct *work)
> 	struct pblk_line *line = line_put_ws->line;
> 
> 	__pblk_line_put(pblk, line);
> -	mempool_free(line_put_ws, pblk->gen_ws_pool);
> +	mempool_free(line_put_ws, &pblk->gen_ws_pool);
> }
> 
> void pblk_line_put(struct kref *ref)
> @@ -1610,7 +1610,7 @@ void pblk_line_put_wq(struct kref *ref)
> 	struct pblk *pblk = line->pblk;
> 	struct pblk_line_ws *line_put_ws;
> 
> -	line_put_ws = mempool_alloc(pblk->gen_ws_pool, GFP_ATOMIC);
> +	line_put_ws = mempool_alloc(&pblk->gen_ws_pool, GFP_ATOMIC);
> 	if (!line_put_ws)
> 		return;
> 
> @@ -1752,7 +1752,7 @@ void pblk_line_close_ws(struct work_struct *work)
> 	struct pblk_line *line = line_ws->line;
> 
> 	pblk_line_close(pblk, line);
> -	mempool_free(line_ws, pblk->gen_ws_pool);
> +	mempool_free(line_ws, &pblk->gen_ws_pool);
> }
> 
> void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
> @@ -1761,7 +1761,7 @@ void pblk_gen_run_ws(struct pblk *pblk, struct pblk_line *line, void *priv,
> {
> 	struct pblk_line_ws *line_ws;
> 
> -	line_ws = mempool_alloc(pblk->gen_ws_pool, gfp_mask);
> +	line_ws = mempool_alloc(&pblk->gen_ws_pool, gfp_mask);
> 
> 	line_ws->pblk = pblk;
> 	line_ws->line = line;
> diff --git a/drivers/lightnvm/pblk-init.c b/drivers/lightnvm/pblk-init.c
> index 91a5bc2556..9a984abd3d 100644
> --- a/drivers/lightnvm/pblk-init.c
> +++ b/drivers/lightnvm/pblk-init.c
> @@ -23,7 +23,7 @@
> static struct kmem_cache *pblk_ws_cache, *pblk_rec_cache, *pblk_g_rq_cache,
> 				*pblk_w_rq_cache;
> static DECLARE_RWSEM(pblk_lock);
> -struct bio_set *pblk_bio_set;
> +struct bio_set pblk_bio_set;
> 
> static int pblk_rw_io(struct request_queue *q, struct pblk *pblk,
> 			  struct bio *bio)
> @@ -341,7 +341,7 @@ static int pblk_core_init(struct pblk *pblk)
> {
> 	struct nvm_tgt_dev *dev = pblk->dev;
> 	struct nvm_geo *geo = &dev->geo;
> -	int max_write_ppas;
> +	int ret, max_write_ppas;
> 
> 	atomic64_set(&pblk->user_wa, 0);
> 	atomic64_set(&pblk->pad_wa, 0);
> @@ -375,33 +375,33 @@ static int pblk_core_init(struct pblk *pblk)
> 		goto fail_free_pad_dist;
> 
> 	/* Internal bios can be at most the sectors signaled by the device. */
> -	pblk->page_bio_pool = mempool_create_page_pool(NVM_MAX_VLBA, 0);
> -	if (!pblk->page_bio_pool)
> +	ret = mempool_init_page_pool(&pblk->page_bio_pool, NVM_MAX_VLBA, 0);
> +	if (ret)
> 		goto free_global_caches;
> 
> -	pblk->gen_ws_pool = mempool_create_slab_pool(PBLK_GEN_WS_POOL_SIZE,
> -							pblk_ws_cache);
> -	if (!pblk->gen_ws_pool)
> +	ret = mempool_init_slab_pool(&pblk->gen_ws_pool, PBLK_GEN_WS_POOL_SIZE,
> +				     pblk_ws_cache);
> +	if (ret)
> 		goto free_page_bio_pool;
> 
> -	pblk->rec_pool = mempool_create_slab_pool(geo->all_luns,
> -							pblk_rec_cache);
> -	if (!pblk->rec_pool)
> +	ret = mempool_init_slab_pool(&pblk->rec_pool, geo->all_luns,
> +				     pblk_rec_cache);
> +	if (ret)
> 		goto free_gen_ws_pool;
> 
> -	pblk->r_rq_pool = mempool_create_slab_pool(geo->all_luns,
> -							pblk_g_rq_cache);
> -	if (!pblk->r_rq_pool)
> +	ret = mempool_init_slab_pool(&pblk->r_rq_pool, geo->all_luns,
> +				     pblk_g_rq_cache);
> +	if (ret)
> 		goto free_rec_pool;
> 
> -	pblk->e_rq_pool = mempool_create_slab_pool(geo->all_luns,
> -							pblk_g_rq_cache);
> -	if (!pblk->e_rq_pool)
> +	ret = mempool_init_slab_pool(&pblk->e_rq_pool, geo->all_luns,
> +				     pblk_g_rq_cache);
> +	if (ret)
> 		goto free_r_rq_pool;
> 
> -	pblk->w_rq_pool = mempool_create_slab_pool(geo->all_luns,
> -							pblk_w_rq_cache);
> -	if (!pblk->w_rq_pool)
> +	ret = mempool_init_slab_pool(&pblk->w_rq_pool, geo->all_luns,
> +				     pblk_w_rq_cache);
> +	if (ret)
> 		goto free_e_rq_pool;
> 
> 	pblk->close_wq = alloc_workqueue("pblk-close-wq",
> @@ -433,17 +433,17 @@ static int pblk_core_init(struct pblk *pblk)
> free_close_wq:
> 	destroy_workqueue(pblk->close_wq);
> free_w_rq_pool:
> -	mempool_destroy(pblk->w_rq_pool);
> +	mempool_exit(&pblk->w_rq_pool);
> free_e_rq_pool:
> -	mempool_destroy(pblk->e_rq_pool);
> +	mempool_exit(&pblk->e_rq_pool);
> free_r_rq_pool:
> -	mempool_destroy(pblk->r_rq_pool);
> +	mempool_exit(&pblk->r_rq_pool);
> free_rec_pool:
> -	mempool_destroy(pblk->rec_pool);
> +	mempool_exit(&pblk->rec_pool);
> free_gen_ws_pool:
> -	mempool_destroy(pblk->gen_ws_pool);
> +	mempool_exit(&pblk->gen_ws_pool);
> free_page_bio_pool:
> -	mempool_destroy(pblk->page_bio_pool);
> +	mempool_exit(&pblk->page_bio_pool);
> free_global_caches:
> 	pblk_free_global_caches(pblk);
> fail_free_pad_dist:
> @@ -462,12 +462,12 @@ static void pblk_core_free(struct pblk *pblk)
> 	if (pblk->bb_wq)
> 		destroy_workqueue(pblk->bb_wq);
> 
> -	mempool_destroy(pblk->page_bio_pool);
> -	mempool_destroy(pblk->gen_ws_pool);
> -	mempool_destroy(pblk->rec_pool);
> -	mempool_destroy(pblk->r_rq_pool);
> -	mempool_destroy(pblk->e_rq_pool);
> -	mempool_destroy(pblk->w_rq_pool);
> +	mempool_exit(&pblk->page_bio_pool);
> +	mempool_exit(&pblk->gen_ws_pool);
> +	mempool_exit(&pblk->rec_pool);
> +	mempool_exit(&pblk->r_rq_pool);
> +	mempool_exit(&pblk->e_rq_pool);
> +	mempool_exit(&pblk->w_rq_pool);
> 
> 	pblk_free_global_caches(pblk);
> 	kfree(pblk->pad_dist);
> @@ -1297,18 +1297,18 @@ static int __init pblk_module_init(void)
> {
> 	int ret;
> 
> -	pblk_bio_set = bioset_create(BIO_POOL_SIZE, 0, 0);
> -	if (!pblk_bio_set)
> -		return -ENOMEM;
> +	ret = bioset_init(&pblk_bio_set, BIO_POOL_SIZE, 0, 0);
> +	if (ret)
> +		return ret;
> 	ret = nvm_register_tgt_type(&tt_pblk);
> 	if (ret)
> -		bioset_free(pblk_bio_set);
> +		bioset_exit(&pblk_bio_set);
> 	return ret;
> }
> 
> static void pblk_module_exit(void)
> {
> -	bioset_free(pblk_bio_set);
> +	bioset_exit(&pblk_bio_set);
> 	nvm_unregister_tgt_type(&tt_pblk);
> }
> 
> diff --git a/drivers/lightnvm/pblk-read.c b/drivers/lightnvm/pblk-read.c
> index 9eee10f69d..c844ffb6ae 100644
> --- a/drivers/lightnvm/pblk-read.c
> +++ b/drivers/lightnvm/pblk-read.c
> @@ -294,7 +294,7 @@ static int pblk_partial_read_bio(struct pblk *pblk, struct nvm_rq *rqd,
> 		kunmap_atomic(src_p);
> 		kunmap_atomic(dst_p);
> 
> -		mempool_free(src_bv.bv_page, pblk->page_bio_pool);
> +		mempool_free(src_bv.bv_page, &pblk->page_bio_pool);
> 
> 		hole = find_next_zero_bit(read_bitmap, nr_secs, hole + 1);
> 	} while (hole < nr_secs);
> @@ -429,7 +429,7 @@ int pblk_submit_read(struct pblk *pblk, struct bio *bio)
> 		struct bio *int_bio = NULL;
> 
> 		/* Clone read bio to deal with read errors internally */
> -		int_bio = bio_clone_fast(bio, GFP_KERNEL, pblk_bio_set);
> +		int_bio = bio_clone_fast(bio, GFP_KERNEL, &pblk_bio_set);
> 		if (!int_bio) {
> 			pr_err("pblk: could not clone read bio\n");
> 			goto fail_end_io;
> diff --git a/drivers/lightnvm/pblk-recovery.c b/drivers/lightnvm/pblk-recovery.c
> index 3e079c2afa..364ad52a5b 100644
> --- a/drivers/lightnvm/pblk-recovery.c
> +++ b/drivers/lightnvm/pblk-recovery.c
> @@ -60,7 +60,7 @@ void pblk_submit_rec(struct work_struct *work)
> 		goto err;
> 	}
> 
> -	mempool_free(recovery, pblk->rec_pool);
> +	mempool_free(recovery, &pblk->rec_pool);
> 	return;
> 
> err:
> diff --git a/drivers/lightnvm/pblk-write.c b/drivers/lightnvm/pblk-write.c
> index 3e6f1ebd74..aef7fa2d40 100644
> --- a/drivers/lightnvm/pblk-write.c
> +++ b/drivers/lightnvm/pblk-write.c
> @@ -122,7 +122,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
> 	if (unlikely(nr_ppas == 1))
> 		ppa_list = &rqd->ppa_addr;
> 
> -	recovery = mempool_alloc(pblk->rec_pool, GFP_ATOMIC);
> +	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
> 
> 	INIT_LIST_HEAD(&recovery->failed);
> 
> @@ -134,7 +134,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
> 		/* Logic error */
> 		if (bit > c_ctx->nr_valid) {
> 			WARN_ONCE(1, "pblk: corrupted write request\n");
> -			mempool_free(recovery, pblk->rec_pool);
> +			mempool_free(recovery, &pblk->rec_pool);
> 			goto out;
> 		}
> 
> @@ -142,7 +142,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
> 		entry = pblk_rb_sync_scan_entry(&pblk->rwb, &ppa);
> 		if (!entry) {
> 			pr_err("pblk: could not scan entry on write failure\n");
> -			mempool_free(recovery, pblk->rec_pool);
> +			mempool_free(recovery, &pblk->rec_pool);
> 			goto out;
> 		}
> 
> @@ -156,7 +156,7 @@ static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
> 	ret = pblk_recov_setup_rq(pblk, c_ctx, recovery, comp_bits, c_entries);
> 	if (ret) {
> 		pr_err("pblk: could not recover from write failure\n");
> -		mempool_free(recovery, pblk->rec_pool);
> +		mempool_free(recovery, &pblk->rec_pool);
> 		goto out;
> 	}
> 
> diff --git a/drivers/lightnvm/pblk.h b/drivers/lightnvm/pblk.h
> index 9c682acfc5..feafa4de26 100644
> --- a/drivers/lightnvm/pblk.h
> +++ b/drivers/lightnvm/pblk.h
> @@ -664,12 +664,12 @@ struct pblk {
> 
> 	struct list_head compl_list;
> 
> -	mempool_t *page_bio_pool;
> -	mempool_t *gen_ws_pool;
> -	mempool_t *rec_pool;
> -	mempool_t *r_rq_pool;
> -	mempool_t *w_rq_pool;
> -	mempool_t *e_rq_pool;
> +	mempool_t page_bio_pool;
> +	mempool_t gen_ws_pool;
> +	mempool_t rec_pool;
> +	mempool_t r_rq_pool;
> +	mempool_t w_rq_pool;
> +	mempool_t e_rq_pool;
> 
> 	struct workqueue_struct *close_wq;
> 	struct workqueue_struct *bb_wq;
> @@ -841,7 +841,7 @@ void pblk_write_should_kick(struct pblk *pblk);
> /*
>  * pblk read path
>  */
> -extern struct bio_set *pblk_bio_set;
> +extern struct bio_set pblk_bio_set;
> int pblk_submit_read(struct pblk *pblk, struct bio *bio);
> int pblk_submit_read_gc(struct pblk *pblk, struct pblk_gc_rq *gc_rq);
> /*
> --
> 2.17.0

Looks like two patches in one, but the changes look good to me as part of
the series.

checkpatch complains about a couple of patches in the series. Nothing big,
but you probably want to have a look.

Reviewed-by: Javier González <javier@cnexlabs.com>


[-- Attachment #2: Message signed with OpenPGP --]
[-- Type: application/pgp-signature, Size: 833 bytes --]

  reply	other threads:[~2018-05-22 10:10 UTC|newest]

Thread overview: 73+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2018-05-20 22:25 [PATCH 00/13] convert block layer to bioset_init()/mempool_init() Kent Overstreet
2018-05-20 22:25 ` [PATCH 01/12] block: convert bounce, q->bio_split " Kent Overstreet
2018-05-22 10:08   ` Christoph Hellwig
2018-05-20 22:25 ` [PATCH 02/12] drbd: convert " Kent Overstreet
2018-05-20 22:25 ` [PATCH 03/12] pktcdvd: " Kent Overstreet
2018-05-20 22:25 ` [PATCH 04/12] lightnvm: " Kent Overstreet
2018-05-22 10:10   ` Javier Gonzalez [this message]
2018-05-20 22:25 ` [PATCH 05/12] bcache: " Kent Overstreet
2018-05-21  3:58   ` Coly Li
2018-05-20 22:25 ` [PATCH 06/12] md: " Kent Overstreet
2018-06-01 10:51   ` Arnd Bergmann
2018-05-20 22:25 ` [PATCH 07/12] dm: " Kent Overstreet
     [not found]   ` <20180520222558.7053-8-kent.overstreet-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-05-30 19:27     ` Mike Snitzer
2018-05-30 19:27       ` Mike Snitzer
2018-05-20 22:25 ` [PATCH 08/12] target: " Kent Overstreet
2018-05-22 10:09   ` Christoph Hellwig
2018-05-20 22:25 ` [PATCH 09/12] fs: convert block_dev.c to bioset_init() Kent Overstreet
2018-05-22 10:09   ` Christoph Hellwig
2018-05-20 22:25 ` [PATCH 10/12] btrfs: convert to bioset_init()/mempool_init() Kent Overstreet
2018-05-30 21:30   ` Chris Mason
2018-05-30 21:30     ` Chris Mason
2018-05-20 22:25 ` [PATCH 11/12] xfs: " Kent Overstreet
2018-05-21 18:39   ` Darrick J. Wong
     [not found]   ` <20180520222558.7053-12-kent.overstreet-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-05-22 10:10     ` Christoph Hellwig
2018-05-22 10:10       ` Christoph Hellwig
2018-05-20 22:25 ` [PATCH 12/12] block: Drop bioset_create() Kent Overstreet
2018-05-22 10:10   ` Christoph Hellwig
2018-05-20 23:08 ` [PATCH 00/13] convert block layer to bioset_init()/mempool_init() NeilBrown
2018-05-20 23:08   ` NeilBrown
2018-05-20 23:11   ` Kent Overstreet
     [not found] ` <20180520222558.7053-1-kent.overstreet-Re5JQEeQqe8AvxtiuMwx3w@public.gmane.org>
2018-05-21 14:03   ` Mike Snitzer
2018-05-21 14:03     ` Mike Snitzer
2018-05-21 14:19     ` Jens Axboe
     [not found]       ` <686d7df6-c7d1-48a6-b7ff-48dc8aff6a62-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
2018-05-21 14:31         ` Mike Snitzer
2018-05-21 14:31           ` Mike Snitzer
2018-05-21 14:36           ` Jens Axboe
     [not found]             ` <2bbeeb1a-8b99-b06a-eb9b-eb8523c16460-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
2018-05-21 14:47               ` Mike Snitzer
2018-05-21 14:47                 ` Mike Snitzer
     [not found]                 ` <20180521144703.GA19303-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-05-21 14:52                   ` Jens Axboe
2018-05-21 14:52                     ` Jens Axboe
     [not found]                     ` <4b343aef-e11c-73ba-1d88-7e73ca838cad-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
2018-05-21 15:04                       ` Mike Snitzer
2018-05-21 15:04                         ` Mike Snitzer
     [not found]                         ` <20180521150439.GA19379-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-05-21 15:09                           ` Jens Axboe
2018-05-21 15:09                             ` Jens Axboe
     [not found]                             ` <61e30dcf-a01c-f47d-087a-12930caf9aef-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
2018-05-21 15:18                               ` Mike Snitzer
2018-05-21 15:18                                 ` Mike Snitzer
     [not found]                                 ` <20180521151817.GA19454-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-05-21 15:36                                   ` Jens Axboe
2018-05-21 15:36                                     ` Jens Axboe
     [not found]                                     ` <d01a150a-7752-f6ce-78f2-17a65c1e6fa5-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
2018-05-21 16:09                                       ` Mike Snitzer
2018-05-21 16:09                                         ` Mike Snitzer
     [not found]                                         ` <20180521160907.GA19553-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-05-21 16:20                                           ` Jens Axboe
2018-05-21 16:20                                             ` Jens Axboe
     [not found]                                             ` <f9e3714c-b7c9-d5f6-4018-2a87dd5babb2-tSWWG44O7X1aa/9Udqfwiw@public.gmane.org>
2018-05-30 13:36                                               ` Mike Snitzer
2018-05-30 13:36                                                 ` Mike Snitzer
     [not found]                                                 ` <20180530133629.GC5157-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-05-30 18:55                                                   ` Jens Axboe
2018-05-30 18:55                                                     ` Jens Axboe
2018-05-30 19:34                                                     ` Kent Overstreet
2018-05-30 19:36                                                       ` Jens Axboe
2018-05-30 19:36                                                         ` Jens Axboe
2018-05-30 19:37                                                     ` Mike Snitzer
     [not found]                                                       ` <20180530193707.GB6568-H+wXaHxf7aLQT0dZR+AlfA@public.gmane.org>
2018-05-30 19:38                                                         ` Jens Axboe
2018-05-30 19:38                                                           ` Jens Axboe
2018-05-21 17:37                                         ` Kent Overstreet
2018-05-21 18:24                                           ` Mike Snitzer
2018-05-21 18:24                                             ` Mike Snitzer
2018-05-21 23:38                                             ` Kent Overstreet
2018-05-22  6:41                                               ` Christoph Hellwig
     [not found]                                                 ` <20180522064118.GA18704-wEGCiKHe2LqWVfeAwA7xHQ@public.gmane.org>
2018-05-22 19:09                                                   ` Mike Snitzer
2018-05-22 19:09                                                     ` Mike Snitzer
2018-05-21 15:12       ` David Sterba
2018-05-21 15:18         ` Jens Axboe
2018-05-21 14:20 ` Jens Axboe
2018-05-30 22:24 ` Jens Axboe

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=84EF687A-078F-4807-A667-39A8A91D48D0@cnexlabs.com \
    --to=javier@cnexlabs.com \
    --cc=axboe@kernel.dk \
    --cc=bacik@fb.com \
    --cc=clm@fb.com \
    --cc=colyli@suse.de \
    --cc=darrick.wong@oracle.com \
    --cc=drbd-dev@lists.linbit.com \
    --cc=hch@infradead.org \
    --cc=kent.overstreet@gmail.com \
    --cc=linux-block@vger.kernel.org \
    --cc=linux-btrfs@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-raid@vger.kernel.org \
    --cc=linux-xfs@vger.kernel.org \
    --cc=neilb@suse.com \
    --cc=snitzer@redhat.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.