nvdimm.lists.linux.dev archive mirror
From: Hannes Reinecke <hare@suse.de>
To: Christoph Hellwig <hch@lst.de>, Jens Axboe <axboe@kernel.dk>,
	Geert Uytterhoeven <geert@linux-m68k.org>,
	Chris Zankel <chris@zankel.net>,
	Max Filippov <jcmvbkbc@gmail.com>,
	Philipp Reisner <philipp.reisner@linbit.com>,
	Lars Ellenberg <lars.ellenberg@linbit.com>,
	Jim Paris <jim@jtan.com>, Philip Kelleher <pjk1939@linux.ibm.com>,
	Minchan Kim <minchan@kernel.org>, Nitin Gupta <ngupta@vflare.org>,
	Matias Bjorling <mb@lightnvm.io>, Coly Li <colyli@suse.de>,
	Mike Snitzer <snitzer@redhat.com>, Song Liu <song@kernel.org>,
	Maxim Levitsky <maximlevitsky@gmail.com>,
	Alex Dubov <oakad@yahoo.com>,
	Ulf Hansson <ulf.hansson@linaro.org>,
	Dan Williams <dan.j.williams@intel.com>,
	Vishal Verma <vishal.l.verma@intel.com>,
	Dave Jiang <dave.jiang@intel.com>,
	Heiko Carstens <hca@linux.ibm.com>,
	Vasily Gorbik <gor@linux.ibm.com>,
	Christian Borntraeger <borntraeger@de.ibm.com>
Cc: linux-block@vger.kernel.org, dm-devel@redhat.com,
	linux-m68k@lists.linux-m68k.org, linux-xtensa@linux-xtensa.org,
	drbd-dev@lists.linbit.com, linuxppc-dev@lists.ozlabs.org,
	linux-bcache@vger.kernel.org, linux-raid@vger.kernel.org,
	linux-mmc@vger.kernel.org, nvdimm@lists.linux.dev,
	linux-nvme@lists.infradead.org, linux-s390@vger.kernel.org
Subject: Re: [PATCH 18/26] nvme-multipath: convert to blk_alloc_disk/blk_cleanup_disk
Date: Sun, 23 May 2021 10:20:27 +0200
Message-ID: <1a771bf9-5083-c440-f0e1-5f6920b5b017@suse.de>
In-Reply-To: <20210521055116.1053587-19-hch@lst.de>

On 5/21/21 7:51 AM, Christoph Hellwig wrote:
> Convert the nvme-multipath driver to use the blk_alloc_disk and
> blk_cleanup_disk helpers to simplify gendisk and request_queue
> allocation.
> 
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
>   drivers/nvdimm/pmem.c         |  1 -
>   drivers/nvme/host/multipath.c | 45 ++++++++++-------------------------
>   2 files changed, 13 insertions(+), 33 deletions(-)
> 
> diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
> index 9fcd05084564..31f3c4bd6f72 100644
> --- a/drivers/nvdimm/pmem.c
> +++ b/drivers/nvdimm/pmem.c
> @@ -472,7 +472,6 @@ static int pmem_attach_disk(struct device *dev,
>   		blk_queue_flag_set(QUEUE_FLAG_DAX, q);
>   
>   	disk->fops		= &pmem_fops;
> -	disk->queue		= q;
>   	disk->private_data	= pmem;
>   	nvdimm_namespace_disk_name(ndns, disk->disk_name);
>   	set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
> diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
> index a5d02f236cca..b5fbdb416022 100644
> --- a/drivers/nvme/host/multipath.c
> +++ b/drivers/nvme/host/multipath.c
> @@ -427,7 +427,6 @@ static void nvme_requeue_work(struct work_struct *work)
>   
>   int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
>   {
> -	struct request_queue *q;
>   	bool vwc = false;
>   
>   	mutex_init(&head->lock);
> @@ -443,33 +442,24 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head)
>   	if (!(ctrl->subsys->cmic & NVME_CTRL_CMIC_MULTI_CTRL) || !multipath)
>   		return 0;
>   
> -	q = blk_alloc_queue(ctrl->numa_node);
> -	if (!q)
> -		goto out;
> -	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
> -	/* set to a default value for 512 until disk is validated */
> -	blk_queue_logical_block_size(q, 512);
> -	blk_set_stacking_limits(&q->limits);
> -
> -	/* we need to propagate up the VMC settings */
> -	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
> -		vwc = true;
> -	blk_queue_write_cache(q, vwc, vwc);
> -
> -	head->disk = alloc_disk(0);
> +	head->disk = blk_alloc_disk(ctrl->numa_node);
>   	if (!head->disk)
> -		goto out_cleanup_queue;
> +		return -ENOMEM;
>   	head->disk->fops = &nvme_ns_head_ops;
>   	head->disk->private_data = head;
> -	head->disk->queue = q;
>   	sprintf(head->disk->disk_name, "nvme%dn%d",
>   			ctrl->subsys->instance, head->instance);
> -	return 0;
>   
> -out_cleanup_queue:
> -	blk_cleanup_queue(q);
> -out:
> -	return -ENOMEM;
> +	blk_queue_flag_set(QUEUE_FLAG_NONROT, head->disk->queue);
> +	/* set to a default value of 512 until the disk is validated */
> +	blk_queue_logical_block_size(head->disk->queue, 512);
> +	blk_set_stacking_limits(&head->disk->queue->limits);
> +
> +	/* we need to propagate up the VMC settings */
> +	if (ctrl->vwc & NVME_CTRL_VWC_PRESENT)
> +		vwc = true;
> +	blk_queue_write_cache(head->disk->queue, vwc, vwc);
> +	return 0;
>   }
>   
>   static void nvme_mpath_set_live(struct nvme_ns *ns)
> @@ -768,16 +758,7 @@ void nvme_mpath_remove_disk(struct nvme_ns_head *head)
>   	/* make sure all pending bios are cleaned up */
>   	kblockd_schedule_work(&head->requeue_work);
>   	flush_work(&head->requeue_work);
> -	blk_cleanup_queue(head->disk->queue);
> -	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags)) {
> -		/*
> -		 * if device_add_disk wasn't called, prevent
> -		 * disk release to put a bogus reference on the
> -		 * request queue
> -		 */
> -		head->disk->queue = NULL;
> -	}
> -	put_disk(head->disk);
> +	blk_cleanup_disk(head->disk);
>   }
>   
>   void nvme_mpath_init_ctrl(struct nvme_ctrl *ctrl)
> 
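For anyone skimming: condensed down, the nvme_mpath_alloc_disk() part of
the conversion amounts to roughly this (an illustrative sketch of the
hunks above, not the literal driver code):

	/* before: queue and gendisk allocated and wired up separately */
	q = blk_alloc_queue(ctrl->numa_node);
	head->disk = alloc_disk(0);
	head->disk->queue = q;
	/* error/teardown: blk_cleanup_queue(q); put_disk(head->disk); */

	/* after: one allocation, the queue lives behind the gendisk */
	head->disk = blk_alloc_disk(ctrl->numa_node);
	if (!head->disk)
		return -ENOMEM;
	/* the queue is reachable as head->disk->queue from here on */
	/* teardown: blk_cleanup_disk(head->disk); */
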
What about the check for GENHD_FL_UP a bit further up, at line 766?
Can this still happen with the new allocation scheme, i.e. is there still
a difference in lifetime between ->disk and ->disk->queue?
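
To make the question concrete (sketch only, lifted from the removal hunk
above): the old teardown had to keep the two lifetimes apart by hand,
while blk_cleanup_disk() now treats disk and queue as one unit:

	/* old nvme_mpath_remove_disk() tail */
	blk_cleanup_queue(head->disk->queue);
	if (!test_bit(NVME_NSHEAD_DISK_LIVE, &head->flags))
		head->disk->queue = NULL;	/* avoid a bogus queue ref in disk release */
	put_disk(head->disk);

	/* new tail: queue shutdown and final disk put in one call */
	blk_cleanup_disk(head->disk);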

Cheers,

Hannes
-- 
Dr. Hannes Reinecke                Kernel Storage Architect
hare@suse.de                              +49 911 74053 688
SUSE Software Solutions GmbH, Maxfeldstr. 5, 90409 Nürnberg
HRB 36809 (AG Nürnberg), Geschäftsführer: Felix Imendörffer


Thread overview: 75+ messages
2021-05-21  5:50 simplify gendisk and request_queue allocation for bio based drivers Christoph Hellwig
2021-05-21  5:50 ` [PATCH 01/26] block: refactor device number setup in __device_add_disk Christoph Hellwig
2021-05-21 17:16   ` [dm-devel] " Luis Chamberlain
2021-05-24  7:20     ` Christoph Hellwig
2021-05-23  7:46   ` Hannes Reinecke
2021-05-24  7:22     ` Christoph Hellwig
2021-05-21  5:50 ` [PATCH 02/26] block: move the DISK_MAX_PARTS sanity check into __device_add_disk Christoph Hellwig
2021-05-21 17:18   ` [dm-devel] " Luis Chamberlain
2021-05-23  7:48   ` Hannes Reinecke
2021-05-21  5:50 ` [PATCH 03/26] block: automatically enable GENHD_FL_EXT_DEVT Christoph Hellwig
2021-05-21 17:22   ` [dm-devel] " Luis Chamberlain
2021-05-23  7:50   ` Hannes Reinecke
2021-05-21  5:50 ` [PATCH 04/26] block: add a flag to make put_disk on partially initalized disks safer Christoph Hellwig
2021-05-21 17:28   ` [dm-devel] " Luis Chamberlain
2021-05-23  7:54   ` Hannes Reinecke
2021-05-21  5:50 ` [PATCH 05/26] block: add blk_alloc_disk and blk_cleanup_disk APIs Christoph Hellwig
2021-05-21 17:44   ` [dm-devel] " Luis Chamberlain
2021-05-24  7:24     ` Christoph Hellwig
2021-05-23  7:55   ` Hannes Reinecke
2021-05-21  5:50 ` [PATCH 06/26] brd: convert to blk_alloc_disk/blk_cleanup_disk Christoph Hellwig
2021-05-23  7:58   ` Hannes Reinecke
2021-05-24  7:24     ` Christoph Hellwig
2021-05-21  5:50 ` [PATCH 07/26] drbd: " Christoph Hellwig
2021-05-23  7:59   ` Hannes Reinecke
2021-05-21  5:50 ` [PATCH 08/26] pktcdvd: " Christoph Hellwig
2021-05-23  8:00   ` Hannes Reinecke
2021-05-21  5:50 ` [PATCH 09/26] rsxx: " Christoph Hellwig
2021-05-23  8:01   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 10/26] zram: " Christoph Hellwig
2021-05-23  8:01   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 11/26] lightnvm: " Christoph Hellwig
2021-05-23  8:02   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 12/26] bcache: " Christoph Hellwig
2021-05-21  6:15   ` Coly Li
2021-05-21  6:23     ` Christoph Hellwig
2021-05-23  8:04   ` Hannes Reinecke
2021-05-23 16:20   ` Coly Li
2021-05-21  5:51 ` [PATCH 13/26] dm: " Christoph Hellwig
2021-05-23  8:10   ` Hannes Reinecke
2021-05-24  7:25     ` Christoph Hellwig
2021-05-21  5:51 ` [PATCH 14/26] md: " Christoph Hellwig
2021-05-23  8:12   ` Hannes Reinecke
2021-05-24  7:26     ` Christoph Hellwig
2021-05-24  8:27       ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 15/26] nvdimm-blk: " Christoph Hellwig
2021-05-23  8:13   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 16/26] nvdimm-btt: " Christoph Hellwig
2021-05-23  8:14   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 17/26] nvdimm-pmem: " Christoph Hellwig
2021-05-23  8:14   ` Hannes Reinecke
2021-06-07  4:43   ` Dan Williams
2021-05-21  5:51 ` [PATCH 18/26] nvme-multipath: " Christoph Hellwig
2021-05-23  8:20   ` Hannes Reinecke [this message]
2021-05-24  7:29     ` Christoph Hellwig
2021-05-21  5:51 ` [PATCH 19/26] nfblock: " Christoph Hellwig
2021-05-21  8:37   ` Geert Uytterhoeven
2021-05-23  8:21   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 20/26] simdisk: " Christoph Hellwig
2021-05-23  8:22   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 21/26] n64cart: convert to blk_alloc_disk Christoph Hellwig
2021-05-23  8:22   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 22/26] ps3vram: convert to blk_alloc_disk/blk_cleanup_disk Christoph Hellwig
2021-05-23  8:23   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 23/26] dcssblk: " Christoph Hellwig
2021-05-23  8:23   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 24/26] xpram: " Christoph Hellwig
2021-05-23  8:24   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 25/26] null_blk: " Christoph Hellwig
2021-05-23  8:25   ` Hannes Reinecke
2021-05-21  5:51 ` [PATCH 26/26] block: unexport blk_alloc_queue Christoph Hellwig
2021-05-23  8:26   ` Hannes Reinecke
2021-05-25 22:41 ` simplify gendisk and request_queue allocation for bio based drivers Ulf Hansson
2021-05-26  4:49   ` Christoph Hellwig
2021-05-26  8:07     ` Ulf Hansson
2021-06-01 13:48 ` Jens Axboe
