From mboxrd@z Thu Jan  1 00:00:00 1970
From: Christoph Hellwig
Subject: Re: [PATCH V11 14/19] block: handle non-cluster bio out of
 blk_bio_segment_split
Date: Wed, 21 Nov 2018 15:33:55 +0100
Message-ID: <20181121143355.GB2594@lst.de>
In-Reply-To: <20181121032327.8434-15-ming.lei@redhat.com>
References: <20181121032327.8434-1-ming.lei@redhat.com>
 <20181121032327.8434-15-ming.lei@redhat.com>
Mime-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
To: Ming Lei
Cc: Jens Axboe, linux-block@vger.kernel.org, linux-kernel@vger.kernel.org,
 linux-mm@kvack.org, Theodore Ts'o, Omar Sandoval, Sagi Grimberg,
 Dave Chinner, Kent Overstreet, Mike Snitzer, dm-devel@redhat.com,
 Alexander Viro, linux-fsdevel@vger.kernel.org, Shaohua Li,
 linux-raid@vger.kernel.org, David Sterba, linux-btrfs@vger.kernel.org,
 "Darrick J. Wong", linux-xfs@vger.kernel.org, Gao Xiang,
 Christoph Hellwig, linux-ext4@vger.kernel.org, Coly Li,
 linux-bcache@vger.kernel.org, Boaz Harrosh
List-Id: linux-raid.ids

> +		non-cluster.o

Do we really need a new source file for these few functions?

> 	default:
> +		if (!blk_queue_cluster(q)) {
> +			blk_queue_non_cluster_bio(q, bio);
> +			return;

I'd name this blk_bio_segment_split_singlepage or similar.

> +static __init int init_non_cluster_bioset(void)
> +{
> +	WARN_ON(bioset_init(&non_cluster_bio_set, BIO_POOL_SIZE, 0,
> +			BIOSET_NEED_BVECS));
> +	WARN_ON(bioset_integrity_create(&non_cluster_bio_set, BIO_POOL_SIZE));
> +	WARN_ON(bioset_init(&non_cluster_bio_split, BIO_POOL_SIZE, 0, 0));

Please only allocate these resources once a queue without the cluster
flag is registered; only very few modern drivers actually need that
(rough sketch at the end of this mail).

> +static void non_cluster_end_io(struct bio *bio)
> +{
> +	struct bio *bio_orig = bio->bi_private;
> +
> +	bio_orig->bi_status = bio->bi_status;
> +	bio_endio(bio_orig);
> +	bio_put(bio);
> +}

Why can't we use bio_chain for the split bios?  (See the note below
the diff.)

> +	bio_for_each_segment(from, *bio_orig, iter) {
> +		if (i++ < max_segs)
> +			sectors += from.bv_len >> 9;
> +		else
> +			break;
> +	}

The easy to read way would be:

	bio_for_each_segment(from, *bio_orig, iter) {
		if (i++ == max_segs)
			break;
		sectors += from.bv_len >> 9;
	}

> +	if (sectors < bio_sectors(*bio_orig)) {
> +		bio = bio_split(*bio_orig, sectors, GFP_NOIO,
> +				&non_cluster_bio_split);
> +		bio_chain(bio, *bio_orig);
> +		generic_make_request(*bio_orig);
> +		*bio_orig = bio;

I don't think this is very efficient: it means we now clone the bio
twice, first to split it at the sector boundary, and then again when
converting it to single-page bio_vecs.

I think this could be something like this (totally untested):

diff --git a/block/non-cluster.c b/block/non-cluster.c
index 9c2910be9404..60389f275c43 100644
--- a/block/non-cluster.c
+++ b/block/non-cluster.c
@@ -13,58 +13,59 @@
 
 #include "blk.h"
 
-static struct bio_set non_cluster_bio_set, non_cluster_bio_split;
+static struct bio_set non_cluster_bio_set;
 
 static __init int init_non_cluster_bioset(void)
 {
 	WARN_ON(bioset_init(&non_cluster_bio_set, BIO_POOL_SIZE, 0,
 			BIOSET_NEED_BVECS));
 	WARN_ON(bioset_integrity_create(&non_cluster_bio_set, BIO_POOL_SIZE));
-	WARN_ON(bioset_init(&non_cluster_bio_split, BIO_POOL_SIZE, 0, 0));
 
 	return 0;
 }
 __initcall(init_non_cluster_bioset);
 
-static void non_cluster_end_io(struct bio *bio)
-{
-	struct bio *bio_orig = bio->bi_private;
-
-	bio_orig->bi_status = bio->bi_status;
-	bio_endio(bio_orig);
-	bio_put(bio);
-}
-
 void blk_queue_non_cluster_bio(struct request_queue *q, struct bio **bio_orig)
 {
-	struct bio *bio;
 	struct bvec_iter iter;
-	struct bio_vec from;
-	unsigned i = 0;
-	unsigned sectors = 0;
-	unsigned short max_segs = min_t(unsigned short, BIO_MAX_PAGES,
-			queue_max_segments(q));
+	struct bio *bio;
+	struct bio_vec bv;
+	unsigned short max_segs, segs = 0;
+
+	bio = bio_alloc_bioset(GFP_NOIO, bio_segments(*bio_orig),
+			&non_cluster_bio_set);
+	bio->bi_disk		= (*bio_orig)->bi_disk;
+	bio->bi_partno		= (*bio_orig)->bi_partno;
+	bio_set_flag(bio, BIO_CLONED);
+	if (bio_flagged(*bio_orig, BIO_THROTTLED))
+		bio_set_flag(bio, BIO_THROTTLED);
+	bio->bi_opf		= (*bio_orig)->bi_opf;
+	bio->bi_ioprio		= (*bio_orig)->bi_ioprio;
+	bio->bi_write_hint	= (*bio_orig)->bi_write_hint;
+	bio->bi_iter.bi_sector	= (*bio_orig)->bi_iter.bi_sector;
+	bio->bi_iter.bi_size	= (*bio_orig)->bi_iter.bi_size;
+
+	if (bio_integrity(*bio_orig))
+		bio_integrity_clone(bio, *bio_orig, GFP_NOIO);
 
-	bio_for_each_segment(from, *bio_orig, iter) {
-		if (i++ < max_segs)
-			sectors += from.bv_len >> 9;
-		else
+	bio_clone_blkcg_association(bio, *bio_orig);
+
+	max_segs = min_t(unsigned short, queue_max_segments(q), BIO_MAX_PAGES);
+	bio_for_each_segment(bv, *bio_orig, iter) {
+		bio->bi_io_vec[segs++] = bv;
+		if (segs == max_segs)
 			break;
 	}
-	if (sectors < bio_sectors(*bio_orig)) {
-		bio = bio_split(*bio_orig, sectors, GFP_NOIO,
-				&non_cluster_bio_split);
-		bio_chain(bio, *bio_orig);
-		generic_make_request(*bio_orig);
-		*bio_orig = bio;
-	}
-	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, &non_cluster_bio_set);
+	bio->bi_vcnt = segs;
+	bio->bi_phys_segments = segs;
+	bio_set_flag(bio, BIO_SEG_VALID);
+	bio_chain(bio, *bio_orig);
 
-	bio->bi_phys_segments = bio_segments(bio);
-	bio_set_flag(bio, BIO_SEG_VALID);
-	bio->bi_end_io = non_cluster_end_io;
+	if (bio_integrity(bio))
+		bio_integrity_trim(bio);
+	bio_advance(bio, (*bio_orig)->bi_iter.bi_size);
 
-	bio->bi_private = *bio_orig;
+	generic_make_request(*bio_orig);
 	*bio_orig = bio;
 }
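
On the bio_chain point: non_cluster_end_io() above hand-rolls exactly
what bio_chain() already provides.  Rough equivalence, for illustration
only:

	/* hand-rolled in the patch: */
	bio->bi_private = bio_orig;
	bio->bi_end_io = non_cluster_end_io;	/* copy bi_status, end parent */

	/* what bio_chain() gives for free: */
	bio_chain(bio, bio_orig);	/* bio completes into bio_orig: its
					 * bi_status is propagated and
					 * bio_endio(bio_orig) only runs once
					 * all chained bios have finished */

That also covers the case where the original bio ends up being split
more than once, since the completions are counted through the parent's
__bi_remaining.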
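
And to flesh out the allocation comment above: instead of the
unconditional __initcall, the bioset could be set up the first time a
queue without the cluster flag is registered, e.g. from
blk_register_queue().  Again totally untested, and the helper name and
lock are made up; this assumes the declarations already in
non-cluster.c:

	static DEFINE_MUTEX(non_cluster_init_lock);
	static bool non_cluster_init_done;

	int blk_non_cluster_setup(struct request_queue *q)
	{
		int ret = 0;

		if (blk_queue_cluster(q))
			return 0;	/* clustered queues never need this */

		mutex_lock(&non_cluster_init_lock);
		if (!non_cluster_init_done) {
			ret = bioset_init(&non_cluster_bio_set, BIO_POOL_SIZE,
					0, BIOSET_NEED_BVECS);
			if (!ret)
				ret = bioset_integrity_create(
					&non_cluster_bio_set, BIO_POOL_SIZE);
			if (!ret)
				non_cluster_init_done = true;
		}
		mutex_unlock(&non_cluster_init_lock);
		return ret;
	}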