From mboxrd@z Thu Jan 1 00:00:00 1970
From: Dan Streetman
Date: Wed, 11 Jan 2017 11:24:26 -0500
Subject: Re: [PATCH/RESEND v2 2/5] z3fold: fix header size related issues
To: Vitaly Wool
Cc: Linux-MM , linux-kernel , Andrew Morton
In-Reply-To: <20170111160615.ddb713b8fa1e53c76e159e7b@gmail.com>
References: <20170111155948.aa61c5b995b6523caf87d862@gmail.com> <20170111160615.ddb713b8fa1e53c76e159e7b@gmail.com>
X-Mailing-List: linux-kernel@vger.kernel.org

On Wed, Jan 11, 2017 at 10:06 AM, Vitaly Wool wrote:
> Currently the whole kernel build will be stopped if the size of struct
> z3fold_header is greater than the size of one chunk, which is 64 bytes by
> default. This patch instead defines the offset for z3fold objects as the
> size of the z3fold header in chunks.
>
> Also fixed are the calculation in num_free_chunks() and the address the
> middle chunk is moved to during in-page compaction in
> z3fold_compact_page().
>
> Signed-off-by: Vitaly Wool

Acked-by: Dan Streetman

> ---
> mm/z3fold.c | 114 ++++++++++++++++++++++++++++++++++--------------------------
> 1 file changed, 64 insertions(+), 50 deletions(-)
>
> diff --git a/mm/z3fold.c b/mm/z3fold.c
> index 2273789..98ab01f 100644
> --- a/mm/z3fold.c
> +++ b/mm/z3fold.c
> @@ -34,29 +34,58 @@
> /*****************
> * Structures
> *****************/
> +struct z3fold_pool;
> +struct z3fold_ops {
> + int (*evict)(struct z3fold_pool *pool, unsigned long handle);
> +};
> +
> +enum buddy {
> + HEADLESS = 0,
> + FIRST,
> + MIDDLE,
> + LAST,
> + BUDDIES_MAX
> +};
> +
> +/*
> + * struct z3fold_header - z3fold page metadata occupying the first chunk(s) of
> + * each z3fold page, except for HEADLESS pages
> + * @buddy: links the z3fold page into the relevant list in the pool
> + * @first_chunks: the size of the first buddy in chunks, 0 if free
> + * @middle_chunks: the size of the middle buddy in chunks, 0 if free
> + * @last_chunks: the size of the last buddy in chunks, 0 if free
> + * @first_num: the starting number (for the first handle)
> + */
> +struct z3fold_header {
> + struct list_head buddy;
> + unsigned short first_chunks;
> + unsigned short middle_chunks;
> + unsigned short last_chunks;
> + unsigned short start_middle;
> + unsigned short first_num:2;
> +};
> +
> /*
> * NCHUNKS_ORDER determines the internal allocation granularity, effectively
> * adjusting internal fragmentation. It also determines the number of
> * freelists maintained in each pool. NCHUNKS_ORDER of 6 means that the
> - * allocation granularity will be in chunks of size PAGE_SIZE/64. As one chunk
> - * in allocated page is occupied by z3fold header, NCHUNKS will be calculated
> - * to 63 which shows the max number of free chunks in z3fold page, also there
> - * will be 63 freelists per pool.
> + * allocation granularity will be in chunks of size PAGE_SIZE/64. Some chunks
> + * at the beginning of an allocated page are occupied by the z3fold header, so
> + * NCHUNKS will be calculated to 63 (or 62 in case CONFIG_DEBUG_SPINLOCK=y),
> + * which is the maximum number of free chunks in a z3fold page; there will
> + * likewise be 63 (or 62, respectively) freelists per pool.
> */
> #define NCHUNKS_ORDER 6
>
> #define CHUNK_SHIFT (PAGE_SHIFT - NCHUNKS_ORDER)
> #define CHUNK_SIZE (1 << CHUNK_SHIFT)
> -#define ZHDR_SIZE_ALIGNED CHUNK_SIZE
> +#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct z3fold_header), CHUNK_SIZE)
> +#define ZHDR_CHUNKS (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
> +#define TOTAL_CHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
> #define NCHUNKS ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)
>
> #define BUDDY_MASK (0x3)
>
> -struct z3fold_pool;
> -struct z3fold_ops {
> - int (*evict)(struct z3fold_pool *pool, unsigned long handle);
> -};
> -
> /**
> * struct z3fold_pool - stores metadata for each z3fold pool
> * @lock: protects all pool fields and first|last_chunk fields of any
> @@ -86,32 +115,6 @@ struct z3fold_pool {
> const struct zpool_ops *zpool_ops;
> };
>
> -enum buddy {
> - HEADLESS = 0,
> - FIRST,
> - MIDDLE,
> - LAST,
> - BUDDIES_MAX
> -};
> -
> -/*
> - * struct z3fold_header - z3fold page metadata occupying the first chunk of each
> - * z3fold page, except for HEADLESS pages
> - * @buddy: links the z3fold page into the relevant list in the pool
> - * @first_chunks: the size of the first buddy in chunks, 0 if free
> - * @middle_chunks: the size of the middle buddy in chunks, 0 if free
> - * @last_chunks: the size of the last buddy in chunks, 0 if free
> - * @first_num: the starting number (for the first handle)
> - */
> -struct z3fold_header {
> - struct list_head buddy;
> - unsigned short first_chunks;
> - unsigned short middle_chunks;
> - unsigned short last_chunks;
> - unsigned short start_middle;
> - unsigned short first_num:2;
> -};
> -
> /*
> * Internal z3fold page flags
> */
> @@ -121,6 +124,7 @@ enum z3fold_page_flags {
> MIDDLE_CHUNK_MAPPED,
> };
>
> +
> /*****************
> * Helpers
> *****************/
> @@ -204,9 +208,10 @@ static int num_free_chunks(struct z3fold_header *zhdr)
> */
> if (zhdr->middle_chunks != 0) {
> int nfree_before = zhdr->first_chunks ?
> - 0 : zhdr->start_middle - 1;
> + 0 : zhdr->start_middle - ZHDR_CHUNKS;
> int nfree_after = zhdr->last_chunks ?
> - 0 : NCHUNKS - zhdr->start_middle - zhdr->middle_chunks; > + 0 : TOTAL_CHUNKS - > + (zhdr->start_middle + zhdr->middle_chunks); > nfree = max(nfree_before, nfree_after); > } else > nfree = NCHUNKS - zhdr->first_chunks - zhdr->last_chunks; > @@ -254,26 +259,35 @@ static void z3fold_destroy_pool(struct z3fold_pool *pool) > kfree(pool); > } > > +static inline void *mchunk_memmove(struct z3fold_header *zhdr, > + unsigned short dst_chunk) > +{ > + void *beg = zhdr; > + return memmove(beg + (dst_chunk << CHUNK_SHIFT), > + beg + (zhdr->start_middle << CHUNK_SHIFT), > + zhdr->middle_chunks << CHUNK_SHIFT); > +} > + > /* Has to be called with lock held */ > static int z3fold_compact_page(struct z3fold_header *zhdr) > { > struct page *page = virt_to_page(zhdr); > - void *beg = zhdr; > > + if (test_bit(MIDDLE_CHUNK_MAPPED, &page->private)) > + return 0; /* can't move middle chunk, it's used */ > > - if (!test_bit(MIDDLE_CHUNK_MAPPED, &page->private) && > - zhdr->middle_chunks != 0 && > - zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { > - memmove(beg + ZHDR_SIZE_ALIGNED, > - beg + (zhdr->start_middle << CHUNK_SHIFT), > - zhdr->middle_chunks << CHUNK_SHIFT); > + if (zhdr->middle_chunks == 0) > + return 0; /* nothing to compact */ > + > + if (zhdr->first_chunks == 0 && zhdr->last_chunks == 0) { > + /* move to the beginning */ > + mchunk_memmove(zhdr, ZHDR_CHUNKS); > zhdr->first_chunks = zhdr->middle_chunks; > zhdr->middle_chunks = 0; > zhdr->start_middle = 0; > zhdr->first_num++; > - return 1; > } > - return 0; > + return 1; > } > > /** > @@ -365,7 +379,7 @@ static int z3fold_alloc(struct z3fold_pool *pool, size_t size, gfp_t gfp, > zhdr->last_chunks = chunks; > else { > zhdr->middle_chunks = chunks; > - zhdr->start_middle = zhdr->first_chunks + 1; > + zhdr->start_middle = zhdr->first_chunks + ZHDR_CHUNKS; > } > > if (zhdr->first_chunks == 0 || zhdr->last_chunks == 0 || > @@ -778,8 +792,8 @@ MODULE_ALIAS("zpool-z3fold"); > > static int __init init_z3fold(void) > { > - /* Make sure the z3fold header will fit in one chunk */ > - BUILD_BUG_ON(sizeof(struct z3fold_header) > ZHDR_SIZE_ALIGNED); > + /* Make sure the z3fold header is not larger than the page size */ > + BUILD_BUG_ON(ZHDR_SIZE_ALIGNED > PAGE_SIZE); > zpool_register_driver(&z3fold_zpool_driver); > > return 0; > -- > 2.4.2
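
For anyone who wants to sanity-check the new arithmetic without building the kernel, here is a quick userspace sketch. It is illustration only, not part of the patch: "struct fake_zhdr" is a hypothetical stand-in for struct z3fold_header (the real header's size depends on kernel config, which is exactly why deriving ZHDR_CHUNKS beats assuming a one-chunk header), PAGE_SHIFT is assumed to be 12 (4K pages), and num_free_chunks()/mchunk_memmove() simply mirror the patched logic above.

#include <stdio.h>
#include <string.h>

/* Assumed: 4K pages and the default NCHUNKS_ORDER of 6 => 64-byte chunks. */
#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define NCHUNKS_ORDER   6
#define CHUNK_SHIFT     (PAGE_SHIFT - NCHUNKS_ORDER)
#define CHUNK_SIZE      (1UL << CHUNK_SHIFT)
#define round_up(x, y)  ((((x) + (y) - 1) / (y)) * (y))

/* Stand-in for struct z3fold_header; field sizes are illustrative only. */
struct fake_zhdr {
        void *prev, *next;              /* mimics struct list_head buddy */
        unsigned short first_chunks;
        unsigned short middle_chunks;
        unsigned short last_chunks;
        unsigned short start_middle;
        unsigned short first_num;       /* a 2-bit field in the real struct */
};

/* Same derivation as the patch, applied to the stand-in struct. */
#define ZHDR_SIZE_ALIGNED round_up(sizeof(struct fake_zhdr), CHUNK_SIZE)
#define ZHDR_CHUNKS     (ZHDR_SIZE_ALIGNED >> CHUNK_SHIFT)
#define TOTAL_CHUNKS    (PAGE_SIZE >> CHUNK_SHIFT)
#define NCHUNKS         ((PAGE_SIZE - ZHDR_SIZE_ALIGNED) >> CHUNK_SHIFT)

/* Mirrors the patched num_free_chunks(): the gap before a middle buddy
 * starts at chunk ZHDR_CHUNKS (not 1), and the gap after it ends at
 * TOTAL_CHUNKS (not NCHUNKS). */
static int num_free_chunks(const struct fake_zhdr *zhdr)
{
        int nfree;

        if (zhdr->middle_chunks != 0) {
                int nfree_before = zhdr->first_chunks ?
                        0 : zhdr->start_middle - (int)ZHDR_CHUNKS;
                int nfree_after = zhdr->last_chunks ?
                        0 : (int)TOTAL_CHUNKS -
                                (zhdr->start_middle + zhdr->middle_chunks);
                nfree = nfree_before > nfree_after ? nfree_before : nfree_after;
        } else {
                nfree = (int)NCHUNKS - zhdr->first_chunks - zhdr->last_chunks;
        }
        return nfree;
}

/* Mirrors mchunk_memmove(): move the middle buddy to chunk dst_chunk. */
static void *mchunk_memmove(struct fake_zhdr *zhdr, unsigned short dst_chunk)
{
        unsigned char *beg = (unsigned char *)zhdr;

        return memmove(beg + (dst_chunk << CHUNK_SHIFT),
                       beg + (zhdr->start_middle << CHUNK_SHIFT),
                       zhdr->middle_chunks << CHUNK_SHIFT);
}

int main(void)
{
        static union {                  /* fake page, header at the start */
                unsigned char raw[PAGE_SIZE];
                struct fake_zhdr hdr;
        } page;
        struct fake_zhdr *zhdr = &page.hdr;

        printf("ZHDR_SIZE_ALIGNED = %lu\n", (unsigned long)ZHDR_SIZE_ALIGNED);
        printf("ZHDR_CHUNKS       = %lu\n", (unsigned long)ZHDR_CHUNKS);
        printf("TOTAL_CHUNKS      = %lu\n", (unsigned long)TOTAL_CHUNKS);
        printf("NCHUNKS           = %lu\n", (unsigned long)NCHUNKS);

        /* A lone 4-chunk middle buddy parked at chunk 20. */
        zhdr->middle_chunks = 4;
        zhdr->start_middle = 20;
        printf("free before compact = %d\n", num_free_chunks(zhdr));

        /* The compact path from the patch: move it to just past the header. */
        mchunk_memmove(zhdr, (unsigned short)ZHDR_CHUNKS);
        zhdr->first_chunks = zhdr->middle_chunks;
        zhdr->middle_chunks = 0;
        zhdr->start_middle = 0;
        printf("free after compact  = %d\n", num_free_chunks(zhdr));
        return 0;
}

With a stand-in header that pads out to a single 64-byte chunk, this works out to ZHDR_CHUNKS = 1 and NCHUNKS = 63, matching the comment in the patch, and the free count goes from 40 (the larger of the two gaps around the middle buddy) to 59 (one contiguous gap) after the move. A bigger real header, e.g. in the CONFIG_DEBUG_SPINLOCK=y case that comment mentions, just bumps ZHDR_CHUNKS and shrinks NCHUNKS instead of tripping the old one-chunk BUILD_BUG_ON.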