From: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
To: Christophe Leroy <christophe.leroy@c-s.fr>,
	Benjamin Herrenschmidt <benh@kernel.crashing.org>,
	Paul Mackerras <paulus@samba.org>,
	Michael Ellerman <mpe@ellerman.id.au>
Cc: linuxppc-dev@lists.ozlabs.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH v2 03/11] powerpc/mm: hand a context_t over to slice_mask_for_size() instead of mm_struct
Date: Fri, 26 Apr 2019 12:04:40 +0530	[thread overview]
Message-ID: <8736m5b7gf.fsf@linux.ibm.com> (raw)
In-Reply-To: <633e11daecb440a8233890589a4025ed5003f222.1556202029.git.christophe.leroy@c-s.fr>

Christophe Leroy <christophe.leroy@c-s.fr> writes:

> slice_mask_for_size() only uses mm->context, so hand it a pointer
> to the context directly. This will help move the function into the
> subarch mmu.h in the next patch, by avoiding having to include the
> definition of struct mm_struct.
>
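
As a quick summary of the interface change (condensed from the hunks
below, nothing beyond what the patch itself does):

    /* before: the function took the whole mm_struct */
    static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize);
    maskp = slice_mask_for_size(mm, psize);

    /* after: it takes the context, and callers pass &mm->context */
    static struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize);
    maskp = slice_mask_for_size(&mm->context, psize);

so the subarch mmu.h only needs mm_context_t, not struct mm_struct.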

Reviewed-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>

> Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
> ---
>  arch/powerpc/mm/slice.c | 34 +++++++++++++++++-----------------
>  1 file changed, 17 insertions(+), 17 deletions(-)
>
> diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
> index 35b278082391..8eb7e8b09c75 100644
> --- a/arch/powerpc/mm/slice.c
> +++ b/arch/powerpc/mm/slice.c
> @@ -151,32 +151,32 @@ static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
>  }
>  
>  #ifdef CONFIG_PPC_BOOK3S_64
> -static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
> +static struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
>  {
>  #ifdef CONFIG_PPC_64K_PAGES
>  	if (psize == MMU_PAGE_64K)
> -		return mm_ctx_slice_mask_64k(&mm->context);
> +		return mm_ctx_slice_mask_64k(&ctx);
>  #endif
>  	if (psize == MMU_PAGE_4K)
> -		return mm_ctx_slice_mask_4k(&mm->context);
> +		return mm_ctx_slice_mask_4k(&ctx);
>  #ifdef CONFIG_HUGETLB_PAGE
>  	if (psize == MMU_PAGE_16M)
> -		return mm_ctx_slice_mask_16m(&mm->context);
> +		return mm_ctx_slice_mask_16m(&ctx);
>  	if (psize == MMU_PAGE_16G)
> -		return mm_ctx_slice_mask_16g(&mm->context);
> +		return mm_ctx_slice_mask_16g(&ctx);
>  #endif
>  	BUG();
>  }
>  #elif defined(CONFIG_PPC_8xx)
> -static struct slice_mask *slice_mask_for_size(struct mm_struct *mm, int psize)
> +static struct slice_mask *slice_mask_for_size(mm_context_t *ctx, int psize)
>  {
>  	if (psize == mmu_virtual_psize)
> -		return &mm->context.mask_base_psize;
> +		return &ctx->mask_base_psize;
>  #ifdef CONFIG_HUGETLB_PAGE
>  	if (psize == MMU_PAGE_512K)
> -		return &mm->context.mask_512k;
> +		return &ctx->mask_512k;
>  	if (psize == MMU_PAGE_8M)
> -		return &mm->context.mask_8m;
> +		return &ctx->mask_8m;
>  #endif
>  	BUG();
>  }
> @@ -246,7 +246,7 @@ static void slice_convert(struct mm_struct *mm,
>  	slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
>  	slice_print_mask(" mask", mask);
>  
> -	psize_mask = slice_mask_for_size(mm, psize);
> +	psize_mask = slice_mask_for_size(&mm->context, psize);
>  
>  	/* We need to use a spinlock here to protect against
>  	 * concurrent 64k -> 4k demotion ...
> @@ -263,7 +263,7 @@ static void slice_convert(struct mm_struct *mm,
>  
>  		/* Update the slice_mask */
>  		old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
> -		old_mask = slice_mask_for_size(mm, old_psize);
> +		old_mask = slice_mask_for_size(&mm->context, old_psize);
>  		old_mask->low_slices &= ~(1u << i);
>  		psize_mask->low_slices |= 1u << i;
>  
> @@ -282,7 +282,7 @@ static void slice_convert(struct mm_struct *mm,
>  
>  		/* Update the slice_mask */
>  		old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
> -		old_mask = slice_mask_for_size(mm, old_psize);
> +		old_mask = slice_mask_for_size(&mm->context, old_psize);
>  		__clear_bit(i, old_mask->high_slices);
>  		__set_bit(i, psize_mask->high_slices);
>  
> @@ -538,7 +538,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
>  	/* First make up a "good" mask of slices that have the right size
>  	 * already
>  	 */
> -	maskp = slice_mask_for_size(mm, psize);
> +	maskp = slice_mask_for_size(&mm->context, psize);
>  
>  	/*
>  	 * Here "good" means slices that are already the right page size,
> @@ -565,7 +565,7 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
>  	 * a pointer to good mask for the next code to use.
>  	 */
>  	if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
> -		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
> +		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
>  		if (fixed)
>  			slice_or_mask(&good_mask, maskp, compat_maskp);
>  		else
> @@ -760,7 +760,7 @@ void slice_init_new_context_exec(struct mm_struct *mm)
>  	/*
>  	 * Slice mask cache starts zeroed, fill the default size cache.
>  	 */
> -	mask = slice_mask_for_size(mm, psize);
> +	mask = slice_mask_for_size(&mm->context, psize);
>  	mask->low_slices = ~0UL;
>  	if (SLICE_NUM_HIGH)
>  		bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
> @@ -819,14 +819,14 @@ int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
>  
>  	VM_BUG_ON(radix_enabled());
>  
> -	maskp = slice_mask_for_size(mm, psize);
> +	maskp = slice_mask_for_size(&mm->context, psize);
>  #ifdef CONFIG_PPC_64K_PAGES
>  	/* We need to account for 4k slices too */
>  	if (psize == MMU_PAGE_64K) {
>  		const struct slice_mask *compat_maskp;
>  		struct slice_mask available;
>  
> -		compat_maskp = slice_mask_for_size(mm, MMU_PAGE_4K);
> +		compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
>  		slice_or_mask(&available, maskp, compat_maskp);
>  		return !slice_check_range_fits(mm, &available, addr, len);
>  	}
> -- 
> 2.13.3



Thread overview: 35+ messages
2019-04-25 14:29 [PATCH v2 00/11] Reduce ifdef mess in slice.c Christophe Leroy
2019-04-25 14:29 ` [PATCH v2 01/11] powerpc/mm: fix erroneous duplicate slb_addr_limit init Christophe Leroy
2019-04-26  6:32   ` Aneesh Kumar K.V
2019-05-03  6:59   ` Michael Ellerman
2019-04-25 14:29 ` [PATCH v2 02/11] powerpc/mm: no slice for nohash/64 Christophe Leroy
2019-04-26  6:33   ` Aneesh Kumar K.V
2019-04-25 14:29 ` [PATCH v2 03/11] powerpc/mm: hand a context_t over to slice_mask_for_size() instead of mm_struct Christophe Leroy
2019-04-26  6:34   ` Aneesh Kumar K.V [this message]
2019-04-25 14:29 ` [PATCH v2 04/11] powerpc/mm: move slice_mask_for_size() into mmu.h Christophe Leroy
2019-04-26  6:36   ` Aneesh Kumar K.V
2019-04-25 14:29 ` [PATCH v2 05/11] powerpc/mm: get rid of mm_ctx_slice_mask_xxx() Christophe Leroy
2019-04-26  6:37   ` Aneesh Kumar K.V
2019-04-25 14:29 ` [PATCH v2 06/11] powerpc/mm: remove unnecessary #ifdef CONFIG_PPC64 Christophe Leroy
2019-04-25 14:29 ` [PATCH v2 07/11] powerpc/mm: remove a couple of #ifdef CONFIG_PPC_64K_PAGES in mm/slice.c Christophe Leroy
2019-04-26  6:40   ` Aneesh Kumar K.V
2019-04-25 14:29 ` [PATCH v2 08/11] powerpc/8xx: get rid of #ifdef CONFIG_HUGETLB_PAGE for slices Christophe Leroy
2019-04-25 14:29 ` [PATCH v2 09/11] powerpc/mm: define get_slice_psize() all the time Christophe Leroy
2019-04-26  6:42   ` Aneesh Kumar K.V
2019-04-25 14:29 ` [PATCH v2 10/11] powerpc/mm: define subarch SLB_ADDR_LIMIT_DEFAULT Christophe Leroy
2019-04-26  6:43   ` Aneesh Kumar K.V
2019-04-25 14:29 ` [PATCH v2 11/11] powerpc/mm: drop slice DEBUG Christophe Leroy
2019-04-26  6:44   ` Aneesh Kumar K.V
2019-04-26  6:49     ` Christophe Leroy
