All of lore.kernel.org
 help / color / mirror / Atom feed
From: "Kirill A. Shutemov" <kirill@shutemov.name>
To: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Matthew Wilcox <willy@linux.intel.com>,
	linux-mm@kvack.org, linux-nvdimm@lists.01.org,
	linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	x86@kernel.org
Subject: Re: [PATCH 1/8] mm: Add optional support for PUD-sized transparent hugepages
Date: Mon, 28 Dec 2015 12:05:51 +0200	[thread overview]
Message-ID: <20151228100551.GA4589@node.shutemov.name> (raw)
In-Reply-To: <1450974037-24775-2-git-send-email-matthew.r.wilcox@intel.com>

On Thu, Dec 24, 2015 at 11:20:30AM -0500, Matthew Wilcox wrote:
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 4bf3811..e14634f 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1958,6 +1977,17 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
>  	return ptl;
>  }
>  
> +/*
> + * No scalability reason to split PUD locks yet, but follow the same pattern
> + * as the PMD locks to make it easier if we have to.
> + */

I don't think it does any good unless you convert all the other places where
we use page_table_lock to protect pud tables (like __pud_alloc()) to the
same API.
I think this would deserve a separate patch.

> +static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
> +{
> +	spinlock_t *ptl = &mm->page_table_lock;
> +	spin_lock(ptl);
> +	return ptl;
> +}
> +
>  extern void free_area_init(unsigned long * zones_size);
>  extern void free_area_init_node(int nid, unsigned long * zones_size,
>  		unsigned long zone_start_pfn, unsigned long *zholes_size);

...

> diff --git a/mm/memory.c b/mm/memory.c
> index 416b129..7328df0 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1220,9 +1220,27 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
>  	pud = pud_offset(pgd, addr);
>  	do {
>  		next = pud_addr_end(addr, end);
> +		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
> +			if (next - addr != HPAGE_PUD_SIZE) {
> +#ifdef CONFIG_DEBUG_VM

IS_ENABLED(CONFIG_DEBUG_VM) ?

> +				if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
> +					pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
> +						__func__, addr, end,
> +						vma->vm_start,
> +						vma->vm_end);

dump_vma(), I guess.

> +					BUG();
> +				}
> +#endif
> +				split_huge_pud(vma, pud, addr);
> +			} else if (zap_huge_pud(tlb, vma, pud, addr))
> +				goto next;
> +			/* fall through */
> +		}
>  		if (pud_none_or_clear_bad(pud))
>  			continue;
>  		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
> +next:
> +		cond_resched();
>  	} while (pud++, addr = next, addr != end);
>  
>  	return addr;
-- 
 Kirill A. Shutemov

--
To unsubscribe, send a message with 'unsubscribe linux-mm' in
the body to majordomo@kvack.org.  For more info on Linux MM,
see: http://www.linux-mm.org/ .
Don't email: email@kvack.org

WARNING: multiple messages have this Message-ID (diff)
From: "Kirill A. Shutemov" <kirill@shutemov.name>
To: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Matthew Wilcox <willy@linux.intel.com>,
	linux-mm@kvack.org, linux-nvdimm@ml01.01.org,
	linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	x86@kernel.org
Subject: Re: [PATCH 1/8] mm: Add optional support for PUD-sized transparent hugepages
Date: Mon, 28 Dec 2015 12:05:51 +0200	[thread overview]
Message-ID: <20151228100551.GA4589@node.shutemov.name> (raw)
In-Reply-To: <1450974037-24775-2-git-send-email-matthew.r.wilcox@intel.com>

On Thu, Dec 24, 2015 at 11:20:30AM -0500, Matthew Wilcox wrote:
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 4bf3811..e14634f 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -1958,6 +1977,17 @@ static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
>  	return ptl;
>  }
>  
> +/*
> + * No scalability reason to split PUD locks yet, but follow the same pattern
> + * as the PMD locks to make it easier if we have to.
> + */

I don't think it does any good unless you convert all the other places where
we use page_table_lock to protect pud tables (like __pud_alloc()) to the
same API.
I think this would deserve a separate patch.

> +static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
> +{
> +	spinlock_t *ptl = &mm->page_table_lock;
> +	spin_lock(ptl);
> +	return ptl;
> +}
> +
>  extern void free_area_init(unsigned long * zones_size);
>  extern void free_area_init_node(int nid, unsigned long * zones_size,
>  		unsigned long zone_start_pfn, unsigned long *zholes_size);

...

> diff --git a/mm/memory.c b/mm/memory.c
> index 416b129..7328df0 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -1220,9 +1220,27 @@ static inline unsigned long zap_pud_range(struct mmu_gather *tlb,
>  	pud = pud_offset(pgd, addr);
>  	do {
>  		next = pud_addr_end(addr, end);
> +		if (pud_trans_huge(*pud) || pud_devmap(*pud)) {
> +			if (next - addr != HPAGE_PUD_SIZE) {
> +#ifdef CONFIG_DEBUG_VM

IS_ENABLED(CONFIG_DEBUG_VM) ?

> +				if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
> +					pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
> +						__func__, addr, end,
> +						vma->vm_start,
> +						vma->vm_end);

dump_vma(), I guess.

> +					BUG();
> +				}
> +#endif
> +				split_huge_pud(vma, pud, addr);
> +			} else if (zap_huge_pud(tlb, vma, pud, addr))
> +				goto next;
> +			/* fall through */
> +		}
>  		if (pud_none_or_clear_bad(pud))
>  			continue;
>  		next = zap_pmd_range(tlb, vma, pud, addr, next, details);
> +next:
> +		cond_resched();
>  	} while (pud++, addr = next, addr != end);
>  
>  	return addr;
-- 
 Kirill A. Shutemov

  reply	other threads:[~2015-12-28 10:05 UTC|newest]

Thread overview: 45+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2015-12-24 16:20 [PATCH 0/8] Support for transparent PUD pages Matthew Wilcox
2015-12-24 16:20 ` Matthew Wilcox
2015-12-24 16:20 ` Matthew Wilcox
2015-12-24 16:20 ` [PATCH 1/8] mm: Add optional support for PUD-sized transparent hugepages Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-28 10:05   ` Kirill A. Shutemov [this message]
2015-12-28 10:05     ` Kirill A. Shutemov
2016-01-02 17:06     ` Matthew Wilcox
2016-01-02 17:06       ` Matthew Wilcox
2016-01-04 20:30       ` Kirill A. Shutemov
2016-01-04 20:30         ` Kirill A. Shutemov
2015-12-28 10:11   ` Kirill A. Shutemov
2015-12-28 10:11     ` Kirill A. Shutemov
2015-12-24 16:20 ` [PATCH 2/8] mincore: Add support for PUDs Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20 ` [PATCH 3/8] procfs: Add support for PUDs to smaps, clear_refs and pagemap Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20 ` [PATCH 4/8] x86: Add support for PUD-sized transparent hugepages Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20 ` [PATCH 5/8] dax: Support for transparent PUD pages Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20 ` [PATCH 6/8] block_dev: Support PUD DAX mappings Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20 ` [PATCH 7/8] xfs: Support for transparent PUD pages Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-30 23:30   ` Dave Chinner
2015-12-30 23:30     ` Dave Chinner
2016-01-02 16:43     ` Matthew Wilcox
2016-01-02 16:43       ` Matthew Wilcox
2016-01-03 20:33       ` Dave Chinner
2016-01-03 20:33         ` Dave Chinner
2016-01-04 20:41         ` Kirill A. Shutemov
2016-01-04 20:41           ` Kirill A. Shutemov
2016-01-04 22:01           ` Matthew Wilcox
2016-01-04 22:01             ` Matthew Wilcox
2015-12-24 16:20 ` [PATCH 8/8] ext4: Transparent support for PUD-sized transparent huge pages Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox
2015-12-24 16:20   ` Matthew Wilcox

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20151228100551.GA4589@node.shutemov.name \
    --to=kirill@shutemov.name \
    --cc=linux-fsdevel@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=linux-nvdimm@lists.01.org \
    --cc=matthew.r.wilcox@intel.com \
    --cc=willy@linux.intel.com \
    --cc=x86@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.