* [PATCH] thp: change pmd_trans_huge_lock() interface to return ptl
From: Kirill A. Shutemov @ 2016-01-19 13:24 UTC
  To: Andrew Morton; +Cc: Linus Torvalds, Minchan Kim, linux-mm, Kirill A. Shutemov

After the THP refcounting rework we have only two possible outcomes from
pmd_trans_huge_lock(): success and failure. Returning ptl through a
pointer argument doesn't make much sense in this case.

Let's convert pmd_trans_huge_lock() to return ptl on success and NULL on
failure.
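
For illustration, the caller-side change looks like this (a minimal
before/after sketch with a made-up pagewalk-style callback, not a
function from the tree; the real conversions are in the diff below):

	/* Hypothetical callback, before this patch: */
	static int example_pmd_entry(pmd_t *pmd, struct vm_area_struct *vma)
	{
		spinlock_t *ptl;

		if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
			/* *pmd is stable here; handle the huge pmd */
			spin_unlock(ptl);
			return 0;
		}
		/* not a huge pmd: fall back to the pte level */
		return 0;
	}

	/* The same hypothetical callback, after this patch: */
	static int example_pmd_entry(pmd_t *pmd, struct vm_area_struct *vma)
	{
		spinlock_t *ptl;

		ptl = pmd_trans_huge_lock(pmd, vma);
		if (ptl) {
			/* *pmd is stable here; handle the huge pmd */
			spin_unlock(ptl);
			return 0;
		}
		/* not a huge pmd: fall back to the pte level */
		return 0;
	}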

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
---
 fs/proc/task_mmu.c      | 12 ++++++++----
 include/linux/huge_mm.h | 16 ++++++++--------
 mm/huge_memory.c        | 24 ++++++++++++++----------
 mm/memcontrol.c         |  6 ++++--
 mm/mincore.c            |  3 ++-
 5 files changed, 36 insertions(+), 25 deletions(-)

diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 71ffc91060f6..85d16c67c33e 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -602,7 +602,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		smaps_pmd_entry(pmd, addr, walk);
 		spin_unlock(ptl);
 		return 0;
@@ -913,7 +914,8 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (cp->type == CLEAR_REFS_SOFT_DIRTY) {
 			clear_soft_dirty_pmd(vma, addr, pmd);
 			goto out;
@@ -1187,7 +1189,8 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
 	int err = 0;
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-	if (pmd_trans_huge_lock(pmdp, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmdp, vma);
+	if (ptl) {
 		u64 flags = 0, frame = 0;
 		pmd_t pmd = *pmdp;
 
@@ -1519,7 +1522,8 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
 	pte_t *orig_pte;
 	pte_t *pte;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pte_t huge_pte = *(pte_t *)pmd;
 		struct page *page;
 
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index cfe81e10bd54..459fd25b378e 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -120,15 +120,15 @@ extern void vma_adjust_trans_huge(struct vm_area_struct *vma,
 				    unsigned long start,
 				    unsigned long end,
 				    long adjust_next);
-extern bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl);
+extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma);
 /* mmap_sem must be held on entry */
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
 	VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
 	if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
-		return __pmd_trans_huge_lock(pmd, vma, ptl);
+		return __pmd_trans_huge_lock(pmd, vma);
 	else
-		return false;
+		return NULL;
 }
@@ -190,10 +190,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
 					 long adjust_next)
 {
 }
-static inline bool pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
+		struct vm_area_struct *vma)
 {
-	return false;
+	return NULL;
 }
 
 static inline int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 21fda6a10e89..ba655d4d6864 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1560,7 +1560,8 @@ int madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	struct mm_struct *mm = tlb->mm;
 	int ret = 0;
 
-	if (!pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		goto out_unlocked;
 
 	orig_pmd = *pmd;
@@ -1627,7 +1628,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
 	pmd_t orig_pmd;
 	spinlock_t *ptl;
 
-	if (!__pmd_trans_huge_lock(pmd, vma, &ptl))
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (!ptl)
 		return 0;
 	/*
 	 * For architectures like ppc64 we look at deposited pgtable
@@ -1690,7 +1692,8 @@ bool move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
 	 * We don't have to worry about the ordering of src and dst
 	 * ptlocks because exclusive mmap_sem prevents deadlock.
 	 */
-	if (__pmd_trans_huge_lock(old_pmd, vma, &old_ptl)) {
+	old_ptl = __pmd_trans_huge_lock(old_pmd, vma);
+	if (old_ptl) {
 		new_ptl = pmd_lockptr(mm, new_pmd);
 		if (new_ptl != old_ptl)
 			spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
@@ -1724,7 +1727,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	spinlock_t *ptl;
 	int ret = 0;
 
-	if (__pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = __pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		pmd_t entry;
 		bool preserve_write = prot_numa && pmd_write(*pmd);
 		ret = 1;
@@ -1760,14 +1764,14 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
- * Note that if it returns true, this routine returns without unlocking page
- * table lock. So callers must unlock it.
+ * Note that if it returns a non-NULL ptl, this routine returns without
+ * unlocking page table lock. So callers must unlock it.
  */
-bool __pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma,
-		spinlock_t **ptl)
+spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
 {
-	*ptl = pmd_lock(vma->vm_mm, pmd);
+	spinlock_t *ptl;
+	ptl = pmd_lock(vma->vm_mm, pmd);
 	if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
-		return true;
-	spin_unlock(*ptl);
-	return false;
+		return ptl;
+	spin_unlock(ptl);
+	return NULL;
 }
 
 #define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0eda67376df4..2d91438cf14a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4673,7 +4673,8 @@ static int mem_cgroup_count_precharge_pte_range(pmd_t *pmd,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (get_mctgt_type_thp(vma, addr, *pmd, NULL) == MC_TARGET_PAGE)
 			mc.precharge += HPAGE_PMD_NR;
 		spin_unlock(ptl);
@@ -4861,7 +4862,8 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
 	union mc_target target;
 	struct page *page;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		if (mc.precharge < HPAGE_PMD_NR) {
 			spin_unlock(ptl);
 			return 0;
diff --git a/mm/mincore.c b/mm/mincore.c
index 2a565ed8bb49..563f32045490 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -117,7 +117,8 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
 	unsigned char *vec = walk->private;
 	int nr = (end - addr) >> PAGE_SHIFT;
 
-	if (pmd_trans_huge_lock(pmd, vma, &ptl)) {
+	ptl = pmd_trans_huge_lock(pmd, vma);
+	if (ptl) {
 		memset(vec, 1, nr);
 		spin_unlock(ptl);
 		goto out;
-- 
2.7.0.rc3


* Re: [PATCH] thp: change pmd_trans_huge_lock() interface to return ptl
From: Michal Hocko @ 2016-01-19 18:40 UTC
  To: Kirill A. Shutemov; +Cc: Andrew Morton, Linus Torvalds, Minchan Kim, linux-mm

On Tue 19-01-16 16:24:01, Kirill A. Shutemov wrote:
> After the THP refcounting rework we have only two possible outcomes from
> pmd_trans_huge_lock(): success and failure. Returning ptl through a
> pointer argument doesn't make much sense in this case.
> 
> Let's convert pmd_trans_huge_lock() to return ptl on success and NULL on
> failure.

Looks good to me and fits better with other page table locking
functions.
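
FWIW, this also mirrors how pmd_lock() hands the lock back to the caller,
so the two end up being used the same way (a minimal sketch, not taken
from the patch):

	spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
	/* ... operate on *pmd with the page table lock held ... */
	spin_unlock(ptl);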

> 
> Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>

Acked-by: Michal Hocko <mhocko@suse.com>

-- 
Michal Hocko
SUSE Labs
