All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH] x86/hugetlb: use set_pmd for huge pte operations
@ 2010-07-26 19:45 Jeremy Fitzhardinge
  2010-08-04 18:31 ` Jeremy Fitzhardinge
  0 siblings, 1 reply; 4+ messages in thread
From: Jeremy Fitzhardinge @ 2010-07-26 19:45 UTC (permalink / raw)
  To: H. Peter Anvin
  Cc: the arch/x86 maintainers, Linux Kernel Mailing List, Dave McCracken



On x86, a huge pte is logically a pte, but structurally a pmd.  Among
other issues, pmds and ptes overload some flags for multiple uses (PAT
vs PSE), so it is necessary to know which structural level a pagetable
entry is in order to interpret it properly.

When huge pages are used within a paravirtualized system, it is therefore
appropriate to use the pmd set of functions to operate on them, so that
the hypervisor can correctly validate the update.

[ Add fix for 32-bit non-PAE with two-level pagetable - Jeremy ]

Signed-off-by: Dave McCracken <dave.mccracken@oracle.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>

diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 439a9ac..bf88684 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -36,16 +36,28 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
  	free_pgd_range(tlb, addr, end, floor, ceiling);
  }

+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
  static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
  				   pte_t *ptep, pte_t pte)
  {
-	set_pte_at(mm, addr, ptep, pte);
+#if PAGETABLE_LEVELS >= 3
+	set_pmd((pmd_t *)ptep, native_make_pmd(native_pte_val(pte)));
+#else
+	set_pgd((pgd_t *)ptep, native_make_pgd(native_pte_val(pte)));
+#endif
  }

  static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
  					    unsigned long addr, pte_t *ptep)
  {
-	return ptep_get_and_clear(mm, addr, ptep);
+	pte_t pte = huge_ptep_get(ptep);
+
+	set_huge_pte_at(mm, addr, ptep, __pte(0));
+	return pte;
  }

  static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -66,19 +78,25 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
  static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
  					   unsigned long addr, pte_t *ptep)
  {
-	ptep_set_wrprotect(mm, addr, ptep);
+	pte_t pte = huge_ptep_get(ptep);
+
+	pte = pte_wrprotect(pte);
+	set_huge_pte_at(mm, addr, ptep, pte);
  }

  static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
  					     unsigned long addr, pte_t *ptep,
  					     pte_t pte, int dirty)
  {
-	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-}
+	pte_t oldpte = huge_ptep_get(ptep);
+	int changed = !pte_same(oldpte, pte);

-static inline pte_t huge_ptep_get(pte_t *ptep)
-{
-	return *ptep;
+	if (changed && dirty) {
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		flush_tlb_page(vma, addr);
+	}
+
+	return changed;
  }

  static inline int arch_prepare_hugepage(struct page *page)



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* Re: [PATCH] x86/hugetlb: use set_pmd for huge pte operations
  2010-07-26 19:45 [PATCH] x86/hugetlb: use set_pmd for huge pte operations Jeremy Fitzhardinge
@ 2010-08-04 18:31 ` Jeremy Fitzhardinge
  0 siblings, 0 replies; 4+ messages in thread
From: Jeremy Fitzhardinge @ 2010-08-04 18:31 UTC (permalink / raw)
  To: H. Peter Anvin
  Cc: the arch/x86 maintainers, Linux Kernel Mailing List,
	Dave McCracken, Thomas Gleixner, Ingo Molnar

  On 07/26/2010 12:45 PM, Jeremy Fitzhardinge wrote:

Ping?  I was about to send this to Linus, but I realized that I hadn't 
got any x86 acks on it.  Any comments/complaints?

Thanks,
     J

>
> On x86, a huge pte is logically a pte, but structurally a pmd.  Among
> other issues, pmds and ptes overload some flags for multiple uses (PAT
> vs PSE), so it is necessary to know which structural level a pagetable
> entry is in order to interpret it properly.
>
> When huge pages are used within a paravirtualized system, it is therefore
> appropriate to use the pmd set of functions to operate on them, so that
> the hypervisor can correctly validate the update.
>
> [ Add fix for 32-bit non-PAE with two-level pagetable - Jeremy ]
>
> Signed-off-by: Dave McCracken <dave.mccracken@oracle.com>
> Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
>
> diff --git a/arch/x86/include/asm/hugetlb.h 
> b/arch/x86/include/asm/hugetlb.h
> index 439a9ac..bf88684 100644
> --- a/arch/x86/include/asm/hugetlb.h
> +++ b/arch/x86/include/asm/hugetlb.h
> @@ -36,16 +36,28 @@ static inline void hugetlb_free_pgd_range(struct 
> mmu_gather *tlb,
>      free_pgd_range(tlb, addr, end, floor, ceiling);
>  }
>
> +static inline pte_t huge_ptep_get(pte_t *ptep)
> +{
> +    return *ptep;
> +}
> +
>  static inline void set_huge_pte_at(struct mm_struct *mm, unsigned 
> long addr,
>                     pte_t *ptep, pte_t pte)
>  {
> -    set_pte_at(mm, addr, ptep, pte);
> +#if PAGETABLE_LEVELS >= 3
> +    set_pmd((pmd_t *)ptep, native_make_pmd(native_pte_val(pte)));
> +#else
> +    set_pgd((pgd_t *)ptep, native_make_pgd(native_pte_val(pte)));
> +#endif
>  }
>
>  static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
>                          unsigned long addr, pte_t *ptep)
>  {
> -    return ptep_get_and_clear(mm, addr, ptep);
> +    pte_t pte = huge_ptep_get(ptep);
> +
> +    set_huge_pte_at(mm, addr, ptep, __pte(0));
> +    return pte;
>  }
>
>  static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
> @@ -66,19 +78,25 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
>  static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
>                         unsigned long addr, pte_t *ptep)
>  {
> -    ptep_set_wrprotect(mm, addr, ptep);
> +    pte_t pte = huge_ptep_get(ptep);
> +
> +    pte = pte_wrprotect(pte);
> +    set_huge_pte_at(mm, addr, ptep, pte);
>  }
>
>  static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
>                           unsigned long addr, pte_t *ptep,
>                           pte_t pte, int dirty)
>  {
> -    return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
> -}
> +    pte_t oldpte = huge_ptep_get(ptep);
> +    int changed = !pte_same(oldpte, pte);
>
> -static inline pte_t huge_ptep_get(pte_t *ptep)
> -{
> -    return *ptep;
> +    if (changed && dirty) {
> +        set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
> +        flush_tlb_page(vma, addr);
> +    }
> +
> +    return changed;
>  }
>
>  static inline int arch_prepare_hugepage(struct page *page)
>
>
> -- 
> To unsubscribe from this list: send the line "unsubscribe 
> linux-kernel" in
> the body of a message to majordomo@vger.kernel.org
> More majordomo info at  http://vger.kernel.org/majordomo-info.html
> Please read the FAQ at  http://www.tux.org/lkml/
>


^ permalink raw reply	[flat|nested] 4+ messages in thread

* [PATCH] x86/hugetlb: use set_pmd for huge pte operations
@ 2010-07-20 19:55 ` Jeremy Fitzhardinge
  0 siblings, 0 replies; 4+ messages in thread
From: Jeremy Fitzhardinge @ 2010-07-20 19:55 UTC (permalink / raw)
  To: H. Peter Anvin
  Cc: Ingo Molnar, the arch/x86 maintainers, Xen-devel, Dave McCracken,
	Linux Kernel Mailing List

From: Dave McCracken <dave.mccracken@oracle.com>

On x86, a huge pte is logically a pte, but structurally a pmd.  Among
other issues, pmds and ptes overload some flags for multiple uses (PAT
vs PSE), so it is necessary to know which structural level a pagetable
entry is in order to interpret it properly.

When huge pages are used within a paravirtualized system, it is therefore
appropriate to use the pmd set of functions to operate on them, so that
the hypervisor can correctly validate the update.

Signed-off-by: Dave McCracken <dave.mccracken@oracle.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>

diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 439a9ac..4cfd4de 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -36,16 +36,24 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 				   pte_t *ptep, pte_t pte)
 {
-	set_pte_at(mm, addr, ptep, pte);
+	set_pmd((pmd_t *)ptep, __pmd(pte_val(pte)));
 }
 
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr, pte_t *ptep)
 {
-	return ptep_get_and_clear(mm, addr, ptep);
+	pte_t pte = huge_ptep_get(ptep);
+
+	set_huge_pte_at(mm, addr, ptep, __pte(0));
+	return pte;
 }
 
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -66,19 +74,25 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
 {
-	ptep_set_wrprotect(mm, addr, ptep);
+	pte_t pte = huge_ptep_get(ptep);
+
+	pte = pte_wrprotect(pte);
+	set_huge_pte_at(mm, addr, ptep, pte);
 }
 
 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
-	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-}
+	pte_t oldpte = huge_ptep_get(ptep);
+	int changed = !pte_same(oldpte, pte);
 
-static inline pte_t huge_ptep_get(pte_t *ptep)
-{
-	return *ptep;
+	if (changed && dirty) {
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		flush_tlb_page(vma, addr);
+	}
+
+	return changed;
 }
 
 static inline int arch_prepare_hugepage(struct page *page)



^ permalink raw reply related	[flat|nested] 4+ messages in thread

* [PATCH] x86/hugetlb: use set_pmd for huge pte operations
@ 2010-07-20 19:55 ` Jeremy Fitzhardinge
  0 siblings, 0 replies; 4+ messages in thread
From: Jeremy Fitzhardinge @ 2010-07-20 19:55 UTC (permalink / raw)
  To: H. Peter Anvin
  Cc: Xen-devel, Ingo Molnar, Dave McCracken, the arch/x86 maintainers,
	Linux Kernel Mailing List

From: Dave McCracken <dave.mccracken@oracle.com>

On x86, a huge pte is logically a pte, but structurally a pmd.  Among
other issues, pmds and ptes overload some flags for multiple uses (PAT
vs PSE), so it is necessary to know which structural level a pagetable
entry is in order to interpret it properly.

When huge pages are used within a paravirtualized system, it is therefore
appropriate to use the pmd set of functions to operate on them, so that
the hypervisor can correctly validate the update.

Signed-off-by: Dave McCracken <dave.mccracken@oracle.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>

diff --git a/arch/x86/include/asm/hugetlb.h b/arch/x86/include/asm/hugetlb.h
index 439a9ac..4cfd4de 100644
--- a/arch/x86/include/asm/hugetlb.h
+++ b/arch/x86/include/asm/hugetlb.h
@@ -36,16 +36,24 @@ static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 	free_pgd_range(tlb, addr, end, floor, ceiling);
 }
 
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+	return *ptep;
+}
+
 static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
 				   pte_t *ptep, pte_t pte)
 {
-	set_pte_at(mm, addr, ptep, pte);
+	set_pmd((pmd_t *)ptep, __pmd(pte_val(pte)));
 }
 
 static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
 					    unsigned long addr, pte_t *ptep)
 {
-	return ptep_get_and_clear(mm, addr, ptep);
+	pte_t pte = huge_ptep_get(ptep);
+
+	set_huge_pte_at(mm, addr, ptep, __pte(0));
+	return pte;
 }
 
 static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
@@ -66,19 +74,25 @@ static inline pte_t huge_pte_wrprotect(pte_t pte)
 static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
 					   unsigned long addr, pte_t *ptep)
 {
-	ptep_set_wrprotect(mm, addr, ptep);
+	pte_t pte = huge_ptep_get(ptep);
+
+	pte = pte_wrprotect(pte);
+	set_huge_pte_at(mm, addr, ptep, pte);
 }
 
 static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
 					     unsigned long addr, pte_t *ptep,
 					     pte_t pte, int dirty)
 {
-	return ptep_set_access_flags(vma, addr, ptep, pte, dirty);
-}
+	pte_t oldpte = huge_ptep_get(ptep);
+	int changed = !pte_same(oldpte, pte);
 
-static inline pte_t huge_ptep_get(pte_t *ptep)
-{
-	return *ptep;
+	if (changed && dirty) {
+		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+		flush_tlb_page(vma, addr);
+	}
+
+	return changed;
 }
 
 static inline int arch_prepare_hugepage(struct page *page)

^ permalink raw reply related	[flat|nested] 4+ messages in thread

end of thread, other threads:[~2010-08-04 18:31 UTC | newest]

Thread overview: 4+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2010-07-26 19:45 [PATCH] x86/hugetlb: use set_pmd for huge pte operations Jeremy Fitzhardinge
2010-08-04 18:31 ` Jeremy Fitzhardinge
  -- strict thread matches above, loose matches on Subject: below --
2010-07-20 19:55 Jeremy Fitzhardinge
2010-07-20 19:55 ` Jeremy Fitzhardinge

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.