* [PATCH -next v2] riscv/mm/fault: simplify code for do_page_fault()
From: Tong Tiangen @ 2022-10-29  8:47 UTC
  To: Paul Walmsley, Palmer Dabbelt, Albert Ou
  Cc: linux-riscv, linux-kernel, Tong Tiangen, wangkefeng.wang, Guohanjun

To make the code more hierarchical and readable, fold the vma-related
checks into a new helper, __do_page_fault(). To simplify the code
further, move the setting of tsk->thread.bad_cause into bad_area().
No functional change intended.

Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
v1 -> v2:
  Fixed incorrect use of macro VM_FAULT_BADMAP.

 arch/riscv/mm/fault.c | 77 +++++++++++++++++++++++--------------------
 1 file changed, 41 insertions(+), 36 deletions(-)
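
The combined result is easier to see than the individual hunks suggest.
After this patch the fault-handling core of do_page_fault() reads roughly
as follows (a simplified sketch reconstructed from the hunks below, with
the setup, retry and mm_fault_error() paths elided; the diff remains
authoritative):

	mmap_read_lock(mm);

	fault = __do_page_fault(mm, addr, flags, regs);

	/* No usable VMA for this address (or the stack could not grow). */
	if (unlikely(fault & VM_FAULT_BADMAP))
		return bad_area(regs, mm, code, addr);

	/* A VMA exists, but this type of access is not permitted. */
	if (unlikely(fault & VM_FAULT_BADACCESS)) {
		code = SEGV_ACCERR;
		return bad_area(regs, mm, code, addr);
	}

	/* ... existing retry and mm_fault_error() handling continues ... */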

diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index d86f7cebd4a7..3fdc2eebdd36 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -85,6 +85,8 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
 
 static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
+	current->thread.bad_cause = regs->cause;
+
 	/*
 	 * Something tried to access memory that isn't in our memory map.
 	 * Fix it, but check if it's kernel or user first.
@@ -200,6 +202,38 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
 	return false;
 }
 
+#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
+#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
+
+static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
+				unsigned int mm_flags, struct pt_regs *regs)
+{
+	struct vm_area_struct *vma = find_vma(mm, addr);
+
+	if (unlikely(!vma))
+		return VM_FAULT_BADMAP;
+
+	if (unlikely(vma->vm_start > addr)) {
+		if (unlikely(!(vma->vm_flags & VM_GROWSDOWN) ||
+				expand_stack(vma, addr)))
+			return VM_FAULT_BADMAP;
+	}
+
+	/*
+	 * Ok, we have a good vm_area for this memory access, so
+	 * we can handle it.
+	 */
+	if (unlikely(access_error(regs->cause, vma)))
+		return VM_FAULT_BADACCESS;
+
+	/*
+	 * If for any reason at all we could not handle the fault,
+	 * make sure we exit gracefully rather than endlessly redo
+	 * the fault.
+	 */
+	return handle_mm_fault(vma, addr, mm_flags, regs);
+}
+
 /*
  * This routine handles page faults.  It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
@@ -207,7 +241,6 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
 asmlinkage void do_page_fault(struct pt_regs *regs)
 {
 	struct task_struct *tsk;
-	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	unsigned long addr, cause;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
@@ -280,44 +313,16 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 		flags |= FAULT_FLAG_INSTRUCTION;
 retry:
 	mmap_read_lock(mm);
-	vma = find_vma(mm, addr);
-	if (unlikely(!vma)) {
-		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (likely(vma->vm_start <= addr))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (unlikely(expand_stack(vma, addr))) {
-		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
-		return;
-	}
 
-	/*
-	 * Ok, we have a good vm_area for this memory access, so
-	 * we can handle it.
-	 */
-good_area:
-	code = SEGV_ACCERR;
+	fault = __do_page_fault(mm, addr, flags, regs);
 
-	if (unlikely(access_error(cause, vma))) {
-		tsk->thread.bad_cause = cause;
-		bad_area(regs, mm, code, addr);
-		return;
-	}
+	if (unlikely(fault & VM_FAULT_BADMAP))
+		return bad_area(regs, mm, code, addr);
 
-	/*
-	 * If for any reason at all we could not handle the fault,
-	 * make sure we exit gracefully rather than endlessly redo
-	 * the fault.
-	 */
-	fault = handle_mm_fault(vma, addr, flags, regs);
+	if (unlikely(fault & VM_FAULT_BADACCESS)) {
+		code = SEGV_ACCERR;
+		return bad_area(regs, mm, code, addr);
+	}
 
 	/*
 	 * If we need to retry but a fatal signal is pending, handle the
-- 
2.25.1



* Re: [PATCH -next v2] riscv/mm/fault: simplify code for do_page_fault()
From: Palmer Dabbelt @ 2022-12-03  3:08 UTC
  To: tongtiangen
  Cc: Paul Walmsley, aou, linux-riscv, linux-kernel, tongtiangen,
	wangkefeng.wang, guohanjun

On Sat, 29 Oct 2022 01:47:15 PDT (-0700), tongtiangen@huawei.com wrote:
> To make the code more hierarchical and readable, fold the vma-related
> checks into a new helper, __do_page_fault(). To simplify the code
> further, move the setting of tsk->thread.bad_cause into bad_area().
> No functional change intended.
>
> Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
> ---
> v1 -> v2:
>   Fixed incorrect use of macro VM_FAULT_BADMAP.
>
>  arch/riscv/mm/fault.c | 77 +++++++++++++++++++++++--------------------
>  1 file changed, 41 insertions(+), 36 deletions(-)
>
> diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
> index d86f7cebd4a7..3fdc2eebdd36 100644
> --- a/arch/riscv/mm/fault.c
> +++ b/arch/riscv/mm/fault.c
> @@ -85,6 +85,8 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
>
>  static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
>  {
> +	current->thread.bad_cause = regs->cause;
> +
>  	/*
>  	 * Something tried to access memory that isn't in our memory map.
>  	 * Fix it, but check if it's kernel or user first.
> @@ -200,6 +202,38 @@ static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
>  	return false;
>  }
>
> +#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
> +#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)

These alias with VM_FAULT_HINDEX_MASK.  I just sent along
<https://lore.kernel.org/all/20221203030356.3917-1-palmer@rivosinc.com/T/#t>
to clean that up for the other ports that also do so, so I'm going to
hold off on this patch until I see what the comments on that one look
like.
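
(For context, the clash is with the generic vm_fault_t bit layout in
include/linux/mm_types.h, where bits 16-19 carry the hugetlb page-size
index.  Roughly, as of kernels around the time of this series:

	/* include/linux/mm_types.h (abridged) */
	VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000,

	#define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16))
	#define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf)

so arch-private codes such as 0x010000 and 0x020000 land inside
VM_FAULT_HINDEX_MASK and can collide with a hugetlb hindex value.)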

Otherwise this looks fine, though it might not make 6.2 pending the
discussion over there.

Thanks!

