* [PATCH 0/2] kdump: simplify code
@ 2021-12-10 13:35 Tiezhu Yang
  2021-12-10 13:36 ` [PATCH 1/2] kdump: vmcore: move copy_to() from vmcore.c to uaccess.h Tiezhu Yang
  2021-12-10 13:36 ` [PATCH 2/2] kdump: crashdump: use copy_to() to simplify the related code Tiezhu Yang
  0 siblings, 2 replies; 6+ messages in thread
From: Tiezhu Yang @ 2021-12-10 13:35 UTC (permalink / raw)
  To: Dave Young, Baoquan He, Vivek Goyal, Andrew Morton
  Cc: linux-arm-kernel, linux-ia64, linux-mips, linuxppc-dev,
	linux-riscv, linux-sh, x86, linux-fsdevel, kexec, linux-kernel

Tiezhu Yang (2):
  kdump: vmcore: move copy_to() from vmcore.c to uaccess.h
  kdump: crashdump: use copy_to() to simplify the related code

 arch/arm/kernel/crash_dump.c     | 10 ++--------
 arch/arm64/kernel/crash_dump.c   | 10 ++--------
 arch/ia64/kernel/crash_dump.c    | 10 ++++------
 arch/mips/kernel/crash_dump.c    |  9 ++-------
 arch/powerpc/kernel/crash_dump.c |  7 ++-----
 arch/riscv/kernel/crash_dump.c   |  9 ++-------
 arch/sh/kernel/crash_dump.c      |  9 ++-------
 arch/x86/kernel/crash_dump_32.c  |  9 ++-------
 arch/x86/kernel/crash_dump_64.c  |  9 ++-------
 fs/proc/vmcore.c                 | 14 --------------
 include/linux/uaccess.h          | 14 ++++++++++++++
 11 files changed, 34 insertions(+), 76 deletions(-)

-- 
2.1.0



* [PATCH 1/2] kdump: vmcore: move copy_to() from vmcore.c to uaccess.h
  2021-12-10 13:35 [PATCH 0/2] kdump: simplify code Tiezhu Yang
@ 2021-12-10 13:36 ` Tiezhu Yang
  2021-12-10 16:59   ` Andrew Morton
  2021-12-10 13:36 ` [PATCH 2/2] kdump: crashdump: use copy_to() to simplify the related code Tiezhu Yang
  1 sibling, 1 reply; 6+ messages in thread
From: Tiezhu Yang @ 2021-12-10 13:36 UTC (permalink / raw)
  To: Dave Young, Baoquan He, Vivek Goyal, Andrew Morton
  Cc: linux-arm-kernel, linux-ia64, linux-mips, linuxppc-dev,
	linux-riscv, linux-sh, x86, linux-fsdevel, kexec, linux-kernel

In arch/*/kernel/crash_dump*.c, there is similar code around
copy_oldmem_page(). Move copy_to() from vmcore.c to uaccess.h so
that it can be used to simplify the related code.

Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
---
 fs/proc/vmcore.c        | 14 --------------
 include/linux/uaccess.h | 14 ++++++++++++++
 2 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 509f851..c5976a8 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -238,20 +238,6 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
 	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
 }
 
-/*
- * Copy to either kernel or user space
- */
-static int copy_to(void *target, void *src, size_t size, int userbuf)
-{
-	if (userbuf) {
-		if (copy_to_user((char __user *) target, src, size))
-			return -EFAULT;
-	} else {
-		memcpy(target, src, size);
-	}
-	return 0;
-}
-
 #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
 static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
 {
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index ac03940..4a6c3e4 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -201,6 +201,20 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
 	return n;
 }
 
+/*
+ * Copy to either kernel or user space
+ */
+static inline int copy_to(void *target, void *src, size_t size, int userbuf)
+{
+	if (userbuf) {
+		if (copy_to_user((char __user *) target, src, size))
+			return -EFAULT;
+	} else {
+		memcpy(target, src, size);
+	}
+	return 0;
+}
+
 #ifndef copy_mc_to_kernel
 /*
  * Without arch opt-in this generic copy_mc_to_kernel() will not handle
-- 
2.1.0



* [PATCH 2/2] kdump: crashdump: use copy_to() to simplify the related code
  2021-12-10 13:35 [PATCH 0/2] kdump: simplify code Tiezhu Yang
  2021-12-10 13:36 ` [PATCH 1/2] kdump: vmcore: move copy_to() from vmcore.c to uaccess.h Tiezhu Yang
@ 2021-12-10 13:36 ` Tiezhu Yang
  1 sibling, 0 replies; 6+ messages in thread
From: Tiezhu Yang @ 2021-12-10 13:36 UTC (permalink / raw)
  To: Dave Young, Baoquan He, Vivek Goyal, Andrew Morton
  Cc: linux-arm-kernel, linux-ia64, linux-mips, linuxppc-dev,
	linux-riscv, linux-sh, x86, linux-fsdevel, kexec, linux-kernel

Use copy_to() to simplify the copy_oldmem_page() code in the
arch/*/kernel/crash_dump*.c files.

Signed-off-by: Tiezhu Yang <yangtiezhu@loongson.cn>
---
 arch/arm/kernel/crash_dump.c     | 10 ++--------
 arch/arm64/kernel/crash_dump.c   | 10 ++--------
 arch/ia64/kernel/crash_dump.c    | 10 ++++------
 arch/mips/kernel/crash_dump.c    |  9 ++-------
 arch/powerpc/kernel/crash_dump.c |  7 ++-----
 arch/riscv/kernel/crash_dump.c   |  9 ++-------
 arch/sh/kernel/crash_dump.c      |  9 ++-------
 arch/x86/kernel/crash_dump_32.c  |  9 ++-------
 arch/x86/kernel/crash_dump_64.c  |  9 ++-------
 9 files changed, 20 insertions(+), 62 deletions(-)

diff --git a/arch/arm/kernel/crash_dump.c b/arch/arm/kernel/crash_dump.c
index 53cb924..6491f1d 100644
--- a/arch/arm/kernel/crash_dump.c
+++ b/arch/arm/kernel/crash_dump.c
@@ -40,14 +40,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!vaddr)
 		return -ENOMEM;
 
-	if (userbuf) {
-		if (copy_to_user(buf, vaddr + offset, csize)) {
-			iounmap(vaddr);
-			return -EFAULT;
-		}
-	} else {
-		memcpy(buf, vaddr + offset, csize);
-	}
+	if (copy_to(buf, vaddr + offset, csize, userbuf))
+		csize = -EFAULT;
 
 	iounmap(vaddr);
 	return csize;
diff --git a/arch/arm64/kernel/crash_dump.c b/arch/arm64/kernel/crash_dump.c
index 58303a9..496e6a5 100644
--- a/arch/arm64/kernel/crash_dump.c
+++ b/arch/arm64/kernel/crash_dump.c
@@ -38,14 +38,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!vaddr)
 		return -ENOMEM;
 
-	if (userbuf) {
-		if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
-			memunmap(vaddr);
-			return -EFAULT;
-		}
-	} else {
-		memcpy(buf, vaddr + offset, csize);
-	}
+	if (copy_to(buf, vaddr + offset, csize, userbuf))
+		csize = -EFAULT;
 
 	memunmap(vaddr);
 
diff --git a/arch/ia64/kernel/crash_dump.c b/arch/ia64/kernel/crash_dump.c
index 0ed3c3d..20f4c4e 100644
--- a/arch/ia64/kernel/crash_dump.c
+++ b/arch/ia64/kernel/crash_dump.c
@@ -39,13 +39,11 @@ copy_oldmem_page(unsigned long pfn, char *buf,
 
 	if (!csize)
 		return 0;
+
 	vaddr = __va(pfn<<PAGE_SHIFT);
-	if (userbuf) {
-		if (copy_to_user(buf, (vaddr + offset), csize)) {
-			return -EFAULT;
-		}
-	} else
-		memcpy(buf, (vaddr + offset), csize);
+	if (copy_to(buf, vaddr + offset, csize, userbuf))
+		return -EFAULT;
+
 	return csize;
 }
 
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index 2e50f551..80704dc 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -24,13 +24,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 		return 0;
 
 	vaddr = kmap_local_pfn(pfn);
-
-	if (!userbuf) {
-		memcpy(buf, vaddr + offset, csize);
-	} else {
-		if (copy_to_user(buf, vaddr + offset, csize))
-			csize = -EFAULT;
-	}
+	if (copy_to(buf, vaddr + offset, csize, userbuf))
+		csize = -EFAULT;
 
 	kunmap_local(vaddr);
 
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 5693e1c67..43b2658 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -71,11 +71,8 @@ void __init setup_kdump_trampoline(void)
 static size_t copy_oldmem_vaddr(void *vaddr, char *buf, size_t csize,
                                unsigned long offset, int userbuf)
 {
-	if (userbuf) {
-		if (copy_to_user((char __user *)buf, (vaddr + offset), csize))
-			return -EFAULT;
-	} else
-		memcpy(buf, (vaddr + offset), csize);
+	if (copy_to(buf, vaddr + offset, csize, userbuf))
+		return -EFAULT;
 
 	return csize;
 }
diff --git a/arch/riscv/kernel/crash_dump.c b/arch/riscv/kernel/crash_dump.c
index 86cc0ad..707fbc1 100644
--- a/arch/riscv/kernel/crash_dump.c
+++ b/arch/riscv/kernel/crash_dump.c
@@ -33,13 +33,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 	if (!vaddr)
 		return -ENOMEM;
 
-	if (userbuf) {
-		if (copy_to_user((char __user *)buf, vaddr + offset, csize)) {
-			memunmap(vaddr);
-			return -EFAULT;
-		}
-	} else
-		memcpy(buf, vaddr + offset, csize);
+	if (copy_to(buf, vaddr + offset, csize, userbuf))
+		csize = -EFAULT;
 
 	memunmap(vaddr);
 	return csize;
diff --git a/arch/sh/kernel/crash_dump.c b/arch/sh/kernel/crash_dump.c
index 5b41b59..2af9286 100644
--- a/arch/sh/kernel/crash_dump.c
+++ b/arch/sh/kernel/crash_dump.c
@@ -33,13 +33,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 
 	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
 
-	if (userbuf) {
-		if (copy_to_user((void __user *)buf, (vaddr + offset), csize)) {
-			iounmap(vaddr);
-			return -EFAULT;
-		}
-	} else
-	memcpy(buf, (vaddr + offset), csize);
+	if (copy_to(buf, vaddr + offset, csize, userbuf))
+		csize = -EFAULT;
 
 	iounmap(vaddr);
 	return csize;
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 5fcac46..731658b 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -54,13 +54,8 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
 		return -EFAULT;
 
 	vaddr = kmap_local_pfn(pfn);
-
-	if (!userbuf) {
-		memcpy(buf, vaddr + offset, csize);
-	} else {
-		if (copy_to_user(buf, vaddr + offset, csize))
-			csize = -EFAULT;
-	}
+	if (copy_to(buf, vaddr + offset, csize, userbuf))
+		csize = -EFAULT;
 
 	kunmap_local(vaddr);
 
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index a7f617a..8e7c192 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -29,13 +29,8 @@ static ssize_t __copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
 	if (!vaddr)
 		return -ENOMEM;
 
-	if (userbuf) {
-		if (copy_to_user((void __user *)buf, vaddr + offset, csize)) {
-			iounmap((void __iomem *)vaddr);
-			return -EFAULT;
-		}
-	} else
-		memcpy(buf, vaddr + offset, csize);
+	if (copy_to(buf, vaddr + offset, csize, userbuf))
+		csize = -EFAULT;
 
 	set_iounmap_nonlazy();
 	iounmap((void __iomem *)vaddr);
-- 
2.1.0



* Re: [PATCH 1/2] kdump: vmcore: move copy_to() from vmcore.c to uaccess.h
  2021-12-10 13:36 ` [PATCH 1/2] kdump: vmcore: move copy_to() from vmcore.c to uaccess.h Tiezhu Yang
@ 2021-12-10 16:59   ` Andrew Morton
  2021-12-10 23:50     ` Tiezhu Yang
  2021-12-11  7:37     ` Christophe Leroy
  0 siblings, 2 replies; 6+ messages in thread
From: Andrew Morton @ 2021-12-10 16:59 UTC (permalink / raw)
  To: Tiezhu Yang
  Cc: Dave Young, Baoquan He, Vivek Goyal, linux-arm-kernel,
	linux-ia64, linux-mips, linuxppc-dev, linux-riscv, linux-sh, x86,
	linux-fsdevel, kexec, linux-kernel

On Fri, 10 Dec 2021 21:36:00 +0800 Tiezhu Yang <yangtiezhu@loongson.cn> wrote:

> In arch/*/kernel/crash_dump*.c, there is similar code around
> copy_oldmem_page(). Move copy_to() from vmcore.c to uaccess.h so
> that it can be used to simplify the related code.
> 
> ...
>
> --- a/fs/proc/vmcore.c
> +++ b/fs/proc/vmcore.c
> @@ -238,20 +238,6 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
>  	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
>  }
>  
> -/*
> - * Copy to either kernel or user space
> - */
> -static int copy_to(void *target, void *src, size_t size, int userbuf)
> -{
> -	if (userbuf) {
> -		if (copy_to_user((char __user *) target, src, size))
> -			return -EFAULT;
> -	} else {
> -		memcpy(target, src, size);
> -	}
> -	return 0;
> -}
> -
>  #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
>  static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
>  {
> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
> index ac03940..4a6c3e4 100644
> --- a/include/linux/uaccess.h
> +++ b/include/linux/uaccess.h
> @@ -201,6 +201,20 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
>  	return n;
>  }
>  
> +/*
> + * Copy to either kernel or user space
> + */
> +static inline int copy_to(void *target, void *src, size_t size, int userbuf)
> +{
> +	if (userbuf) {
> +		if (copy_to_user((char __user *) target, src, size))
> +			return -EFAULT;
> +	} else {
> +		memcpy(target, src, size);
> +	}
> +	return 0;
> +}
> +

Ordinarily I'd say "this is too large to be inlined".  But the function
has only a single callsite per architecture so inlining it won't cause
bloat at present.

But hopefully copy_to() will get additional callers in the future, in
which case it shouldn't be inlined.  So I'm thinking it would be best
to start out with this as a regular non-inlined function, in
lib/usercopy.c.

Also, copy_to() is a very poor name for a globally-visible helper
function.  Better would be copy_to_user_or_kernel(), although that's
perhaps a bit long.

And the `userbuf' arg should have type bool, yes?
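
Something like the completely untested sketch below, perhaps (assuming
the copy_to_user_or_kernel() name and a bool flag, with a matching
declaration added to include/linux/uaccess.h; not the actual v2):

/* lib/usercopy.c */

/*
 * Copy @size bytes from @src to @target, which is either a kernel
 * or a user pointer depending on @userbuf.  Returns 0 on success,
 * or -EFAULT if the copy to user space faults.
 */
int copy_to_user_or_kernel(void *target, const void *src, size_t size,
			   bool userbuf)
{
	if (userbuf) {
		if (copy_to_user((void __user *)target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

The arch callers would then collapse to something like

	if (copy_to_user_or_kernel(buf, vaddr + offset, csize, userbuf))
		csize = -EFAULT;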



* Re: [PATCH 1/2] kdump: vmcore: move copy_to() from vmcore.c to uaccess.h
  2021-12-10 16:59   ` Andrew Morton
@ 2021-12-10 23:50     ` Tiezhu Yang
  2021-12-11  7:37     ` Christophe Leroy
  1 sibling, 0 replies; 6+ messages in thread
From: Tiezhu Yang @ 2021-12-10 23:50 UTC (permalink / raw)
  To: Andrew Morton
  Cc: Dave Young, Baoquan He, Vivek Goyal, linux-arm-kernel,
	linux-ia64, linux-mips, linuxppc-dev, linux-riscv, linux-sh, x86,
	linux-fsdevel, kexec, linux-kernel, Xuefeng Li

On 12/11/2021 12:59 AM, Andrew Morton wrote:
> On Fri, 10 Dec 2021 21:36:00 +0800 Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
>
>> In arch/*/kernel/crash_dump*.c, there is similar code around
>> copy_oldmem_page(). Move copy_to() from vmcore.c to uaccess.h so
>> that it can be used to simplify the related code.
>>
>> ...
>>
>> --- a/fs/proc/vmcore.c
>> +++ b/fs/proc/vmcore.c
>> @@ -238,20 +238,6 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
>>  	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
>>  }
>>
>> -/*
>> - * Copy to either kernel or user space
>> - */
>> -static int copy_to(void *target, void *src, size_t size, int userbuf)
>> -{
>> -	if (userbuf) {
>> -		if (copy_to_user((char __user *) target, src, size))
>> -			return -EFAULT;
>> -	} else {
>> -		memcpy(target, src, size);
>> -	}
>> -	return 0;
>> -}
>> -
>>  #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
>>  static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
>>  {
>> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
>> index ac03940..4a6c3e4 100644
>> --- a/include/linux/uaccess.h
>> +++ b/include/linux/uaccess.h
>> @@ -201,6 +201,20 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
>>  	return n;
>>  }
>>
>> +/*
>> + * Copy to either kernel or user space
>> + */
>> +static inline int copy_to(void *target, void *src, size_t size, int userbuf)
>> +{
>> +	if (userbuf) {
>> +		if (copy_to_user((char __user *) target, src, size))
>> +			return -EFAULT;
>> +	} else {
>> +		memcpy(target, src, size);
>> +	}
>> +	return 0;
>> +}
>> +
>
> Ordinarily I'd say "this is too large to be inlined".  But the function
> has only a single callsite per architecture so inlining it won't cause
> bloat at present.
>
> But hopefully copy_to() will get additional callers in the future, in
> which case it shouldn't be inlined.  So I'm thinking it would be best
> to start out with this as a regular non-inlined function, in
> lib/usercopy.c.
>
> Also, copy_to() is a very poor name for a globally-visible helper
> function.  Better would be copy_to_user_or_kernel(), although that's
> perhaps a bit long.
>
> And the `userbuf' arg should have type bool, yes?
>

Hi Andrew,

Thank you very much for your reply and suggestions. I agree with you
and will send v2 later.

Thanks,
Tiezhu



* Re: [PATCH 1/2] kdump: vmcore: move copy_to() from vmcore.c to uaccess.h
  2021-12-10 16:59   ` Andrew Morton
  2021-12-10 23:50     ` Tiezhu Yang
@ 2021-12-11  7:37     ` Christophe Leroy
  1 sibling, 0 replies; 6+ messages in thread
From: Christophe Leroy @ 2021-12-11  7:37 UTC (permalink / raw)
  To: Andrew Morton, Tiezhu Yang
  Cc: linux-ia64, Baoquan He, linux-sh, linuxppc-dev, x86, kexec,
	linux-mips, linux-kernel, Vivek Goyal, linux-fsdevel,
	linux-riscv, Dave Young, linux-arm-kernel



On 10/12/2021 at 17:59, Andrew Morton wrote:
> On Fri, 10 Dec 2021 21:36:00 +0800 Tiezhu Yang <yangtiezhu@loongson.cn> wrote:
> 
>> In arch/*/kernel/crash_dump*.c, there is similar code around
>> copy_oldmem_page(). Move copy_to() from vmcore.c to uaccess.h so
>> that it can be used to simplify the related code.
>>
>> ...
>>
>> --- a/fs/proc/vmcore.c
>> +++ b/fs/proc/vmcore.c
>> @@ -238,20 +238,6 @@ copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
>>   	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
>>   }
>>   
>> -/*
>> - * Copy to either kernel or user space
>> - */
>> -static int copy_to(void *target, void *src, size_t size, int userbuf)
>> -{
>> -	if (userbuf) {
>> -		if (copy_to_user((char __user *) target, src, size))
>> -			return -EFAULT;
>> -	} else {
>> -		memcpy(target, src, size);
>> -	}
>> -	return 0;
>> -}
>> -
>>   #ifdef CONFIG_PROC_VMCORE_DEVICE_DUMP
>>   static int vmcoredd_copy_dumps(void *dst, u64 start, size_t size, int userbuf)
>>   {
>> diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
>> index ac03940..4a6c3e4 100644
>> --- a/include/linux/uaccess.h
>> +++ b/include/linux/uaccess.h
>> @@ -201,6 +201,20 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
>>   	return n;
>>   }
>>   
>> +/*
>> + * Copy to either kernel or user space
>> + */
>> +static inline int copy_to(void *target, void *src, size_t size, int userbuf)
>> +{
>> +	if (userbuf) {
>> +		if (copy_to_user((char __user *) target, src, size))
>> +			return -EFAULT;
>> +	} else {
>> +		memcpy(target, src, size);
>> +	}
>> +	return 0;
>> +}
>> +
> 
> Ordinarily I'd say "this is too large to be inlined".  But the function
> has only a single callsite per architecture so inlining it won't cause
> bloat at present.
> 
> But hopefully copy_to() will get additional callers in the future, in
> which case it shouldn't be inlined.  So I'm thinking it would be best
> to start out with this as a regular non-inlined function, in
> lib/usercopy.c.
> 
> Also, copy_to() is a very poor name for a globally-visible helper
> function.  Better would be copy_to_user_or_kernel(), although that's
> perhaps a bit long.
> 
> And the `userbuf' arg should have type bool, yes?
> 

I think keeping it inlined is better.

copy_oldmem_page() is bigger with v2 (outlined) than with v1 (inlined), 
see both below:

v1:

00000000 <copy_oldmem_page>:
    0:	94 21 ff e0 	stwu    r1,-32(r1)
    4:	93 e1 00 1c 	stw     r31,28(r1)
    8:	7c bf 2b 79 	mr.     r31,r5
    c:	40 82 00 14 	bne     20 <copy_oldmem_page+0x20>
   10:	83 e1 00 1c 	lwz     r31,28(r1)
   14:	38 60 00 00 	li      r3,0
   18:	38 21 00 20 	addi    r1,r1,32
   1c:	4e 80 00 20 	blr
   20:	28 1f 10 00 	cmplwi  r31,4096
   24:	93 61 00 0c 	stw     r27,12(r1)
   28:	7c 08 02 a6 	mflr    r0
   2c:	93 81 00 10 	stw     r28,16(r1)
   30:	93 a1 00 14 	stw     r29,20(r1)
   34:	7c 9b 23 78 	mr      r27,r4
   38:	90 01 00 24 	stw     r0,36(r1)
   3c:	7c dd 33 78 	mr      r29,r6
   40:	93 c1 00 18 	stw     r30,24(r1)
   44:	7c fc 3b 78 	mr      r28,r7
   48:	40 81 00 08 	ble     50 <copy_oldmem_page+0x50>
   4c:	3b e0 10 00 	li      r31,4096
   50:	54 7e 60 26 	rlwinm  r30,r3,12,0,19
   54:	7f c3 f3 78 	mr      r3,r30
   58:	7f e4 fb 78 	mr      r4,r31
   5c:	48 00 00 01 	bl      5c <copy_oldmem_page+0x5c>
			5c: R_PPC_REL24	memblock_is_region_memory
   60:	2c 03 00 00 	cmpwi   r3,0
   64:	41 82 00 30 	beq     94 <copy_oldmem_page+0x94>
   68:	2c 1c 00 00 	cmpwi   r28,0
   6c:	3f de c0 00 	addis   r30,r30,-16384
   70:	7f 63 db 78 	mr      r3,r27
   74:	7f e5 fb 78 	mr      r5,r31
   78:	7c 9e ea 14 	add     r4,r30,r29
   7c:	41 82 00 7c 	beq     f8 <copy_oldmem_page+0xf8>
   80:	48 00 00 01 	bl      80 <copy_oldmem_page+0x80>
			80: R_PPC_REL24	_copy_to_user
   84:	2c 03 00 00 	cmpwi   r3,0
   88:	41 a2 00 48 	beq     d0 <copy_oldmem_page+0xd0>
   8c:	3b e0 ff f2 	li      r31,-14
   90:	48 00 00 40 	b       d0 <copy_oldmem_page+0xd0>
   94:	7f c3 f3 78 	mr      r3,r30
   98:	38 a0 05 91 	li      r5,1425
   9c:	38 80 10 00 	li      r4,4096
   a0:	48 00 00 01 	bl      a0 <copy_oldmem_page+0xa0>
			a0: R_PPC_REL24	ioremap_prot
   a4:	2c 1c 00 00 	cmpwi   r28,0
   a8:	7c 7e 1b 78 	mr      r30,r3
   ac:	7c 83 ea 14 	add     r4,r3,r29
   b0:	7f e5 fb 78 	mr      r5,r31
   b4:	7f 63 db 78 	mr      r3,r27
   b8:	41 82 00 48 	beq     100 <copy_oldmem_page+0x100>
   bc:	48 00 00 01 	bl      bc <copy_oldmem_page+0xbc>
			bc: R_PPC_REL24	_copy_to_user
   c0:	2c 03 00 00 	cmpwi   r3,0
   c4:	40 82 00 44 	bne     108 <copy_oldmem_page+0x108>
   c8:	7f c3 f3 78 	mr      r3,r30
   cc:	48 00 00 01 	bl      cc <copy_oldmem_page+0xcc>
			cc: R_PPC_REL24	iounmap
   d0:	80 01 00 24 	lwz     r0,36(r1)
   d4:	7f e3 fb 78 	mr      r3,r31
   d8:	83 61 00 0c 	lwz     r27,12(r1)
   dc:	83 81 00 10 	lwz     r28,16(r1)
   e0:	7c 08 03 a6 	mtlr    r0
   e4:	83 a1 00 14 	lwz     r29,20(r1)
   e8:	83 c1 00 18 	lwz     r30,24(r1)
   ec:	83 e1 00 1c 	lwz     r31,28(r1)
   f0:	38 21 00 20 	addi    r1,r1,32
   f4:	4e 80 00 20 	blr
   f8:	48 00 00 01 	bl      f8 <copy_oldmem_page+0xf8>
			f8: R_PPC_REL24	memcpy
   fc:	4b ff ff d4 	b       d0 <copy_oldmem_page+0xd0>
  100:	48 00 00 01 	bl      100 <copy_oldmem_page+0x100>
			100: R_PPC_REL24	memcpy
  104:	4b ff ff c4 	b       c8 <copy_oldmem_page+0xc8>
  108:	3b e0 ff f2 	li      r31,-14
  10c:	4b ff ff bc 	b       c8 <copy_oldmem_page+0xc8>


v2:

00000000 <copy_oldmem_page>:
    0:	94 21 ff e0 	stwu    r1,-32(r1)
    4:	93 e1 00 1c 	stw     r31,28(r1)
    8:	7c bf 2b 79 	mr.     r31,r5
    c:	93 c1 00 18 	stw     r30,24(r1)
   10:	3b c0 00 00 	li      r30,0
   14:	40 82 00 18 	bne     2c <copy_oldmem_page+0x2c>
   18:	7f c3 f3 78 	mr      r3,r30
   1c:	83 e1 00 1c 	lwz     r31,28(r1)
   20:	83 c1 00 18 	lwz     r30,24(r1)
   24:	38 21 00 20 	addi    r1,r1,32
   28:	4e 80 00 20 	blr
   2c:	28 1f 10 00 	cmplwi  r31,4096
   30:	93 61 00 0c 	stw     r27,12(r1)
   34:	7c 08 02 a6 	mflr    r0
   38:	93 81 00 10 	stw     r28,16(r1)
   3c:	93 a1 00 14 	stw     r29,20(r1)
   40:	7c db 33 78 	mr      r27,r6
   44:	90 01 00 24 	stw     r0,36(r1)
   48:	7c 9d 23 78 	mr      r29,r4
   4c:	7c fc 3b 78 	mr      r28,r7
   50:	40 81 00 08 	ble     58 <copy_oldmem_page+0x58>
   54:	3b e0 10 00 	li      r31,4096
   58:	54 7e 60 26 	rlwinm  r30,r3,12,0,19
   5c:	7f c3 f3 78 	mr      r3,r30
   60:	7f e4 fb 78 	mr      r4,r31
   64:	48 00 00 01 	bl      64 <copy_oldmem_page+0x64>
			64: R_PPC_REL24	memblock_is_region_memory
   68:	2c 03 00 00 	cmpwi   r3,0
   6c:	41 82 00 54 	beq     c0 <copy_oldmem_page+0xc0>
   70:	3f de c0 00 	addis   r30,r30,-16384
   74:	7c 9e da 14 	add     r4,r30,r27
   78:	7f 86 e3 78 	mr      r6,r28
   7c:	7f a3 eb 78 	mr      r3,r29
   80:	7f e5 fb 78 	mr      r5,r31
   84:	48 00 00 01 	bl      84 <copy_oldmem_page+0x84>
			84: R_PPC_REL24	copy_to_user_or_kernel
   88:	3b c0 ff f2 	li      r30,-14
   8c:	2c 03 00 00 	cmpwi   r3,0
   90:	40 82 00 08 	bne     98 <copy_oldmem_page+0x98>
   94:	7f fe fb 78 	mr      r30,r31
   98:	80 01 00 24 	lwz     r0,36(r1)
   9c:	83 61 00 0c 	lwz     r27,12(r1)
   a0:	83 81 00 10 	lwz     r28,16(r1)
   a4:	7c 08 03 a6 	mtlr    r0
   a8:	83 a1 00 14 	lwz     r29,20(r1)
   ac:	7f c3 f3 78 	mr      r3,r30
   b0:	83 e1 00 1c 	lwz     r31,28(r1)
   b4:	83 c1 00 18 	lwz     r30,24(r1)
   b8:	38 21 00 20 	addi    r1,r1,32
   bc:	4e 80 00 20 	blr
   c0:	7f c3 f3 78 	mr      r3,r30
   c4:	93 41 00 08 	stw     r26,8(r1)
   c8:	38 a0 05 91 	li      r5,1425
   cc:	38 80 10 00 	li      r4,4096
   d0:	48 00 00 01 	bl      d0 <copy_oldmem_page+0xd0>
			d0: R_PPC_REL24	ioremap_prot
   d4:	7f 86 e3 78 	mr      r6,r28
   d8:	7c 83 da 14 	add     r4,r3,r27
   dc:	7c 7a 1b 78 	mr      r26,r3
   e0:	7f e5 fb 78 	mr      r5,r31
   e4:	7f a3 eb 78 	mr      r3,r29
   e8:	48 00 00 01 	bl      e8 <copy_oldmem_page+0xe8>
			e8: R_PPC_REL24	copy_to_user_or_kernel
   ec:	3b c0 ff f2 	li      r30,-14
   f0:	2c 03 00 00 	cmpwi   r3,0
   f4:	40 82 00 08 	bne     fc <copy_oldmem_page+0xfc>
   f8:	7f fe fb 78 	mr      r30,r31
   fc:	7f 43 d3 78 	mr      r3,r26
  100:	48 00 00 01 	bl      100 <copy_oldmem_page+0x100>
			100: R_PPC_REL24	iounmap
  104:	80 01 00 24 	lwz     r0,36(r1)
  108:	83 41 00 08 	lwz     r26,8(r1)
  10c:	83 61 00 0c 	lwz     r27,12(r1)
  110:	7c 08 03 a6 	mtlr    r0
  114:	83 81 00 10 	lwz     r28,16(r1)
  118:	83 a1 00 14 	lwz     r29,20(r1)
  11c:	4b ff ff 90 	b       ac <copy_oldmem_page+0xac>


Christophe

