linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH] kprobes: x86: cleanup __recover_probed_insn().
@ 2015-03-03  6:39 Wang Nan
  2015-03-03  8:21 ` Masami Hiramatsu
  0 siblings, 1 reply; 3+ messages in thread
From: Wang Nan @ 2015-03-03  6:39 UTC (permalink / raw)
  To: masami.hiramatsu.pt, mingo; +Cc: lizefan, linux-kernel, x86

Since the kernel kconfig forbids turning off KPROBES_ON_FTRACE for x86, we
don't need to consider the situation where a kprobe probes at an ftrace
location. The only exception should be an early kprobe with
KPROBES_ON_FTRACE enabled. However, it is still impossible for it to get
tainted by ftrace if it is registered before ftrace is ready.

Thus this patch removes unneeded logic to make the code simpler.

Signed-off-by: Wang Nan <wangnan0@huawei.com>
---
 arch/x86/kernel/kprobes/core.c | 62 ++++++++----------------------------------
 1 file changed, 12 insertions(+), 50 deletions(-)

diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index 4e3d5a9..88a99c0 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -219,55 +219,6 @@ retry:
 	}
 }
 
-static unsigned long
-__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
-{
-	struct kprobe *kp;
-	unsigned long faddr;
-
-	kp = get_kprobe((void *)addr);
-	faddr = ftrace_location(addr);
-	/*
-	 * Addresses inside the ftrace location are refused by
-	 * arch_check_ftrace_location(). Something went terribly wrong
-	 * if such an address is checked here.
-	 */
-	if (WARN_ON(faddr && faddr != addr))
-		return 0UL;
-	/*
-	 * Use the current code if it is not modified by Kprobe
-	 * and it cannot be modified by ftrace.
-	 */
-	if (!kp && !faddr)
-		return addr;
-
-	/*
-	 * Basically, kp->ainsn.insn has an original instruction.
-	 * However, RIP-relative instruction can not do single-stepping
-	 * at different place, __copy_instruction() tweaks the displacement of
-	 * that instruction. In that case, we can't recover the instruction
-	 * from the kp->ainsn.insn.
-	 *
-	 * On the other hand, in case on normal Kprobe, kp->opcode has a copy
-	 * of the first byte of the probed instruction, which is overwritten
-	 * by int3. And the instruction at kp->addr is not modified by kprobes
-	 * except for the first byte, we can recover the original instruction
-	 * from it and kp->opcode.
-	 *
-	 * In case of Kprobes using ftrace, we do not have a copy of
-	 * the original instruction. In fact, the ftrace location might
-	 * be modified at anytime and even could be in an inconsistent state.
-	 * Fortunately, we know that the original code is the ideal 5-byte
-	 * long NOP.
-	 */
-	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
-	if (faddr)
-		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
-	else
-		buf[0] = kp->opcode;
-	return (unsigned long)buf;
-}
-
 /*
  * Recover the probed instruction at addr for further analysis.
  * Caller must lock kprobes by kprobe_mutex, or disable preemption
@@ -282,7 +233,18 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
 	if (__addr != addr)
 		return __addr;
 
-	return __recover_probed_insn(buf, addr);
+	/*
+	 * If KPROBES_ON_FTRACE is off, we are not allowed probing at
+	 * ftrace location. If it is on, we should use
+	 * arm_kprobe_ftrace() and never get here.  As a result, there
+	 * is no need to care about confliction between kprobe and
+	 * ftrace. The only exception should be early kprobes. However,
+	 * for such kprobes registered before ftrace is ready, it is
+	 * impossible to get a tainted instruction; for such kprobes
+	 * registered after ftrace ready, it will use
+	 * arm_kprobe_ftrace() and won't get here.
+	 */
+	return addr;
 }
 
 /* Check if paddr is at an instruction boundary */
-- 
1.8.4


^ permalink raw reply related	[flat|nested] 3+ messages in thread

* Re: [PATCH] kprobes: x86: cleanup __recover_probed_insn().
  2015-03-03  6:39 [PATCH] kprobes: x86: cleanup __recover_probed_insn() Wang Nan
@ 2015-03-03  8:21 ` Masami Hiramatsu
  2015-03-03  8:33   ` Wang Nan
  0 siblings, 1 reply; 3+ messages in thread
From: Masami Hiramatsu @ 2015-03-03  8:21 UTC (permalink / raw)
  To: Wang Nan; +Cc: mingo, lizefan, linux-kernel, x86, pmladek

(2015/03/03 15:39), Wang Nan wrote:
> Since kernel kconfig forbids turning off KPROBES_ON_FTRACE for x86, we
> don't need to consider the situation that a kprobe probing on a ftrace
> location. The only exception should be early kprobe with
> KPROBES_ON_FTRACE enabled. However, it is still impossible for it to get
> a tainted by ftrace if it is registered before ftrace is ready.
> 
> Thus this patch removes unneed logic to make code simpler.

Nak.
Please make sure why this is introduced (and try to check by reproducing it).
https://lkml.org/lkml/2015/2/20/208

Thank you,

> 
> Signed-off-by: Wang Nan <wangnan0@huawei.com>
> ---
>  arch/x86/kernel/kprobes/core.c | 62 ++++++++----------------------------------
>  1 file changed, 12 insertions(+), 50 deletions(-)
> 
> diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
> index 4e3d5a9..88a99c0 100644
> --- a/arch/x86/kernel/kprobes/core.c
> +++ b/arch/x86/kernel/kprobes/core.c
> @@ -219,55 +219,6 @@ retry:
>  	}
>  }
>  
> -static unsigned long
> -__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
> -{
> -	struct kprobe *kp;
> -	unsigned long faddr;
> -
> -	kp = get_kprobe((void *)addr);
> -	faddr = ftrace_location(addr);
> -	/*
> -	 * Addresses inside the ftrace location are refused by
> -	 * arch_check_ftrace_location(). Something went terribly wrong
> -	 * if such an address is checked here.
> -	 */
> -	if (WARN_ON(faddr && faddr != addr))
> -		return 0UL;
> -	/*
> -	 * Use the current code if it is not modified by Kprobe
> -	 * and it cannot be modified by ftrace.
> -	 */
> -	if (!kp && !faddr)
> -		return addr;
> -
> -	/*
> -	 * Basically, kp->ainsn.insn has an original instruction.
> -	 * However, RIP-relative instruction can not do single-stepping
> -	 * at different place, __copy_instruction() tweaks the displacement of
> -	 * that instruction. In that case, we can't recover the instruction
> -	 * from the kp->ainsn.insn.
> -	 *
> -	 * On the other hand, in case on normal Kprobe, kp->opcode has a copy
> -	 * of the first byte of the probed instruction, which is overwritten
> -	 * by int3. And the instruction at kp->addr is not modified by kprobes
> -	 * except for the first byte, we can recover the original instruction
> -	 * from it and kp->opcode.
> -	 *
> -	 * In case of Kprobes using ftrace, we do not have a copy of
> -	 * the original instruction. In fact, the ftrace location might
> -	 * be modified at anytime and even could be in an inconsistent state.
> -	 * Fortunately, we know that the original code is the ideal 5-byte
> -	 * long NOP.
> -	 */
> -	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
> -	if (faddr)
> -		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
> -	else
> -		buf[0] = kp->opcode;
> -	return (unsigned long)buf;
> -}
> -
>  /*
>   * Recover the probed instruction at addr for further analysis.
>   * Caller must lock kprobes by kprobe_mutex, or disable preemption
> @@ -282,7 +233,18 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
>  	if (__addr != addr)
>  		return __addr;
>  
> -	return __recover_probed_insn(buf, addr);
> +	/*
> +	 * If KPROBES_ON_FTRACE is off, we are not allowed probing at
> +	 * ftrace location. If it is on, we should use
> +	 * arm_kprobe_ftrace() and never get here.  As a result, there
> +	 * is no need to care about confliction between kprobe and
> +	 * ftrace. The only exception should be early kprobes. However,
> +	 * for such kprobes registered before ftrace is ready, it is
> +	 * impossible to get a tainted instruction; for such kprobes
> +	 * registered after ftrace ready, it will use
> +	 * arm_kprobe_ftrace() and won't get here.
> +	 */
> +	return addr;
>  }
>  
>  /* Check if paddr is at an instruction boundary */
> 


-- 
Masami HIRAMATSU
Software Platform Research Dept. Linux Technology Research Center
Hitachi, Ltd., Yokohama Research Laboratory
E-mail: masami.hiramatsu.pt@hitachi.com



^ permalink raw reply	[flat|nested] 3+ messages in thread

* Re: [PATCH] kprobes: x86: cleanup __recover_probed_insn().
  2015-03-03  8:21 ` Masami Hiramatsu
@ 2015-03-03  8:33   ` Wang Nan
  0 siblings, 0 replies; 3+ messages in thread
From: Wang Nan @ 2015-03-03  8:33 UTC (permalink / raw)
  To: Masami Hiramatsu; +Cc: mingo, lizefan, linux-kernel, x86, pmladek

On 2015/3/3 16:21, Masami Hiramatsu wrote:
> (2015/03/03 15:39), Wang Nan wrote:
>> Since kernel kconfig forbids turning off KPROBES_ON_FTRACE for x86, we
>> don't need to consider the situation that a kprobe probing on a ftrace
>> location. The only exception should be early kprobe with
>> KPROBES_ON_FTRACE enabled. However, it is still impossible for it to get
>> a tainted by ftrace if it is registered before ftrace is ready.
>>
>> Thus this patch removes unneed logic to make code simpler.
> 
> Nak.
> Please make sure why this is introduced (and try to check by reproducing it).
> https://lkml.org/lkml/2015/2/20/208
> 
> Thank you,
> 

Thank you for your response. I didn't realize it was newly introduced code.

It breaks my early kprobes on ftrace code. I provided a fix on it:

https://lkml.org/lkml/2015/3/3/4

Do you have any suggestion on it?

Thank you.

>>
>> Signed-off-by: Wang Nan <wangnan0@huawei.com>
>> ---
>>  arch/x86/kernel/kprobes/core.c | 62 ++++++++----------------------------------
>>  1 file changed, 12 insertions(+), 50 deletions(-)
>>
>> diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
>> index 4e3d5a9..88a99c0 100644
>> --- a/arch/x86/kernel/kprobes/core.c
>> +++ b/arch/x86/kernel/kprobes/core.c
>> @@ -219,55 +219,6 @@ retry:
>>  	}
>>  }
>>  
>> -static unsigned long
>> -__recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
>> -{
>> -	struct kprobe *kp;
>> -	unsigned long faddr;
>> -
>> -	kp = get_kprobe((void *)addr);
>> -	faddr = ftrace_location(addr);
>> -	/*
>> -	 * Addresses inside the ftrace location are refused by
>> -	 * arch_check_ftrace_location(). Something went terribly wrong
>> -	 * if such an address is checked here.
>> -	 */
>> -	if (WARN_ON(faddr && faddr != addr))
>> -		return 0UL;
>> -	/*
>> -	 * Use the current code if it is not modified by Kprobe
>> -	 * and it cannot be modified by ftrace.
>> -	 */
>> -	if (!kp && !faddr)
>> -		return addr;
>> -
>> -	/*
>> -	 * Basically, kp->ainsn.insn has an original instruction.
>> -	 * However, RIP-relative instruction can not do single-stepping
>> -	 * at different place, __copy_instruction() tweaks the displacement of
>> -	 * that instruction. In that case, we can't recover the instruction
>> -	 * from the kp->ainsn.insn.
>> -	 *
>> -	 * On the other hand, in case on normal Kprobe, kp->opcode has a copy
>> -	 * of the first byte of the probed instruction, which is overwritten
>> -	 * by int3. And the instruction at kp->addr is not modified by kprobes
>> -	 * except for the first byte, we can recover the original instruction
>> -	 * from it and kp->opcode.
>> -	 *
>> -	 * In case of Kprobes using ftrace, we do not have a copy of
>> -	 * the original instruction. In fact, the ftrace location might
>> -	 * be modified at anytime and even could be in an inconsistent state.
>> -	 * Fortunately, we know that the original code is the ideal 5-byte
>> -	 * long NOP.
>> -	 */
>> -	memcpy(buf, (void *)addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
>> -	if (faddr)
>> -		memcpy(buf, ideal_nops[NOP_ATOMIC5], 5);
>> -	else
>> -		buf[0] = kp->opcode;
>> -	return (unsigned long)buf;
>> -}
>> -
>>  /*
>>   * Recover the probed instruction at addr for further analysis.
>>   * Caller must lock kprobes by kprobe_mutex, or disable preemption
>> @@ -282,7 +233,18 @@ unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long add
>>  	if (__addr != addr)
>>  		return __addr;
>>  
>> -	return __recover_probed_insn(buf, addr);
>> +	/*
>> +	 * If KPROBES_ON_FTRACE is off, we are not allowed probing at
>> +	 * ftrace location. If it is on, we should use
>> +	 * arm_kprobe_ftrace() and never get here.  As a result, there
>> +	 * is no need to care about confliction between kprobe and
>> +	 * ftrace. The only exception should be early kprobes. However,
>> +	 * for such kprobes registered before ftrace is ready, it is
>> +	 * impossible to get a tainted instruction; for such kprobes
>> +	 * registered after ftrace ready, it will use
>> +	 * arm_kprobe_ftrace() and won't get here.
>> +	 */
>> +	return addr;
>>  }
>>  
>>  /* Check if paddr is at an instruction boundary */
>>
> 
> 



^ permalink raw reply	[flat|nested] 3+ messages in thread

end of thread, other threads:[~2015-03-03  8:33 UTC | newest]

Thread overview: 3+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-03-03  6:39 [PATCH] kprobes: x86: cleanup __recover_probed_insn() Wang Nan
2015-03-03  8:21 ` Masami Hiramatsu
2015-03-03  8:33   ` Wang Nan

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).