* [PATCH] kprobes: minor code cleanups
@ 2010-08-04 16:09 Namhyung Kim
  2010-08-05 12:08 ` Masami Hiramatsu
From: Namhyung Kim @ 2010-08-04 16:09 UTC
  To: kernel-janitors

 * make internal functions static
 * get rid of __dummy_buf on x86 (see the sketch below)
 * make sparse happier :-)
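
The kallsyms change is the part worth a second look: kallsyms_lookup()
requires a caller-supplied name buffer even when only the size/offset
is wanted, which is why can_probe() and can_optimize() each kept a
KSYM_NAME_LEN-sized dummy buffer around. kallsyms_lookup_size_offset()
has no such requirement. A minimal sketch of the difference (not part
of the patch; lookup_offset_old()/lookup_offset_new() are made-up
helpers for illustration only):

#include <linux/kallsyms.h>

/* Old style: a throw-away name buffer just to satisfy the prototype. */
static int lookup_offset_old(unsigned long addr, unsigned long *offset)
{
	static char dummy_name[KSYM_NAME_LEN];	/* written, never read */

	return kallsyms_lookup(addr, NULL, offset, NULL, dummy_name) != NULL;
}

/* New style: ask for size/offset only, no name buffer needed. */
static int lookup_offset_new(unsigned long addr, unsigned long *offset)
{
	return kallsyms_lookup_size_offset(addr, NULL, offset);
}

Making kprobes_optinsn_template_holder() static additionally needs
__used, since the function is only referenced from the inline assembly
it wraps and the compiler could otherwise discard it.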

Signed-off-by: Namhyung Kim <namhyung@gmail.com>

diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
index 675879b..a4ff35d 100644
--- a/arch/x86/kernel/kprobes.c
+++ b/arch/x86/kernel/kprobes.c
@@ -224,9 +224,6 @@ static int recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr)
 	return 0;
 }
 
-/* Dummy buffers for kallsyms_lookup */
-static char __dummy_buf[KSYM_NAME_LEN];
-
 /* Check if paddr is at an instruction boundary */
 static int __kprobes can_probe(unsigned long paddr)
 {
@@ -235,7 +232,7 @@ static int __kprobes can_probe(unsigned long paddr)
 	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
 
-	if (!kallsyms_lookup(paddr, NULL, &offset, NULL, __dummy_buf))
+	if (!kallsyms_lookup_size_offset(paddr, NULL, &offset))
 		return 0;
 
 	/* Decode instructions */
@@ -1109,7 +1106,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr,
 	*(unsigned long *)addr = val;
 }
 
-void __kprobes kprobes_optinsn_template_holder(void)
+static void __used __kprobes kprobes_optinsn_template_holder(void)
 {
 	asm volatile (
 			".global optprobe_template_entry\n"
@@ -1249,11 +1246,9 @@ static int __kprobes can_optimize(unsigned long paddr)
 	unsigned long addr, size = 0, offset = 0;
 	struct insn insn;
 	kprobe_opcode_t buf[MAX_INSN_SIZE];
-	/* Dummy buffers for lookup_symbol_attrs */
-	static char __dummy_buf[KSYM_NAME_LEN];
 
 	/* Lookup symbol including addr */
-	if (!kallsyms_lookup(paddr, &size, &offset, NULL, __dummy_buf))
+	if (!kallsyms_lookup_size_offset(paddr, &size, &offset))
 		return 0;
 
 	/* Check there is enough space for a relative jump. */
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 1b0dbe0..a8d0139 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -399,7 +399,7 @@ static inline int kprobe_optready(struct kprobe *p)
  * Return an optimized kprobe whose optimizing code replaces
  * instructions including addr (exclude breakpoint).
  */
-struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
+static struct kprobe *__kprobes get_optimized_kprobe(unsigned long addr)
 {
 	int i;
 	struct kprobe *p = NULL;
@@ -857,7 +857,8 @@ void __kprobes kretprobe_hash_unlock(struct task_struct *tsk,
 	spin_unlock_irqrestore(hlist_lock, *flags);
 }
 
-void __kprobes kretprobe_table_unlock(unsigned long hash, unsigned long *flags)
+static void __kprobes kretprobe_table_unlock(unsigned long hash,
+	unsigned long *flags)
 {
 	spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
 	spin_unlock_irqrestore(hlist_lock, *flags);
-- 
1.7.0.4


* Re: [PATCH] kprobes: minor code cleanups
  2010-08-04 16:09 [PATCH] kprobes: minor code cleanups Namhyung Kim
@ 2010-08-05 12:08 ` Masami Hiramatsu
From: Masami Hiramatsu @ 2010-08-05 12:08 UTC
  To: kernel-janitors

Namhyung Kim wrote:
>  * make internal functions static
>  * get rid of __dummy_buf on x86
>  * make sparse happier :-)

Thanks, and could you split those into 2 patches?
(make function static, and remove __dummy_buf)
It's better for a single change to have a single reason.

Thank you,

-- 
Masami HIRAMATSU
2nd Research Dept.
Hitachi, Ltd., Systems Development Laboratory
E-mail: masami.hiramatsu.pt@hitachi.com
