linux-kernel.vger.kernel.org archive mirror
 help / color / mirror / Atom feed
* [PATCH RT 0/2 v4] preempt-rt/x86: Handle sending signals from do_trap() by gdb
@ 2012-02-03 18:28 Steven Rostedt
  2012-02-03 18:28 ` [PATCH RT 1/2 v4] x86: Do not disable preemption in int3 on 32bit Steven Rostedt
  2012-02-03 18:28 ` [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3 Steven Rostedt
  0 siblings, 2 replies; 11+ messages in thread
From: Steven Rostedt @ 2012-02-03 18:28 UTC (permalink / raw)
  To: linux-kernel, linux-rt-users
  Cc: Thomas Gleixner, Carsten Emde, John Kacur, Masami Hiramatsu,
	Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Alexander van Heukelum, Andi Kleen, Oleg Nesterov,
	Clark Williams, Luis Goncalves

Thomas,

Can you apply these to v3.2-rt.

Version 4:
 In testing, Clark Williams triggered a bug in the paranoid_exit return
 path. The %rcx register was being clobbered by a function call and
 needed to be restored with: GET_THREAD_INFO(%rcx)

This version has been tested and so far has triggered no bugs.

-- Steve


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH RT 1/2 v4] x86: Do not disable preemption in int3 on 32bit
  2012-02-03 18:28 [PATCH RT 0/2 v4] preempt-rt/x86: Handle sending signals from do_trap() by gdb Steven Rostedt
@ 2012-02-03 18:28 ` Steven Rostedt
  2012-02-03 18:28 ` [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3 Steven Rostedt
  1 sibling, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2012-02-03 18:28 UTC (permalink / raw)
  To: linux-kernel, linux-rt-users
  Cc: Thomas Gleixner, Carsten Emde, John Kacur, Masami Hiramatsu,
	Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Alexander van Heukelum, Andi Kleen, Oleg Nesterov,
	Clark Williams, Luis Goncalves, stable-rt

[-- Attachment #1: fix-rt-int3-x86_32-3.2-rt.patch --]
[-- Type: text/plain, Size: 3335 bytes --]

Preemption must be disabled before enabling interrupts in do_trap
on x86_64 because the stack in use for int3 and debug is a per CPU
stack set by the IST. But 32bit does not have an IST and the stack
still belongs to the current task and there is no problem in scheduling
out the task.

Keep preemption enabled on X86_32 when enabling interrupts for
do_trap().

The name of the function is changed from preempt_conditional_sti/cli()
to conditional_sti/cli_ist(), to annotate that this function is used
when the stack is on the IST.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

Index: linux-rt.git/arch/x86/kernel/traps.c
===================================================================
--- linux-rt.git.orig/arch/x86/kernel/traps.c
+++ linux-rt.git/arch/x86/kernel/traps.c
@@ -87,9 +87,21 @@ static inline void conditional_sti(struc
 		local_irq_enable();
 }
 
-static inline void preempt_conditional_sti(struct pt_regs *regs)
+static inline void conditional_sti_ist(struct pt_regs *regs)
 {
+#ifdef CONFIG_X86_64
+	/*
+	 * X86_64 uses a per CPU stack on the IST for certain traps
+	 * like int3. The task can not be preempted when using one
+	 * of these stacks, thus preemption must be disabled, otherwise
+	 * the stack can be corrupted if the task is scheduled out,
+	 * and another task comes in and uses this stack.
+	 *
+	 * On x86_32 the task keeps its own stack and it is OK if the
+	 * task schedules out.
+	 */
 	inc_preempt_count();
+#endif
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_enable();
 }
@@ -100,11 +112,13 @@ static inline void conditional_cli(struc
 		local_irq_disable();
 }
 
-static inline void preempt_conditional_cli(struct pt_regs *regs)
+static inline void conditional_cli_ist(struct pt_regs *regs)
 {
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_disable();
+#ifdef CONFIG_X86_64
 	dec_preempt_count();
+#endif
 }
 
 static void __kprobes
@@ -222,9 +236,9 @@ dotraplinkage void do_stack_segment(stru
 	if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
 			12, SIGBUS) == NOTIFY_STOP)
 		return;
-	preempt_conditional_sti(regs);
+	conditional_sti_ist(regs);
 	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
-	preempt_conditional_cli(regs);
+	conditional_cli_ist(regs);
 }
 
 dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
@@ -316,9 +330,9 @@ dotraplinkage void __kprobes do_int3(str
 		return;
 #endif
 
-	preempt_conditional_sti(regs);
+	conditional_sti_ist(regs);
 	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
-	preempt_conditional_cli(regs);
+	conditional_cli_ist(regs);
 }
 
 #ifdef CONFIG_X86_64
@@ -412,12 +426,12 @@ dotraplinkage void __kprobes do_debug(st
 		return;
 
 	/* It's safe to allow irq's after DR6 has been saved */
-	preempt_conditional_sti(regs);
+	conditional_sti_ist(regs);
 
 	if (regs->flags & X86_VM_MASK) {
 		handle_vm86_trap((struct kernel_vm86_regs *) regs,
 				error_code, 1);
-		preempt_conditional_cli(regs);
+		conditional_cli_ist(regs);
 		return;
 	}
 
@@ -436,7 +450,7 @@ dotraplinkage void __kprobes do_debug(st
 	si_code = get_si_code(tsk->thread.debugreg6);
 	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
 		send_sigtrap(tsk, regs, error_code, si_code);
-	preempt_conditional_cli(regs);
+	conditional_cli_ist(regs);
 
 	return;
 }


^ permalink raw reply	[flat|nested] 11+ messages in thread

* [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3
  2012-02-03 18:28 [PATCH RT 0/2 v4] preempt-rt/x86: Handle sending signals from do_trap() by gdb Steven Rostedt
  2012-02-03 18:28 ` [PATCH RT 1/2 v4] x86: Do not disable preemption in int3 on 32bit Steven Rostedt
@ 2012-02-03 18:28 ` Steven Rostedt
  2012-02-03 18:40   ` Oleg Nesterov
  1 sibling, 1 reply; 11+ messages in thread
From: Steven Rostedt @ 2012-02-03 18:28 UTC (permalink / raw)
  To: linux-kernel, linux-rt-users
  Cc: Thomas Gleixner, Carsten Emde, John Kacur, Masami Hiramatsu,
	Ingo Molnar, Andrew Morton, H. Peter Anvin,
	Alexander van Heukelum, Andi Kleen, Oleg Nesterov,
	Clark Williams, Luis Goncalves, stable-rt

[-- Attachment #1: fix-rt-int3_x86_64-3.2-rt.patch --]
[-- Type: text/plain, Size: 9165 bytes --]

On x86_64 we must disable preemption before we enable interrupts
for int3 and debugging, because the current task is using a per CPU
debug stack defined by the IST. If we schedule out, another task
can come in and use the same stack and cause the stack to be corrupted
and crash the kernel on return.

When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and
one of these is the spin lock used in signal handling.

Some of the debug code (int3) causes do_trap() to send a signal.
This function calls a spin lock that has been converted to a mutex
and has the possibility to sleep. If this happens, the above issues with
the corrupted stack are possible.

Instead of calling the signal right away, for PREEMPT_RT and x86_64,
the signal information is stored on the stacks task_struct and a
new TIF flag is set (TIF_FORCE_SIG_TRAP). On exit of the exception,
in paranoid_exit, if NEED_RESCHED is set, the task stack is switched
back to the kernel stack and interrupts is enabled. In this code
the TIF_FORCE_SIG_TRAP is also checked and a function is called to
do the force_sig() in a context that may schedule.

Note, to get into this path, the NEED_RESCHED flag is also set.
But as this only happens in debug context, an extra schedule should not
be an issue.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

Index: linux-rt.git/arch/x86/include/asm/thread_info.h
===================================================================
--- linux-rt.git.orig/arch/x86/include/asm/thread_info.h
+++ linux-rt.git/arch/x86/include/asm/thread_info.h
@@ -95,6 +95,7 @@ struct thread_info {
 #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
 #define TIF_SYSCALL_TRACEPOINT	28	/* syscall tracepoint instrumentation */
+#define TIF_FORCE_SIG_TRAP	29	/* force a signal coming back from trap */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -117,6 +118,7 @@ struct thread_info {
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_FORCE_SIG_TRAP	(1 << TIF_FORCE_SIG_TRAP)
 
 /* work to do in syscall_trace_enter() */
 #define _TIF_WORK_SYSCALL_ENTRY	\
@@ -266,5 +268,14 @@ extern void arch_task_cache_init(void);
 extern void free_thread_info(struct thread_info *ti);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define arch_task_cache_init arch_task_cache_init
+
+struct siginfo;
+/*
+ * Hacks for RT to get around signal processing with int3 and do_debug.
+ */
+void
+force_sig_info_rt(int sig, struct siginfo *info, struct task_struct *p, int rt);
+void send_sigtrap_rt(struct task_struct *tsk, struct pt_regs *regs,
+		     int error_code, int si_code);
 #endif
 #endif /* _ASM_X86_THREAD_INFO_H */
Index: linux-rt.git/arch/x86/kernel/entry_64.S
===================================================================
--- linux-rt.git.orig/arch/x86/kernel/entry_64.S
+++ linux-rt.git/arch/x86/kernel/entry_64.S
@@ -1391,6 +1391,14 @@ paranoid_userspace:
 paranoid_schedule:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_ANY)
+#ifdef CONFIG_PREEMPT_RT_FULL
+	GET_THREAD_INFO(%rcx)
+	movl TI_flags(%rcx),%ebx
+	testl $_TIF_FORCE_SIG_TRAP,%ebx
+	jz paranoid_do_schedule
+	call do_force_sig_trap
+paranoid_do_schedule:
+#endif
 	call schedule
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
Index: linux-rt.git/arch/x86/kernel/ptrace.c
===================================================================
--- linux-rt.git.orig/arch/x86/kernel/ptrace.c
+++ linux-rt.git/arch/x86/kernel/ptrace.c
@@ -1341,14 +1341,31 @@ void user_single_step_siginfo(struct tas
 	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
 }
 
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
-					 int error_code, int si_code)
+static void __send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+		  int error_code, int si_code, int rt)
 {
 	struct siginfo info;
 
 	fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
 	/* Send us the fake SIGTRAP */
-	force_sig_info(SIGTRAP, &info, tsk);
+	force_sig_info_rt(SIGTRAP, &info, tsk, rt);
+}
+
+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+		  int error_code, int si_code)
+{
+	__send_sigtrap(tsk, regs, error_code, si_code, 0);
+}
+
+void send_sigtrap_rt(struct task_struct *tsk, struct pt_regs *regs,
+		  int error_code, int si_code)
+{
+#if defined(CONFIG_X86_64) && defined(CONFIG_PREEMPT_RT_FULL)
+	int rt = 1;
+#else
+	int rt = 0;
+#endif
+	__send_sigtrap(tsk, regs, error_code, si_code, rt);
 }
 
 
Index: linux-rt.git/arch/x86/kernel/traps.c
===================================================================
--- linux-rt.git.orig/arch/x86/kernel/traps.c
+++ linux-rt.git/arch/x86/kernel/traps.c
@@ -121,9 +121,83 @@ static inline void conditional_cli_ist(s
 #endif
 }
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PREEMPT_RT_FULL)
+/*
+ * In PREEMP_RT_FULL, the signal spinlocks are mutexes. But if
+ * do_int3 calls do_trap, we are running on the debug stack, and
+ * not the task struct stack. We must keep preemption disabled
+ * because the current stack is per CPU not per task.
+ *
+ * Instead, we set the
+
+ */
+void
+__force_sig_info_rt(int sig, struct siginfo *info, struct task_struct *p, int rt)
+{
+	if (!rt) {
+		/* simple case */
+		if (info)
+			force_sig_info(sig, info, p);
+		else
+			force_sig(sig, p);
+		return;
+	}
+	trace_printk("doing delayed force_sig info=%p\n", info);
+	/*
+	 * Sad, but to make things easier we set need resched,
+	 * this forces the paranoid exit in traps to swap out
+	 * of the debug stack and back to the users stack.
+	 * Then there we call do_force_sig_trap() which does
+	 * the delayed force_sig() with interrupts enabled and
+	 * a thread stack that we can schedule on.
+	 */
+	set_need_resched();
+	set_thread_flag(TIF_FORCE_SIG_TRAP);
+	if (info) {
+		memcpy(&p->stored_info, info, sizeof(p->stored_info));
+		p->stored_info_set = 1;
+	} else
+		p->stored_info_set = 0;
+
+}
+
+void force_sig_rt(int sig, struct task_struct *p, int rt)
+{
+	__force_sig_info_rt(sig, NULL, p, rt);
+}
+
+void
+force_sig_info_rt(int sig, struct siginfo *info, struct task_struct *p, int rt)
+{
+	__force_sig_info_rt(sig, info, p, rt);
+}
+
+void do_force_sig_trap(void)
+{
+	struct task_struct *p = current;
+
+	trace_printk("forced sig! (set=%d)\n", p->stored_info_set);
+	if (p->stored_info_set)
+		force_sig_info(SIGTRAP, &p->stored_info, p);
+	else
+		force_sig(SIGTRAP, p);
+	p->stored_info_set = 0;
+	clear_thread_flag(TIF_FORCE_SIG_TRAP);
+}
+#else
+void force_sig_rt(int sig, struct task_struct *p, int rt)
+{
+	force_sig(sig, p);
+}
+void force_sig_info_rt(int sig, struct siginfo *info, struct task_struct *p, int rt)
+{
+	force_sig_info(sig, info, p);
+}
+#endif
+
 static void __kprobes
-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
-	long error_code, siginfo_t *info)
+__do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+	  long error_code, siginfo_t *info, int rt)
 {
 	struct task_struct *tsk = current;
 
@@ -172,7 +246,7 @@ trap_signal:
 	if (info)
 		force_sig_info(signr, info, tsk);
 	else
-		force_sig(signr, tsk);
+		force_sig_rt(signr, tsk, rt);
 	return;
 
 kernel_trap:
@@ -192,6 +266,20 @@ vm86_trap:
 #endif
 }
 
+static void __kprobes
+do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+	long error_code, siginfo_t *info)
+{
+	__do_trap(trapnr, signr, str, regs, error_code, info, 0);
+}
+
+static void __kprobes
+do_trap_rt(int trapnr, int signr, char *str, struct pt_regs *regs,
+	long error_code, siginfo_t *info)
+{
+	__do_trap(trapnr, signr, str, regs, error_code, info, 1);
+}
+
 #define DO_ERROR(trapnr, signr, str, name)				\
 dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
 {									\
@@ -331,7 +419,7 @@ dotraplinkage void __kprobes do_int3(str
 #endif
 
 	conditional_sti_ist(regs);
-	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
+	do_trap_rt(3, SIGTRAP, "int3", regs, error_code, NULL);
 	conditional_cli_ist(regs);
 }
 
@@ -449,7 +537,7 @@ dotraplinkage void __kprobes do_debug(st
 	}
 	si_code = get_si_code(tsk->thread.debugreg6);
 	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
-		send_sigtrap(tsk, regs, error_code, si_code);
+		send_sigtrap_rt(tsk, regs, error_code, si_code);
 	conditional_cli_ist(regs);
 
 	return;
Index: linux-rt.git/include/linux/sched.h
===================================================================
--- linux-rt.git.orig/include/linux/sched.h
+++ linux-rt.git/include/linux/sched.h
@@ -1600,10 +1600,16 @@ struct task_struct {
 	struct rcu_head put_rcu;
 	int softirq_nestcnt;
 #endif
-#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+#if defined CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_X86_64
+	struct siginfo stored_info;
+	int stored_info_set;
+#endif
+#ifdef CONFIG_HIGHMEM
 	int kmap_idx;
 	pte_t kmap_pte[KM_TYPE_NR];
 #endif
+#endif /* CONFIG_PREEMPT_RT_FULL */
 };
 
 #ifdef CONFIG_PREEMPT_RT_FULL


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3
  2012-02-03 18:28 ` [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3 Steven Rostedt
@ 2012-02-03 18:40   ` Oleg Nesterov
  2012-02-03 20:10     ` Steven Rostedt
  0 siblings, 1 reply; 11+ messages in thread
From: Oleg Nesterov @ 2012-02-03 18:40 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, linux-rt-users, Thomas Gleixner, Carsten Emde,
	John Kacur, Masami Hiramatsu, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Alexander van Heukelum, Andi Kleen,
	Clark Williams, Luis Goncalves, stable-rt

Steven, I guess I need to actually read the patch before asking the
questions... I'll try later, but

On 02/03, Steven Rostedt wrote:
>
> --- linux-rt.git.orig/arch/x86/kernel/entry_64.S
> +++ linux-rt.git/arch/x86/kernel/entry_64.S
> @@ -1391,6 +1391,14 @@ paranoid_userspace:
>  paranoid_schedule:
>  	TRACE_IRQS_ON
>  	ENABLE_INTERRUPTS(CLBR_ANY)
> +#ifdef CONFIG_PREEMPT_RT_FULL
> +	GET_THREAD_INFO(%rcx)
> +	movl TI_flags(%rcx),%ebx
> +	testl $_TIF_FORCE_SIG_TRAP,%ebx
> +	jz paranoid_do_schedule
> +	call do_force_sig_trap
> +paranoid_do_schedule:
> +#endif

Stupid question. Do we really need to send the signal from here?

Why force_sig(rt => T) can't set TIF_NOTIFY_RESUME instead? Then
we can change do_notify_resume() to check TIF_FORCE_SIG_TRAP. And
perhaps we can even avoid the new TIF_FORCE_SIG_TRAP, we could
check task->stored_info_set.

In fact I feel this can be simplified even more, but I am not sure.

Oleg.


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3
  2012-02-03 18:40   ` Oleg Nesterov
@ 2012-02-03 20:10     ` Steven Rostedt
  2012-02-05 19:23       ` Oleg Nesterov
  0 siblings, 1 reply; 11+ messages in thread
From: Steven Rostedt @ 2012-02-03 20:10 UTC (permalink / raw)
  To: Oleg Nesterov
  Cc: linux-kernel, linux-rt-users, Thomas Gleixner, Carsten Emde,
	John Kacur, Masami Hiramatsu, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Alexander van Heukelum, Andi Kleen,
	Clark Williams, Luis Goncalves, stable-rt

On Fri, 2012-02-03 at 19:40 +0100, Oleg Nesterov wrote:

> Stupid question. Do we really need to send the signal from here?

If we can do it correctly elsewhere, I'm fine with that too :-)

> 
> Why force_sig(rt => T) can't set TIF_NOTIFY_RESUME instead? Then
> we can change do_notify_resume() to check TIF_FORCE_SIG_TRAP. And
> perhaps we can even avoid the new TIF_FORCE_SIG_TRAP, we could
> check task->stored_info_set.

You know the signal code much better than I do. If that works, I'm all
for that too. I really don't like the entry_64 solution, but it was what
I knew would work.

> 
> In fact I feel this can be simplified even more, but I am not sure.

My strengths are in the entry_64.S code, not the signal code, so I fixed
it the best way that I felt. This does not imply my fix is the best. If
we can solve this in a clean way using the existing signal
infrastructure, I'm all for that.

-- Steve



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3
  2012-02-03 20:10     ` Steven Rostedt
@ 2012-02-05 19:23       ` Oleg Nesterov
  2012-02-05 19:31         ` Oleg Nesterov
  2012-02-07 14:17         ` Steven Rostedt
  0 siblings, 2 replies; 11+ messages in thread
From: Oleg Nesterov @ 2012-02-05 19:23 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, linux-rt-users, Thomas Gleixner, Carsten Emde,
	John Kacur, Masami Hiramatsu, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Alexander van Heukelum, Andi Kleen,
	Clark Williams, Luis Goncalves, stable-rt

On 02/03, Steven Rostedt wrote:
>
> If
> we can solve this in a clean way using the existing signal
> infrastructure, I'm all for that.

I am not sure, I know almost nothing about rt and about this
low-level stuff. But please look at my attempt below.

So. it is very simple. The patch simply changes force_sig_info() to
check in_atomic(), if it is true we offload the sending to
do_notify_resume(). Of course, I do not know if we can rely on this
check in rt kernels.

Note:

	- The patch adds the new code under CONFIG_PREEMPT_RT_FULL,
	  it should probably check X86_64 or defined(TIF_NOTIFY_RESUME)
	  as well.

	- I think we can later move task->forced_info into restart_block's
	  union.

	- We could modify get_signal_to_deliver() instead of the
	  arch-dependant do_notify_resume(). In this case we do not
	  need TIF_NOTIFY_RESUME, TIF_SIGPENDING is enough.

What do you think?

Oleg.
---

 arch/x86/kernel/signal.c |    9 +++++++++
 include/linux/sched.h    |    4 ++++
 kernel/signal.c          |   31 +++++++++++++++++++++++++++++--
 3 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 46a01bd..22cb8ff 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -816,6 +816,15 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
 		mce_notify_process();
 #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (unlikely(current->forced_info.si_signo)) {
+		struct task_struct *t = current;
+		force_sig_info(t->forced_info.si_signo,
+					&t->forced_info, t);
+		t->forced_info.si_signo = 0;
+	}
+#endif
+
 	/* deal with pending signal delivery */
 	if (thread_info_flags & _TIF_SIGPENDING)
 		do_signal(regs);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 2234985..942c545 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1407,6 +1407,10 @@ struct task_struct {
 	sigset_t blocked, real_blocked;
 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
 	struct sigpending pending;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/* TODO: move me into ->restart_block ? */
+	struct siginfo forced_info;
+#endif
 
 	unsigned long sas_ss_sp;
 	size_t sas_ss_size;
diff --git a/kernel/signal.c b/kernel/signal.c
index c73c428..5c0b61a 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1228,8 +1228,8 @@ int do_send_sig_info(int sig, struct siginfo *info, struct task_struct *p,
  * We don't want to have recursive SIGSEGV's etc, for example,
  * that is why we also clear SIGNAL_UNKILLABLE.
  */
-int
-force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+static int
+do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 {
 	unsigned long int flags;
 	int ret, blocked, ignored;
@@ -1254,6 +1254,33 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
 	return ret;
 }
 
+int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (in_atomic()) {
+		if (WARN_ON_ONCE(t != current))
+			return 0;
+		if (WARN_ON_ONCE(t->forced_info.si_signo))
+			return 0;
+
+		if (is_si_special(info)) {
+			WARN_ON_ONCE(info != SEND_SIG_PRIV);
+			t->forced_info.si_signo = sig;
+			t->forced_info.si_errno = 0;
+			t->forced_info.si_code = SI_KERNEL;
+			t->forced_info.si_pid = 0;
+			t->forced_info.si_uid = 0;
+		} else {
+			t->forced_info = *info;
+		}
+
+		set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
+		return 0;
+	}
+#endif
+	return do_force_sig_info(sig, info, t);
+}
+
 /*
  * Nuke all other threads in the group.
  */


^ permalink raw reply related	[flat|nested] 11+ messages in thread

* Re: [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3
  2012-02-05 19:23       ` Oleg Nesterov
@ 2012-02-05 19:31         ` Oleg Nesterov
  2012-02-06 16:12           ` Steven Rostedt
  2012-02-07 14:17         ` Steven Rostedt
  1 sibling, 1 reply; 11+ messages in thread
From: Oleg Nesterov @ 2012-02-05 19:31 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, linux-rt-users, Thomas Gleixner, Carsten Emde,
	John Kacur, Masami Hiramatsu, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Alexander van Heukelum, Andi Kleen,
	Clark Williams, Luis Goncalves, stable-rt

Damn. Sorry for noise...

On 02/05, Oleg Nesterov wrote:
>
> +int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
> +{
> +#ifdef CONFIG_PREEMPT_RT_FULL
> +	if (in_atomic()) {
> +		if (WARN_ON_ONCE(t != current))

This is certainly wrong in upstream kernel. It does use force_
this way although it shouldn't imho.

But _probably_ this is fine for rt? We are going to take the mutex,
we shouldn't do this in atomic context. But, once again, I do not
really know what in_atomic() means in rt.

Oleg.


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3
  2012-02-05 19:31         ` Oleg Nesterov
@ 2012-02-06 16:12           ` Steven Rostedt
  2012-02-06 16:25             ` Oleg Nesterov
  0 siblings, 1 reply; 11+ messages in thread
From: Steven Rostedt @ 2012-02-06 16:12 UTC (permalink / raw)
  To: Oleg Nesterov
  Cc: linux-kernel, linux-rt-users, Thomas Gleixner, Carsten Emde,
	John Kacur, Masami Hiramatsu, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Alexander van Heukelum, Andi Kleen,
	Clark Williams, Luis Goncalves, stable-rt

On Sun, 2012-02-05 at 20:31 +0100, Oleg Nesterov wrote:
> Damn. Sorry for noise...
> 
> On 02/05, Oleg Nesterov wrote:
> >
> > +int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
> > +{
> > +#ifdef CONFIG_PREEMPT_RT_FULL
> > +	if (in_atomic()) {
> > +		if (WARN_ON_ONCE(t != current))
> 
> This is certainly wrong in upstream kernel. It does use force_
> this way although it shouldn't imho.

It's wrong in upstream even with the #ifdef define here?

> 
> But _probably_ this is fine for rt? We are going to take the mutex,
> we shouldn't do this in atomic context. But, once again, I do not
> really know what in_atomic() means in rt.

in_atomic() is the same in rt as in mainline. It should still work.

-- Steve



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3
  2012-02-06 16:12           ` Steven Rostedt
@ 2012-02-06 16:25             ` Oleg Nesterov
  2012-02-06 16:38               ` Steven Rostedt
  0 siblings, 1 reply; 11+ messages in thread
From: Oleg Nesterov @ 2012-02-06 16:25 UTC (permalink / raw)
  To: Steven Rostedt
  Cc: linux-kernel, linux-rt-users, Thomas Gleixner, Carsten Emde,
	John Kacur, Masami Hiramatsu, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Alexander van Heukelum, Andi Kleen,
	Clark Williams, Luis Goncalves, stable-rt

On 02/06, Steven Rostedt wrote:
>
> On Sun, 2012-02-05 at 20:31 +0100, Oleg Nesterov wrote:
> > Damn. Sorry for noise...
> >
> > On 02/05, Oleg Nesterov wrote:
> > >
> > > +int force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
> > > +{
> > > +#ifdef CONFIG_PREEMPT_RT_FULL
> > > +	if (in_atomic()) {
> > > +		if (WARN_ON_ONCE(t != current))
> >
> > This is certainly wrong in upstream kernel. It does use force_
> > this way although it shouldn't imho.
>
> It's wrong in upstream even with the #ifdef define here?

No, the patch has no effect if !CONFIG_PREEMPT_RT_FULL.

Oleg.


^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3
  2012-02-06 16:25             ` Oleg Nesterov
@ 2012-02-06 16:38               ` Steven Rostedt
  0 siblings, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2012-02-06 16:38 UTC (permalink / raw)
  To: Oleg Nesterov
  Cc: linux-kernel, linux-rt-users, Thomas Gleixner, Carsten Emde,
	John Kacur, Masami Hiramatsu, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Alexander van Heukelum, Andi Kleen,
	Clark Williams, Luis Goncalves, stable-rt

On Mon, 2012-02-06 at 17:25 +0100, Oleg Nesterov wrote:
> On 02/06, Steven Rostedt wrote:

> > > This is certainly wrong in upstream kernel. It does use force_
> > > this way although it shouldn't imho.
> >
> > It's wrong in upstream even with the #ifdef define here?
> 
> No, the patch has no effect if !CONFIG_PREEMPT_RT_FULL.

Well then, it is perfectly correct for the upstream kernel with the
#ifdef CONFIG_PREEMPT_RT_FULL added ;-)

-- Steve



^ permalink raw reply	[flat|nested] 11+ messages in thread

* Re: [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3
  2012-02-05 19:23       ` Oleg Nesterov
  2012-02-05 19:31         ` Oleg Nesterov
@ 2012-02-07 14:17         ` Steven Rostedt
  1 sibling, 0 replies; 11+ messages in thread
From: Steven Rostedt @ 2012-02-07 14:17 UTC (permalink / raw)
  To: Oleg Nesterov
  Cc: linux-kernel, linux-rt-users, Thomas Gleixner, Carsten Emde,
	John Kacur, Masami Hiramatsu, Ingo Molnar, Andrew Morton,
	H. Peter Anvin, Alexander van Heukelum, Andi Kleen,
	Clark Williams, Luis Goncalves, stable-rt

On Sun, 2012-02-05 at 20:23 +0100, Oleg Nesterov wrote:
> On 02/03, Steven Rostedt wrote:
> >
> > If
> > we can solve this in a clean way using the existing signal
> > infrastructure, I'm all for that.
> 
> I am not sure, I know almost nothing about rt and about this
> low-level stuff. But please look at my attempt below.
> 
> So. it is very simple. The patch simply changes force_sig_info() to
> check in_atomic(), if it is true we offload the sending to
> do_notify_resume(). Of course, I do not know if we can rely on this
> check in rt kernels.
> 
> Note:
> 
> 	- The patch adds the new code under CONFIG_PREEMPT_RT_FULL,
> 	  it should probably check X86_64 or defined(TIF_NOTIFY_RESUME)
> 	  as well.
> 
> 	- I think we can later move task->forced_info into restart_block's
> 	  union.
> 
> 	- We could modify get_signal_to_deliver() instead of the
> 	  arch-dependant do_notify_resume(). In this case we do not
> 	  need TIF_NOTIFY_RESUME, TIF_SIGPENDING is enough.
> 
> What do you think?
> 
> Oleg.
> ---
> 
>  arch/x86/kernel/signal.c |    9 +++++++++
>  include/linux/sched.h    |    4 ++++
>  kernel/signal.c          |   31 +++++++++++++++++++++++++++++--

The problem I have with this patch is here. The change to
kernel/signal.c. If anything, all the changes should be encompassed with
a #ifdef CONFIG_X86_64 as well (or defined(CONFIG_PREEMPT_RT_FULL) &&
defined(CONFIG_X86_64)).

Below is an update of my patch that also handles the stack_segment
fault. I used the info.si_signo to pass what sig is to be sent, and
changed the flag from TIF_FORCE_SIG_TRAP to just TIF_FORCE_SIG. Is this
still acceptable.

I'm not attached to this patch over Oleg's. I've tested both, and they
both work. Oleg's is simpler but puts some of the changes into the core
kernel/signal.c file. Mine is a little more complex but keeps the code
more contained in the x86 arch. If adding x86 specific code into the
core signal code is acceptable, I'll take Oleg's patch.

I'd like to hear from others. Which is more appropriate if we ever need
to send this mainline?

Again, I'd take Oleg's patch just as much as I'd take my own. I really
don't care.

Oleg, if I do end up taking your patch, I still need your signed-off-by.

Thanks!

-- Steve

preempt-rt/x86: Delay calling signals in int3

On x86_64 we must disable preemption before we enable interrupts
for int3 and debugging, because the current task is using a per CPU
debug stack defined by the IST. If we schedule out, another task
can come in and use the same stack and cause the stack to be corrupted
and crash the kernel on return.

When CONFIG_PREEMPT_RT_FULL is enabled, spin_locks become mutexes, and
one of these is the spin lock used in signal handling.

Some of the debug code (int3) causes do_trap() to send a signal.
This function calls a spin lock that has been converted to a mutex
and has the possibility to sleep. If this happens, the above issues with
the corrupted stack are possible.

Instead of calling the signal right away, for PREEMPT_RT and x86_64,
the signal information is stored on the stacks task_struct and a
new TIF flag is set (TIF_FORCE_SIG_TRAP). On exit of the exception,
in paranoid_exit, if NEED_RESCHED is set, the task stack is switched
back to the kernel stack and interrupts are enabled. In this code
the TIF_FORCE_SIG_TRAP is also checked and a function is called to
do the force_sig() in a context that may schedule.

Note, to get into this path, the NEED_RESCHED flag is also set.
But as this only happens in debug context, an extra schedule should not
be an issue.

Cc: stable-rt@vger.kernel.org
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>

Index: linux-rt.git/arch/x86/include/asm/thread_info.h
===================================================================
--- linux-rt.git.orig/arch/x86/include/asm/thread_info.h
+++ linux-rt.git/arch/x86/include/asm/thread_info.h
@@ -95,6 +95,7 @@ struct thread_info {
 #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
 #define TIF_SYSCALL_TRACEPOINT	28	/* syscall tracepoint instrumentation */
+#define TIF_FORCE_SIG		29	/* force a signal coming back from trap */
 
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
@@ -117,6 +118,7 @@ struct thread_info {
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_FORCE_SIG		(1 << TIF_FORCE_SIG)
 
 /* work to do in syscall_trace_enter() */
 #define _TIF_WORK_SYSCALL_ENTRY	\
@@ -266,5 +268,14 @@ extern void arch_task_cache_init(void);
 extern void free_thread_info(struct thread_info *ti);
 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
 #define arch_task_cache_init arch_task_cache_init
+
+struct siginfo;
+/*
+ * Hacks for RT to get around signal processing with int3 and do_debug.
+ */
+void
+force_sig_info_rt(int sig, struct siginfo *info, struct task_struct *p, int rt);
+void send_sigtrap_rt(struct task_struct *tsk, struct pt_regs *regs,
+		     int error_code, int si_code);
 #endif
 #endif /* _ASM_X86_THREAD_INFO_H */
Index: linux-rt.git/arch/x86/kernel/entry_64.S
===================================================================
--- linux-rt.git.orig/arch/x86/kernel/entry_64.S
+++ linux-rt.git/arch/x86/kernel/entry_64.S
@@ -1391,6 +1391,14 @@ paranoid_userspace:
 paranoid_schedule:
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_ANY)
+#ifdef CONFIG_PREEMPT_RT_FULL
+	GET_THREAD_INFO(%rcx)
+	movl TI_flags(%rcx),%ebx
+	testl $_TIF_FORCE_SIG,%ebx
+	jz paranoid_do_schedule
+	call do_force_sig_trap
+paranoid_do_schedule:
+#endif
 	call schedule
 	DISABLE_INTERRUPTS(CLBR_ANY)
 	TRACE_IRQS_OFF
Index: linux-rt.git/arch/x86/kernel/ptrace.c
===================================================================
--- linux-rt.git.orig/arch/x86/kernel/ptrace.c
+++ linux-rt.git/arch/x86/kernel/ptrace.c
@@ -1341,14 +1341,31 @@ void user_single_step_siginfo(struct tas
 	fill_sigtrap_info(tsk, regs, 0, TRAP_BRKPT, info);
 }
 
-void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
-					 int error_code, int si_code)
+static void __send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+		  int error_code, int si_code, int rt)
 {
 	struct siginfo info;
 
 	fill_sigtrap_info(tsk, regs, error_code, si_code, &info);
 	/* Send us the fake SIGTRAP */
-	force_sig_info(SIGTRAP, &info, tsk);
+	force_sig_info_rt(SIGTRAP, &info, tsk, rt);
+}
+
+void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
+		  int error_code, int si_code)
+{
+	__send_sigtrap(tsk, regs, error_code, si_code, 0);
+}
+
+void send_sigtrap_rt(struct task_struct *tsk, struct pt_regs *regs,
+		  int error_code, int si_code)
+{
+#if defined(CONFIG_X86_64) && defined(CONFIG_PREEMPT_RT_FULL)
+	int rt = 1;
+#else
+	int rt = 0;
+#endif
+	__send_sigtrap(tsk, regs, error_code, si_code, rt);
 }
 
 
Index: linux-rt.git/arch/x86/kernel/traps.c
===================================================================
--- linux-rt.git.orig/arch/x86/kernel/traps.c
+++ linux-rt.git/arch/x86/kernel/traps.c
@@ -121,9 +121,84 @@ static inline void conditional_cli_ist(s
 #endif
 }
 
+#if defined(CONFIG_X86_64) && defined(CONFIG_PREEMPT_RT_FULL)
+/*
+ * In PREEMPT_RT_FULL, the signal spinlocks are mutexes. But if
+ * do_int3 calls do_trap, we are running on the debug stack, and
+ * not the task struct stack. We must keep preemption disabled
+ * because the current stack is per CPU not per task.
+ *
+ * Instead, we set NEED_RESCHED and the TIF_FORCE_SIG flag, and delay
+ * the force_sig() until paranoid_exit, where scheduling is safe.
+ */
+void
+__force_sig_info_rt(int sig, struct siginfo *info, struct task_struct *p, int rt)
+{
+	if (!rt) {
+		/* simple case */
+		if (info)
+			force_sig_info(sig, info, p);
+		else
+			force_sig(sig, p);
+		return;
+	}
+	trace_printk("doing delayed force_sig info=%p\n", info);
+	/*
+	 * Sad, but to make things easier we set need resched,
+	 * this forces the paranoid exit in traps to swap out
+	 * of the debug stack and back to the users stack.
+	 * Then there we call do_force_sig_trap() which does
+	 * the delayed force_sig() with interrupts enabled and
+	 * a thread stack that we can schedule on.
+	 */
+	set_need_resched();
+	set_thread_flag(TIF_FORCE_SIG);
+	if (info) {
+		memcpy(&p->stored_info, info, sizeof(p->stored_info));
+		p->stored_info_set = 1;
+	} else {
+		p->stored_info.si_signo = sig;
+		p->stored_info_set = 0;
+	}
+}
+
+void force_sig_rt(int sig, struct task_struct *p, int rt)
+{
+	__force_sig_info_rt(sig, NULL, p, rt);
+}
+
+void
+force_sig_info_rt(int sig, struct siginfo *info, struct task_struct *p, int rt)
+{
+	__force_sig_info_rt(sig, info, p, rt);
+}
+
+void do_force_sig_trap(void)
+{
+	struct task_struct *p = current;
+
+	trace_printk("forced sig! (set=%d)\n", p->stored_info_set);
+	if (p->stored_info_set)
+		force_sig_info(p->stored_info.si_signo, &p->stored_info, p);
+	else
+		force_sig(p->stored_info.si_signo, p);
+	p->stored_info_set = 0;
+	clear_thread_flag(TIF_FORCE_SIG);
+}
+#else
+void force_sig_rt(int sig, struct task_struct *p, int rt)
+{
+	force_sig(sig, p);
+}
+void force_sig_info_rt(int sig, struct siginfo *info, struct task_struct *p, int rt)
+{
+	force_sig_info(sig, info, p);
+}
+#endif
+
 static void __kprobes
-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
-	long error_code, siginfo_t *info)
+__do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+	  long error_code, siginfo_t *info, int rt)
 {
 	struct task_struct *tsk = current;
 
@@ -172,7 +247,7 @@ trap_signal:
 	if (info)
 		force_sig_info(signr, info, tsk);
 	else
-		force_sig(signr, tsk);
+		force_sig_rt(signr, tsk, rt);
 	return;
 
 kernel_trap:
@@ -192,6 +267,20 @@ vm86_trap:
 #endif
 }
 
+static void __kprobes
+do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
+	long error_code, siginfo_t *info)
+{
+	__do_trap(trapnr, signr, str, regs, error_code, info, 0);
+}
+
+static void __kprobes
+do_trap_rt(int trapnr, int signr, char *str, struct pt_regs *regs,
+	long error_code, siginfo_t *info)
+{
+	__do_trap(trapnr, signr, str, regs, error_code, info, 1);
+}
+
 #define DO_ERROR(trapnr, signr, str, name)				\
 dotraplinkage void do_##name(struct pt_regs *regs, long error_code)	\
 {									\
@@ -237,7 +326,7 @@ dotraplinkage void do_stack_segment(stru
 			12, SIGBUS) == NOTIFY_STOP)
 		return;
 	conditional_sti_ist(regs);
-	do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+	do_trap_rt(12, SIGBUS, "stack segment", regs, error_code, NULL);
 	conditional_cli_ist(regs);
 }
 
@@ -331,7 +420,7 @@ dotraplinkage void __kprobes do_int3(str
 #endif
 
 	conditional_sti_ist(regs);
-	do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
+	do_trap_rt(3, SIGTRAP, "int3", regs, error_code, NULL);
 	conditional_cli_ist(regs);
 }
 
@@ -449,7 +538,7 @@ dotraplinkage void __kprobes do_debug(st
 	}
 	si_code = get_si_code(tsk->thread.debugreg6);
 	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
-		send_sigtrap(tsk, regs, error_code, si_code);
+		send_sigtrap_rt(tsk, regs, error_code, si_code);
 	conditional_cli_ist(regs);
 
 	return;
Index: linux-rt.git/include/linux/sched.h
===================================================================
--- linux-rt.git.orig/include/linux/sched.h
+++ linux-rt.git/include/linux/sched.h
@@ -1600,10 +1600,16 @@ struct task_struct {
 	struct rcu_head put_rcu;
 	int softirq_nestcnt;
 #endif
-#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+#if defined CONFIG_PREEMPT_RT_FULL
+#ifdef CONFIG_X86_64
+	struct siginfo stored_info;
+	int stored_info_set;
+#endif
+#ifdef CONFIG_HIGHMEM
 	int kmap_idx;
 	pte_t kmap_pte[KM_TYPE_NR];
 #endif
+#endif /* CONFIG_PREEMPT_RT_FULL */
 };
 
 #ifdef CONFIG_PREEMPT_RT_FULL



^ permalink raw reply	[flat|nested] 11+ messages in thread

end of thread, other threads:[~2012-02-07 14:17 UTC | newest]

Thread overview: 11+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2012-02-03 18:28 [PATCH RT 0/2 v4] preempt-rt/x86: Handle sending signals from do_trap() by gdb Steven Rostedt
2012-02-03 18:28 ` [PATCH RT 1/2 v4] x86: Do not disable preemption in int3 on 32bit Steven Rostedt
2012-02-03 18:28 ` [PATCH RT 2/2 v4] preempt-rt/x86: Delay calling signals in int3 Steven Rostedt
2012-02-03 18:40   ` Oleg Nesterov
2012-02-03 20:10     ` Steven Rostedt
2012-02-05 19:23       ` Oleg Nesterov
2012-02-05 19:31         ` Oleg Nesterov
2012-02-06 16:12           ` Steven Rostedt
2012-02-06 16:25             ` Oleg Nesterov
2012-02-06 16:38               ` Steven Rostedt
2012-02-07 14:17         ` Steven Rostedt

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).