All of lore.kernel.org
 help / color / mirror / Atom feed
* [Xenomai] [PATCHv2 1/4] cobalt/x86: add support for eager fpu handling
@ 2018-09-14 16:14 Henning Schild
  2018-09-14 16:14 ` [Xenomai] [PATCH] gitignore: add build output for in-tree builds Henning Schild
                   ` (4 more replies)
  0 siblings, 5 replies; 12+ messages in thread
From: Henning Schild @ 2018-09-14 16:14 UTC (permalink / raw)
  To: xenomai

Upstream 4.14 switched to purely eager fpu switching. That was
backported to 4.4 and 4.9. This commit makes cobalt able to deal with
the changed kernel behaviour.
This commit takes care of 4.9 to begin with.

Introduce IPIPE_X86_FPU_EAGER to switch between the new and the old
implementations. The new implementation is much simpler than the old
one. We basically only deal with the odd case where Xenomai preempts
Linux in a kernel-fpu context.
In a regular Linux that can never happen and if it happens we need to
make sure to get things into a consistent state. We have to mark
"current" as not owning the fpu anymore and allow others to use
in-kernel fpu (kernel_fpu_enable). __switch_to from Linux will do the
rest.

Signed-off-by: Henning Schild <henning.schild@siemens.com>
---
 .../arch/x86/include/asm/xenomai/thread.h     |  8 ++-
 .../arch/x86/include/asm/xenomai/wrappers.h   |  5 ++
 kernel/cobalt/arch/x86/thread.c               | 69 ++++++++++++++++++-
 3 files changed, 79 insertions(+), 3 deletions(-)

diff --git a/kernel/cobalt/arch/x86/include/asm/xenomai/thread.h b/kernel/cobalt/arch/x86/include/asm/xenomai/thread.h
index f174a82c0..0c5c4da9c 100644
--- a/kernel/cobalt/arch/x86/include/asm/xenomai/thread.h
+++ b/kernel/cobalt/arch/x86/include/asm/xenomai/thread.h
@@ -24,6 +24,7 @@
 #include <asm/xenomai/wrappers.h>
 #include <asm/traps.h>
 
+#ifndef IPIPE_X86_FPU_EAGER
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)
 typedef union thread_xstate x86_fpustate;
 #define x86_fpustate_ptr(t) ((t)->fpu.state)
@@ -31,6 +32,7 @@ typedef union thread_xstate x86_fpustate;
 typedef union fpregs_state x86_fpustate;
 #define x86_fpustate_ptr(t) ((t)->fpu.active_state)
 #endif
+#endif
 
 struct xnarchtcb {
 	struct xntcb core;
@@ -40,10 +42,14 @@ struct xnarchtcb {
 	unsigned long ip;
 	unsigned long *ipp;
 #endif  
+#ifdef IPIPE_X86_FPU_EAGER
+	struct fpu *kfpu;
+#else
 	x86_fpustate *fpup;
-	unsigned int root_kfpu: 1;
 	unsigned int root_used_math: 1;
 	x86_fpustate *kfpu_state;
+#endif
+	unsigned int root_kfpu: 1;
 	struct {
 		unsigned long ip;
 		unsigned long ax;
diff --git a/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h b/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
index 5f9cff3c9..00f0aaae5 100644
--- a/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
+++ b/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
@@ -24,6 +24,11 @@
 #define __get_user_inatomic __get_user
 #define __put_user_inatomic __put_user
 
+#if LINUX_VERSION_CODE > KERNEL_VERSION(4,9,108) && \
+    LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)
+#define IPIPE_X86_FPU_EAGER
+#endif
+
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
diff --git a/kernel/cobalt/arch/x86/thread.c b/kernel/cobalt/arch/x86/thread.c
index 7fb136300..18cf636e5 100644
--- a/kernel/cobalt/arch/x86/thread.c
+++ b/kernel/cobalt/arch/x86/thread.c
@@ -28,9 +28,13 @@
 
 static struct kmem_cache *xstate_cache;
 
+#ifdef IPIPE_X86_FPU_EAGER
+#define fpu_kernel_xstate_size sizeof(struct fpu)
+#else
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
 #define fpu_kernel_xstate_size xstate_size
 #endif
+#endif /* IPIPE_X86_FPU_EAGER */
 
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
 #define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
@@ -199,14 +203,17 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
 	struct mm_struct *prev_mm, *next_mm;
 
 	prev = out_tcb->core.host_task;
+#ifndef IPIPE_X86_FPU_EAGER
 	if (x86_fpregs_active(prev))
 		/*
 		 * __switch_to will try and use __unlazy_fpu, so we
 		 * need to clear the ts bit.
 		 */
 		clts();
+#endif /* ! IPIPE_X86_FPU_EAGER */
 
 	next = in_tcb->core.host_task;
+#ifndef IPIPE_X86_FPU_EAGER
 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)
 	next->thread.fpu.counter = 0;
 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)
@@ -214,6 +221,7 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
 #else
 	next->fpu_counter = 0;
 #endif
+#endif /* ! IPIPE_X86_FPU_EAGER */
 	prev_mm = out_tcb->core.active_mm;
 	next_mm = in_tcb->core.mm;
 	if (next_mm == NULL) {
@@ -245,9 +253,13 @@ void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
 	switch_to(prev, next, last);
 #endif /* LINUX_VERSION_CODE >= 4.8 */
 
+#ifndef IPIPE_X86_FPU_EAGER
 	stts();
+#endif /* ! IPIPE_X86_FPU_EAGER */
 }
 
+#ifndef IPIPE_X86_FPU_EAGER
+
 #ifdef CONFIG_X86_64
 #define XSAVE_PREFIX	"0x48,"
 #define XSAVE_SUFFIX	"q"
@@ -359,11 +371,21 @@ int xnarch_handle_fpu_fault(struct xnthread *from,
 
 	return 1;
 }
+#else /* IPIPE_X86_FPU_EAGER */
+
+int xnarch_handle_fpu_fault(struct xnthread *from,
+			struct xnthread *to, struct ipipe_trap_data *d)
+{
+	// in eager mode there are no such faults
+	BUG_ON(1);
+}
+#endif /* ! IPIPE_X86_FPU_EAGER */
 
 #define current_task_used_kfpu() kernel_fpu_disabled()
 
 #define tcb_used_kfpu(t) ((t)->root_kfpu)
 
+#ifndef IPIPE_X86_FPU_EAGER
 void xnarch_leave_root(struct xnthread *root)
 {
 	struct xnarchtcb *const rootcb = xnthread_archtcb(root);
@@ -430,6 +452,35 @@ void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
 		p->flags &= ~PF_USED_MATH;
 	}
 }
+#else /* IPIPE_X86_FPU_EAGER */
+void xnarch_leave_root(struct xnthread *root)
+{
+	struct xnarchtcb *const rootcb = xnthread_archtcb(root);
+
+	rootcb->root_kfpu = current_task_used_kfpu();
+
+	if (!tcb_used_kfpu(rootcb))
+		return;
+
+	// save fpregs from in-kernel use
+	copy_fpregs_to_fpstate(rootcb->kfpu);
+	kernel_fpu_enable();
+	// mark current thread as not owning the FPU anymore
+	if (&current->thread.fpu.fpstate_active)
+		fpregs_deactivate(&current->thread.fpu);
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	struct xnarchtcb *const to_tcb = xnthread_archtcb(to);
+
+	if (!tcb_used_kfpu(to_tcb))
+		return;
+
+	copy_kernel_to_fpregs(&to_tcb->kfpu->state);
+	kernel_fpu_disable();
+}
+#endif /* ! IPIPE_X86_FPU_EAGER */
 
 void xnarch_init_root_tcb(struct xnthread *thread)
 {
@@ -440,9 +491,13 @@ void xnarch_init_root_tcb(struct xnthread *thread)
 	tcb->spp = &tcb->sp;
 	tcb->ipp = &tcb->ip;
 #endif	
+#ifndef IPIPE_X86_FPU_EAGER
 	tcb->fpup = NULL;
-	tcb->root_kfpu = 0;
 	tcb->kfpu_state = kmem_cache_zalloc(xstate_cache, GFP_KERNEL);
+#else /* IPIPE_X86_FPU_EAGER */
+	tcb->kfpu = kmem_cache_zalloc(xstate_cache, GFP_KERNEL);
+#endif /* ! IPIPE_X86_FPU_EAGER */
+	tcb->root_kfpu = 0;
 }
 
 void xnarch_init_shadow_tcb(struct xnthread *thread)
@@ -459,12 +514,22 @@ void xnarch_init_shadow_tcb(struct xnthread *thread)
 	tcb->ipp = &p->thread.rip; /* <!> raw naming intended. */
 #endif
 #endif
+#ifndef IPIPE_X86_FPU_EAGER
 	tcb->fpup = x86_fpustate_ptr(&p->thread);
-	tcb->root_kfpu = 0;
 	tcb->kfpu_state = NULL;
+#else /* IPIPE_X86_FPU_EAGER */
+	tcb->kfpu = NULL;
+#endif /* ! IPIPE_X86_FPU_EAGER */
+	tcb->root_kfpu = 0;
 
+#ifndef IPIPE_X86_FPU_EAGER
 	/* XNFPU is set upon first FPU fault */
 	xnthread_clear_state(thread, XNFPU);
+#else /* IPIPE_X86_FPU_EAGER */
+	/* XNFPU is always set */
+	xnthread_set_state(thread, XNFPU);
+	fpu__activate_fpstate_read(&p->thread.fpu);
+#endif /* ! IPIPE_X86_FPU_EAGER */
 }
 
 int mach_x86_thread_init(void)
-- 
2.19.0



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [Xenomai] [PATCH] gitignore: add build output for in-tree builds
  2018-09-14 16:14 [Xenomai] [PATCHv2 1/4] cobalt/x86: add support for eager fpu handling Henning Schild
@ 2018-09-14 16:14 ` Henning Schild
  2018-09-14 16:16   ` Henning Schild
  2018-09-14 16:14 ` [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager Henning Schild
                   ` (3 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Henning Schild @ 2018-09-14 16:14 UTC (permalink / raw)
  To: xenomai

This adds all build output to .gitignore. Not everybody works out of
tree, or knows that this option exists. So let us add all build output
to .gitignore. While this names all our binaries and has the risk of
getting out of sync, it at least makes "git status" human readable again.

Signed-off-by: Henning Schild <henning.schild@siemens.com>
---
 .gitignore | 67 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 67 insertions(+)

diff --git a/.gitignore b/.gitignore
index b1d682032..a054a65af 100644
--- a/.gitignore
+++ b/.gitignore
@@ -15,3 +15,70 @@ config/missing
 configure
 aclocal.m4
 autom4te.cache
+Makefile
+.deps
+config.log
+config.status
+include/stamp-h1
+include/xeno_config.h
+libtool
+scripts/xeno
+scripts/xeno-config
+utils/net/rtnet
+utils/net/rtnet.conf
+*.a
+*.o
+*.la
+*.lo
+.libs
+.dirstamp
+demo/alchemy/altency
+demo/alchemy/cobalt/cross-link
+demo/posix/cobalt/bufp-label
+demo/posix/cobalt/bufp-readwrite
+demo/posix/cobalt/can_rtt
+demo/posix/cobalt/eth_p_all
+demo/posix/cobalt/gpiopwm
+demo/posix/cobalt/iddp-label
+demo/posix/cobalt/iddp-sendrecv
+demo/posix/cobalt/xddp-echo
+demo/posix/cobalt/xddp-label
+demo/posix/cobalt/xddp-stream
+demo/posix/cyclictest/cyclictest
+lib/boilerplate/config-dump.h
+lib/boilerplate/git-stamp.h
+lib/boilerplate/version
+testsuite/clocktest/clocktest
+testsuite/gpiotest/gpiotest
+testsuite/latency/latency
+testsuite/smokey/net_common/smokey_net_server
+testsuite/smokey/smokey
+testsuite/spitest/spitest
+testsuite/switchtest/switchtest
+testsuite/xeno-test/xeno-test
+testsuite/xeno-test/xeno-test-run
+utils/analogy/analogy_calibrate
+utils/analogy/analogy_config
+utils/analogy/cmd_bits
+utils/analogy/cmd_read
+utils/analogy/cmd_write
+utils/analogy/insn_bits
+utils/analogy/insn_read
+utils/analogy/insn_write
+utils/analogy/wf_generate
+utils/autotune/autotune
+utils/can/rtcanconfig
+utils/can/rtcanrecv
+utils/can/rtcansend
+utils/corectl/corectl
+utils/hdb/hdb
+utils/net/nomaccfg
+utils/net/rtcfg
+utils/net/rtifconfig
+utils/net/rtiwconfig
+utils/net/rtping
+utils/net/rtroute
+utils/net/tdmacfg
+utils/ps/rtps
+utils/slackspot/slackspot
+testsuite/smokey/dlopen/dlopentest
-- 
2.19.0



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager
  2018-09-14 16:14 [Xenomai] [PATCHv2 1/4] cobalt/x86: add support for eager fpu handling Henning Schild
  2018-09-14 16:14 ` [Xenomai] [PATCH] gitignore: add build output for in-tree builds Henning Schild
@ 2018-09-14 16:14 ` Henning Schild
  2018-09-25 11:20   ` Jan Kiszka
  2018-09-14 16:14 ` [Xenomai] [PATCHv2 2/4] cobalt/x86: add ipipe-4.4 eager fpu support Henning Schild
                   ` (2 subsequent siblings)
  4 siblings, 1 reply; 12+ messages in thread
From: Henning Schild @ 2018-09-14 16:14 UTC (permalink / raw)
  To: xenomai

Linux 4.14 dropped support for lazy fpu switching and in the 4.4 and 4.9
series similar changes were backported.
So fpu is eager for those versions. That simplifies things a lot and we can
drop several changes from the IPIPE patch.
On the Xenomai side the only thing we still have to care about is the
kernel fpu state when we interrupt an in kernel fpu user. But we do that
explicitly and can drop the indirection active_(fp)state.

This patch basically drops most of the fpu specifics from the ipipe
patch.

This patch applies on ipipe-4.9.y and it has to be followed by a merge with
>= 4.9.109. Cobalt will not compile if you are already eager and still
4.9.108

Signed-off-by: Henning Schild <henning.schild@siemens.com>
---
 arch/x86/include/asm/fpu/internal.h | 30 ++++++++---------------------
 arch/x86/include/asm/fpu/types.h    | 12 ------------
 arch/x86/kernel/fpu/core.c          | 17 ++++++----------
 arch/x86/kernel/fpu/init.c          |  5 +----
 4 files changed, 15 insertions(+), 49 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 35ca184e83e1..6bdceba90f17 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -13,7 +13,6 @@
 #include <linux/compat.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/kconfig.h>
 #include <linux/ipipe.h>
 
 #include <asm/user.h>
@@ -190,24 +189,12 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
 	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
-#ifdef CONFIG_IPIPE
-static inline union fpregs_state *active_fpstate(struct fpu *fpu)
-{
-	return fpu->active_state;
-}
-#else
-static inline union fpregs_state *active_fpstate(struct fpu *fpu)
-{
-	return &fpu->state;
-}
-#endif
-
 static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 {
 	if (IS_ENABLED(CONFIG_X86_32))
-		asm volatile( "fxsave %[fx]" : [fx] "=m" (active_fpstate(fpu)->fxsave));
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
 	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
-		asm volatile("fxsaveq %[fx]" : [fx] "=m" (active_fpstate(fpu)->fxsave));
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
 	else {
 		/* Using "rex64; fxsave %0" is broken because, if the memory
 		 * operand uses any extended registers for addressing, a second
@@ -231,8 +218,8 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 		 * registers.
 		 */
 		asm volatile( "rex64/fxsave (%[fx])"
-			      : "=m" (active_fpstate(fpu)->fxsave)
-			      : [fx] "R" (&active_fpstate(fpu)->fxsave));
+                             : "=m" (fpu->state.fxsave)
+                             : [fx] "R" (&fpu->state.fxsave));
 	}
 }
 
@@ -441,7 +428,7 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 {
 	if (likely(use_xsave())) {
-		copy_xregs_to_kernel(&active_fpstate(fpu)->xsave);
+		copy_xregs_to_kernel(&fpu->state.xsave);
 		return 1;
 	}
 
@@ -454,7 +441,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
 	 * so we have to mark them inactive:
 	 */
-	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (active_fpstate(fpu)->fsave));
+	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
 
 	return 0;
 }
@@ -609,8 +596,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 	 * If the task has used the math, pre-load the FPU on xsave processors
 	 * or if the past 5 consecutive context-switches used math.
 	 */
-	fpu.preload = !IS_ENABLED(CONFIG_IPIPE) &&
-		      static_cpu_has(X86_FEATURE_FPU) &&
+	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
 		      new_fpu->fpstate_active &&
 		      (use_eager_fpu() || new_fpu->counter > 5);
 
@@ -660,7 +646,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
  */
 static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
-	if (!IS_ENABLED(CONFIG_IPIPE) && fpu_switch.preload)
+	if (fpu_switch.preload)
 		copy_kernel_to_fpregs(&new_fpu->state);
 }
 
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 497c551469ec..48df486b02f9 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -332,18 +332,6 @@ struct fpu {
 	 * deal with bursty apps that only use the FPU for a short time:
 	 */
 	unsigned char			counter;
-
-#ifdef CONFIG_IPIPE
-	/*
-	 * @active_state
-	 *
-	 * An indirection pointer to reach the active state context
-	 * for the task.  This is used by co-kernels for dealing with
-	 * preemption of kernel fpu contexts by their own tasks.
-	 */
-	union fpregs_state		*active_state;
-#endif
-	
 	/*
 	 * @state:
 	 *
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index d31a73b2ab76..7dd8518272ca 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -124,7 +124,7 @@ void __kernel_fpu_end(void)
 
 	flags = hard_cond_local_irq_save();
 	if (fpu->fpregs_active)
-		copy_kernel_to_fpregs(active_fpstate(fpu));
+		copy_kernel_to_fpregs(&fpu->state);
 	else
 		__fpregs_deactivate_hw();
 
@@ -192,7 +192,7 @@ void fpu__save(struct fpu *fpu)
 	if (fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
 			if (use_eager_fpu())
-				copy_kernel_to_fpregs(active_fpstate(fpu));
+				copy_kernel_to_fpregs(&fpu->state);
 			else
 				fpregs_deactivate(fpu);
 		}
@@ -244,13 +244,8 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	dst_fpu->counter = 0;
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
-#ifdef CONFIG_IPIPE
-	/* Must be set before FPU context is copied. */
-	dst_fpu->active_state = &dst_fpu->state;
-#endif
 
-	if (!IS_ENABLED(CONFIG_IPIPE) &&
-	    (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU)))
+	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
 		return 0;
 
 	WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -283,7 +278,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 		       fpu_kernel_xstate_size);
 
 		if (use_eager_fpu())
-			copy_kernel_to_fpregs(active_fpstate(src_fpu));
+			copy_kernel_to_fpregs(&src_fpu->state);
 		else
 			fpregs_deactivate(src_fpu);
 	}
@@ -430,7 +425,7 @@ void fpu__current_fpstate_write_end(void)
 	 * an XRSTOR if they are active.
 	 */
 	if (fpregs_active())
-		copy_kernel_to_fpregs(active_fpstate(fpu));
+		copy_kernel_to_fpregs(&fpu->state);
 
 	/*
 	 * Our update is done and the fpregs/fpstate are in sync
@@ -460,7 +455,7 @@ void fpu__restore(struct fpu *fpu)
 	kernel_fpu_disable();
 	trace_x86_fpu_before_restore(fpu);
 	fpregs_activate(fpu);
-	copy_kernel_to_fpregs(active_fpstate(fpu));
+	copy_kernel_to_fpregs(&fpu->state);
 	fpu->counter++;
 	trace_x86_fpu_after_restore(fpu);
 	kernel_fpu_enable();
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 3eee35d3d6f8..a1fc061d03e1 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -42,9 +42,6 @@ static void fpu__init_cpu_generic(void)
 		cr0 |= X86_CR0_EM;
 	write_cr0(cr0);
 
-#ifdef CONFIG_IPIPE
-	current->thread.fpu.active_state = &current->thread.fpu.state;
-#endif
 	/* Flush out any pending x87 state: */
 #ifdef CONFIG_MATH_EMULATION
 	if (!boot_cpu_has(X86_FEATURE_FPU))
@@ -329,7 +326,7 @@ static void __init fpu__init_system_ctx_switch(void)
 		eagerfpu = ENABLE;
 
 	if (IS_ENABLED(CONFIG_IPIPE))
-		eagerfpu = DISABLE;
+		eagerfpu = ENABLE;
 
 	if (eagerfpu == ENABLE)
 		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
-- 
2.19.0



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [Xenomai] [PATCHv2 2/4] cobalt/x86: add ipipe-4.4 eager fpu support
  2018-09-14 16:14 [Xenomai] [PATCHv2 1/4] cobalt/x86: add support for eager fpu handling Henning Schild
  2018-09-14 16:14 ` [Xenomai] [PATCH] gitignore: add build output for in-tree builds Henning Schild
  2018-09-14 16:14 ` [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager Henning Schild
@ 2018-09-14 16:14 ` Henning Schild
  2018-09-14 16:14 ` [Xenomai] [PATCHv2 3/4] cobalt: fixup for kernel 4.14+ Henning Schild
  2018-09-14 16:14 ` [Xenomai] [PATCHv2 4/4] cobalt/x86: add ipipe-4.14 eager fpu support Henning Schild
  4 siblings, 0 replies; 12+ messages in thread
From: Henning Schild @ 2018-09-14 16:14 UTC (permalink / raw)
  To: xenomai

Linux 4.4.138 switched to eager fpu, set IPIPE_X86_FPU_EAGER
accordingly.

Signed-off-by: Henning Schild <henning.schild@siemens.com>
---
 kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h b/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
index 00f0aaae5..a47730106 100644
--- a/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
+++ b/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
@@ -28,6 +28,10 @@
     LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)
 #define IPIPE_X86_FPU_EAGER
 #endif
+#if LINUX_VERSION_CODE > KERNEL_VERSION(4,4,137) && \
+    LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
+#define IPIPE_X86_FPU_EAGER
+#endif
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
 #include <asm/i387.h>
-- 
2.19.0



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [Xenomai] [PATCHv2 3/4] cobalt: fixup for kernel 4.14+
  2018-09-14 16:14 [Xenomai] [PATCHv2 1/4] cobalt/x86: add support for eager fpu handling Henning Schild
                   ` (2 preceding siblings ...)
  2018-09-14 16:14 ` [Xenomai] [PATCHv2 2/4] cobalt/x86: add ipipe-4.4 eager fpu support Henning Schild
@ 2018-09-14 16:14 ` Henning Schild
  2018-09-14 16:14 ` [Xenomai] [PATCHv2 4/4] cobalt/x86: add ipipe-4.14 eager fpu support Henning Schild
  4 siblings, 0 replies; 12+ messages in thread
From: Henning Schild @ 2018-09-14 16:14 UTC (permalink / raw)
  To: xenomai

Signed-off-by: Henning Schild <henning.schild@siemens.com>
---
 kernel/cobalt/include/asm-generic/xenomai/syscall.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/kernel/cobalt/include/asm-generic/xenomai/syscall.h b/kernel/cobalt/include/asm-generic/xenomai/syscall.h
index f11ade8e7..3873fa634 100644
--- a/kernel/cobalt/include/asm-generic/xenomai/syscall.h
+++ b/kernel/cobalt/include/asm-generic/xenomai/syscall.h
@@ -20,7 +20,12 @@
 #define _COBALT_ASM_GENERIC_SYSCALL_H
 
 #include <linux/types.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
 #include <asm/uaccess.h>
+#else
+#include <linux/uaccess.h>
+#endif
 #include <asm/xenomai/features.h>
 #include <asm/xenomai/wrappers.h>
 #include <asm/xenomai/machine.h>
-- 
2.19.0



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* [Xenomai] [PATCHv2 4/4] cobalt/x86: add ipipe-4.14 eager fpu support
  2018-09-14 16:14 [Xenomai] [PATCHv2 1/4] cobalt/x86: add support for eager fpu handling Henning Schild
                   ` (3 preceding siblings ...)
  2018-09-14 16:14 ` [Xenomai] [PATCHv2 3/4] cobalt: fixup for kernel 4.14+ Henning Schild
@ 2018-09-14 16:14 ` Henning Schild
  4 siblings, 0 replies; 12+ messages in thread
From: Henning Schild @ 2018-09-14 16:14 UTC (permalink / raw)
  To: xenomai

4.14 is always eager, unfortunately we will need a few ifdefs inside the
eager fpu support as well.

Signed-off-by: Henning Schild <henning.schild@siemens.com>
---
 .../arch/x86/include/asm/xenomai/wrappers.h   |  4 ++++
 kernel/cobalt/arch/x86/thread.c               | 21 ++++++++++++++++++-
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h b/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
index a47730106..a4cc368a5 100644
--- a/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
+++ b/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
@@ -32,6 +32,10 @@
     LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
 #define IPIPE_X86_FPU_EAGER
 #endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+#define IPIPE_X86_FPU_EAGER
+#endif
+
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
 #include <asm/i387.h>
diff --git a/kernel/cobalt/arch/x86/thread.c b/kernel/cobalt/arch/x86/thread.c
index 18cf636e5..2668ce274 100644
--- a/kernel/cobalt/arch/x86/thread.c
+++ b/kernel/cobalt/arch/x86/thread.c
@@ -42,6 +42,7 @@ static struct kmem_cache *xstate_cache;
 #define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
 #endif
 
+#ifndef IPIPE_X86_FPU_EAGER
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
 #include <asm/i387.h>
 #include <asm/fpu-internal.h>
@@ -72,6 +73,9 @@ static inline void x86_fpregs_activate(struct task_struct *t)
 #define x86_xstate_alignment		__alignof__(union fpregs_state)
 
 #endif
+#else /* IPIPE_X86_FPU_EAGER */
+#define x86_xstate_alignment		__alignof__(union fpregs_state)
+#endif /* ! IPIPE_X86_FPU_EAGER */
 
 #if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
 /*
@@ -465,9 +469,15 @@ void xnarch_leave_root(struct xnthread *root)
 	// save fpregs from in-kernel use
 	copy_fpregs_to_fpstate(rootcb->kfpu);
 	kernel_fpu_enable();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+	// restore current's fpregs
+	__cpu_invalidate_fpregs_state();
+	switch_fpu_finish(&current->thread.fpu, smp_processor_id());
+#else
 	// mark current thread as not owning the FPU anymore
 	if (&current->thread.fpu.fpstate_active)
 		fpregs_deactivate(&current->thread.fpu);
+#endif
 }
 
 void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
@@ -528,7 +538,11 @@ void xnarch_init_shadow_tcb(struct xnthread *thread)
 #else /* IPIPE_X86_FPU_EAGER */
 	/* XNFPU is always set */
 	xnthread_set_state(thread, XNFPU);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
 	fpu__activate_fpstate_read(&p->thread.fpu);
+#else
+	fpu__initialize(&p->thread.fpu);
+#endif
 #endif /* ! IPIPE_X86_FPU_EAGER */
 }
 
@@ -537,7 +551,12 @@ int mach_x86_thread_init(void)
 	xstate_cache = kmem_cache_create("cobalt_x86_xstate",
 					 fpu_kernel_xstate_size,
 					 x86_xstate_alignment,
-					 SLAB_NOTRACK, NULL);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
+					 SLAB_NOTRACK,
+#else
+					 0,
+#endif
+					 NULL);
 	if (xstate_cache == NULL)
 		return -ENOMEM;
 
-- 
2.19.0



^ permalink raw reply related	[flat|nested] 12+ messages in thread

* Re: [Xenomai] [PATCH] gitignore: add build output for in-tree builds
  2018-09-14 16:14 ` [Xenomai] [PATCH] gitignore: add build output for in-tree builds Henning Schild
@ 2018-09-14 16:16   ` Henning Schild
  0 siblings, 0 replies; 12+ messages in thread
From: Henning Schild @ 2018-09-14 16:16 UTC (permalink / raw)
  To: xenomai

This is the only new patch, the rest was still sitting in the
send-email directory ... sorry for the spam.

Henning

Am Fri, 14 Sep 2018 18:14:38 +0200
schrieb Henning Schild <henning.schild@siemens.com>:

> This adds all build output to .gitignore. Not everybody works out of
> tree, or knows that this option exists. So let us add all build output
> to .gitignore. While this names all our binaries and has the risk of
> getting out of sync, it at least makes "git status" human readable
> again.
> 
> Signed-off-by: Henning Schild <henning.schild@siemens.com>
> ---
>  .gitignore | 67
> ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file
> changed, 67 insertions(+)
> 
> diff --git a/.gitignore b/.gitignore
> index b1d682032..a054a65af 100644
> --- a/.gitignore
> +++ b/.gitignore
> @@ -15,3 +15,70 @@ config/missing
>  configure
>  aclocal.m4
>  autom4te.cache
> +Makefile
> +.deps
> +config.log
> +config.status
> +include/stamp-h1
> +include/xeno_config.h
> +libtool
> +scripts/xeno
> +scripts/xeno-config
> +utils/net/rtnet
> +utils/net/rtnet.conf
> +*.a
> +*.o
> +*.la
> +*.lo
> +.libs
> +.dirstamp
> +demo/alchemy/altency
> +demo/alchemy/cobalt/cross-link
> +demo/posix/cobalt/bufp-label
> +demo/posix/cobalt/bufp-readwrite
> +demo/posix/cobalt/can_rtt
> +demo/posix/cobalt/eth_p_all
> +demo/posix/cobalt/gpiopwm
> +demo/posix/cobalt/iddp-label
> +demo/posix/cobalt/iddp-sendrecv
> +demo/posix/cobalt/xddp-echo
> +demo/posix/cobalt/xddp-label
> +demo/posix/cobalt/xddp-stream
> +demo/posix/cyclictest/cyclictest
> +lib/boilerplate/config-dump.h
> +lib/boilerplate/git-stamp.h
> +lib/boilerplate/version
> +testsuite/clocktest/clocktest
> +testsuite/gpiotest/gpiotest
> +testsuite/latency/latency
> +testsuite/smokey/net_common/smokey_net_server
> +testsuite/smokey/smokey
> +testsuite/spitest/spitest
> +testsuite/switchtest/switchtest
> +testsuite/xeno-test/xeno-test
> +testsuite/xeno-test/xeno-test-run
> +utils/analogy/analogy_calibrate
> +utils/analogy/analogy_config
> +utils/analogy/cmd_bits
> +utils/analogy/cmd_read
> +utils/analogy/cmd_write
> +utils/analogy/insn_bits
> +utils/analogy/insn_read
> +utils/analogy/insn_write
> +utils/analogy/wf_generate
> +utils/autotune/autotune
> +utils/can/rtcanconfig
> +utils/can/rtcanrecv
> +utils/can/rtcansend
> +utils/corectl/corectl
> +utils/hdb/hdb
> +utils/net/nomaccfg
> +utils/net/rtcfg
> +utils/net/rtifconfig
> +utils/net/rtiwconfig
> +utils/net/rtping
> +utils/net/rtroute
> +utils/net/tdmacfg
> +utils/ps/rtps
> +utils/slackspot/slackspot
> +testsuite/smokey/dlopen/dlopentest



^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager
  2018-09-14 16:14 ` [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager Henning Schild
@ 2018-09-25 11:20   ` Jan Kiszka
  2018-09-26  8:45     ` Henning Schild
  0 siblings, 1 reply; 12+ messages in thread
From: Jan Kiszka @ 2018-09-25 11:20 UTC (permalink / raw)
  To: Henning Schild, xenomai

On 14.09.18 18:14, Henning Schild wrote:
> Linux 4.14 dropped support for lazy fpu switching and in the 4.4 and 4.9
> series similar changes were backported.
> So fpu is eager for those versions. That simplifies things a lot and we can
> drop several changes from the IPIPE patch.
> On the Xenomai side the only thing we still have to care about is the
> kernel fpu state when we interrupt an in kernel fpu user. But we do that
> explicitly and can drop the indirection active_(fp)state.
> 
> This patch basically drops most of the fpu specifics from the ipipe
> patch.
> 
> This patch applies on ipipe-4.9.y and it has to be followed by a merge with
>> = 4.9.109. Cobalt will not compile if you are already eager and still
> 4.9.108
> 
> Signed-off-by: Henning Schild <henning.schild@siemens.com>
> ---
>   arch/x86/include/asm/fpu/internal.h | 30 ++++++++---------------------
>   arch/x86/include/asm/fpu/types.h    | 12 ------------
>   arch/x86/kernel/fpu/core.c          | 17 ++++++----------
>   arch/x86/kernel/fpu/init.c          |  5 +----
>   4 files changed, 15 insertions(+), 49 deletions(-)
> 
> diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
> index 35ca184e83e1..6bdceba90f17 100644
> --- a/arch/x86/include/asm/fpu/internal.h
> +++ b/arch/x86/include/asm/fpu/internal.h
> @@ -13,7 +13,6 @@
>   #include <linux/compat.h>
>   #include <linux/sched.h>
>   #include <linux/slab.h>
> -#include <linux/kconfig.h>
>   #include <linux/ipipe.h>
>   
>   #include <asm/user.h>
> @@ -190,24 +189,12 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
>   	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
>   }
>   
> -#ifdef CONFIG_IPIPE
> -static inline union fpregs_state *active_fpstate(struct fpu *fpu)
> -{
> -	return fpu->active_state;
> -}
> -#else
> -static inline union fpregs_state *active_fpstate(struct fpu *fpu)
> -{
> -	return &fpu->state;
> -}
> -#endif
> -
>   static inline void copy_fxregs_to_kernel(struct fpu *fpu)
>   {
>   	if (IS_ENABLED(CONFIG_X86_32))
> -		asm volatile( "fxsave %[fx]" : [fx] "=m" (active_fpstate(fpu)->fxsave));
> +		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
>   	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
> -		asm volatile("fxsaveq %[fx]" : [fx] "=m" (active_fpstate(fpu)->fxsave));
> +		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
>   	else {
>   		/* Using "rex64; fxsave %0" is broken because, if the memory
>   		 * operand uses any extended registers for addressing, a second
> @@ -231,8 +218,8 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
>   		 * registers.
>   		 */
>   		asm volatile( "rex64/fxsave (%[fx])"
> -			      : "=m" (active_fpstate(fpu)->fxsave)
> -			      : [fx] "R" (&active_fpstate(fpu)->fxsave));
> +                             : "=m" (fpu->state.fxsave)
> +                             : [fx] "R" (&fpu->state.fxsave));
>   	}
>   }
>   
> @@ -441,7 +428,7 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
>   static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
>   {
>   	if (likely(use_xsave())) {
> -		copy_xregs_to_kernel(&active_fpstate(fpu)->xsave);
> +		copy_xregs_to_kernel(&fpu->state.xsave);
>   		return 1;
>   	}
>   
> @@ -454,7 +441,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
>   	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
>   	 * so we have to mark them inactive:
>   	 */
> -	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (active_fpstate(fpu)->fsave));
> +	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
>   
>   	return 0;
>   }
> @@ -609,8 +596,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
>   	 * If the task has used the math, pre-load the FPU on xsave processors
>   	 * or if the past 5 consecutive context-switches used math.
>   	 */
> -	fpu.preload = !IS_ENABLED(CONFIG_IPIPE) &&
> -		      static_cpu_has(X86_FEATURE_FPU) &&
> +	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
>   		      new_fpu->fpstate_active &&
>   		      (use_eager_fpu() || new_fpu->counter > 5);
>   
> @@ -660,7 +646,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
>    */
>   static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
>   {
> -	if (!IS_ENABLED(CONFIG_IPIPE) && fpu_switch.preload)
> +	if (fpu_switch.preload)
>   		copy_kernel_to_fpregs(&new_fpu->state);
>   }
>   
> diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
> index 497c551469ec..48df486b02f9 100644
> --- a/arch/x86/include/asm/fpu/types.h
> +++ b/arch/x86/include/asm/fpu/types.h
> @@ -332,18 +332,6 @@ struct fpu {
>   	 * deal with bursty apps that only use the FPU for a short time:
>   	 */
>   	unsigned char			counter;
> -
> -#ifdef CONFIG_IPIPE
> -	/*
> -	 * @active_state
> -	 *
> -	 * An indirection pointer to reach the active state context
> -	 * for the task.  This is used by co-kernels for dealing with
> -	 * preemption of kernel fpu contexts by their own tasks.
> -	 */
> -	union fpregs_state		*active_state;
> -#endif
> -	
>   	/*
>   	 * @state:
>   	 *
> diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
> index d31a73b2ab76..7dd8518272ca 100644
> --- a/arch/x86/kernel/fpu/core.c
> +++ b/arch/x86/kernel/fpu/core.c
> @@ -124,7 +124,7 @@ void __kernel_fpu_end(void)
>   
>   	flags = hard_cond_local_irq_save();
>   	if (fpu->fpregs_active)
> -		copy_kernel_to_fpregs(active_fpstate(fpu));
> +		copy_kernel_to_fpregs(&fpu->state);
>   	else
>   		__fpregs_deactivate_hw();
>   
> @@ -192,7 +192,7 @@ void fpu__save(struct fpu *fpu)
>   	if (fpu->fpregs_active) {
>   		if (!copy_fpregs_to_fpstate(fpu)) {
>   			if (use_eager_fpu())
> -				copy_kernel_to_fpregs(active_fpstate(fpu));
> +				copy_kernel_to_fpregs(&fpu->state);
>   			else
>   				fpregs_deactivate(fpu);
>   		}
> @@ -244,13 +244,8 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
>   	dst_fpu->counter = 0;
>   	dst_fpu->fpregs_active = 0;
>   	dst_fpu->last_cpu = -1;
> -#ifdef CONFIG_IPIPE
> -	/* Must be set before FPU context is copied. */
> -	dst_fpu->active_state = &dst_fpu->state;
> -#endif
>   
> -	if (!IS_ENABLED(CONFIG_IPIPE) &&
> -	    (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU)))
> +	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
>   		return 0;
>   
>   	WARN_ON_FPU(src_fpu != &current->thread.fpu);
> @@ -283,7 +278,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
>   		       fpu_kernel_xstate_size);
>   
>   		if (use_eager_fpu())
> -			copy_kernel_to_fpregs(active_fpstate(src_fpu));
> +			copy_kernel_to_fpregs(&src_fpu->state);
>   		else
>   			fpregs_deactivate(src_fpu);
>   	}
> @@ -430,7 +425,7 @@ void fpu__current_fpstate_write_end(void)
>   	 * an XRSTOR if they are active.
>   	 */
>   	if (fpregs_active())
> -		copy_kernel_to_fpregs(active_fpstate(fpu));
> +		copy_kernel_to_fpregs(&fpu->state);
>   
>   	/*
>   	 * Our update is done and the fpregs/fpstate are in sync
> @@ -460,7 +455,7 @@ void fpu__restore(struct fpu *fpu)
>   	kernel_fpu_disable();
>   	trace_x86_fpu_before_restore(fpu);
>   	fpregs_activate(fpu);
> -	copy_kernel_to_fpregs(active_fpstate(fpu));
> +	copy_kernel_to_fpregs(&fpu->state);
>   	fpu->counter++;
>   	trace_x86_fpu_after_restore(fpu);
>   	kernel_fpu_enable();
> diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
> index 3eee35d3d6f8..a1fc061d03e1 100644
> --- a/arch/x86/kernel/fpu/init.c
> +++ b/arch/x86/kernel/fpu/init.c
> @@ -42,9 +42,6 @@ static void fpu__init_cpu_generic(void)
>   		cr0 |= X86_CR0_EM;
>   	write_cr0(cr0);
>   
> -#ifdef CONFIG_IPIPE
> -	current->thread.fpu.active_state = &current->thread.fpu.state;
> -#endif
>   	/* Flush out any pending x87 state: */
>   #ifdef CONFIG_MATH_EMULATION
>   	if (!boot_cpu_has(X86_FEATURE_FPU))
> @@ -329,7 +326,7 @@ static void __init fpu__init_system_ctx_switch(void)
>   		eagerfpu = ENABLE;
>   
>   	if (IS_ENABLED(CONFIG_IPIPE))
> -		eagerfpu = DISABLE;
> +		eagerfpu = ENABLE;
>   
>   	if (eagerfpu == ENABLE)
>   		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
> 

This last hunk will conflict with a merge of 4.9.109+ which completely removes 
the whole section. Shouldn't we just remove that I-pipe-specific change here?

Jan

-- 
Siemens AG, Corporate Technology, CT RDA IOT SES-DE
Corporate Competence Center Embedded Linux


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager
  2018-09-25 11:20   ` Jan Kiszka
@ 2018-09-26  8:45     ` Henning Schild
  2018-09-26  8:56       ` Jan Kiszka
  0 siblings, 1 reply; 12+ messages in thread
From: Henning Schild @ 2018-09-26  8:45 UTC (permalink / raw)
  To: Jan Kiszka; +Cc: xenomai

Am Tue, 25 Sep 2018 13:20:57 +0200
schrieb Jan Kiszka <jan.kiszka@siemens.com>:

> On 14.09.18 18:14, Henning Schild wrote:
> > Linux 4.14 dropped support for lazy fpu switching and in the 4.4
> > and 4.9 series similar changes were backported.
> > So fpu is eager for those versions. That simplifies things a lot
> > and we can drop several changes from the IPIPE patch.
> > On the Xenomai side the only thing we still have to care about is
> > the kernel fpu state when we interrupt an in kernel fpu user. But
> > we do that explicitly and can drop the indirection active_(fp)state.
> > 
> > This patch basically drops most of the fpu specifics from the ipipe
> > patch.
> > 
> > This patch applies on ipipe-4.9.y and it has to be followed by a
> > merge with  
> >> = 4.9.109. Cobalt will not compile if you are already eager and
> >> still  
> > 4.9.108
> > 
> > Signed-off-by: Henning Schild <henning.schild@siemens.com>
> > ---
> >   arch/x86/include/asm/fpu/internal.h | 30
> > ++++++++--------------------- arch/x86/include/asm/fpu/types.h    |
> > 12 ------------ arch/x86/kernel/fpu/core.c          | 17
> > ++++++---------- arch/x86/kernel/fpu/init.c          |  5 +----
> >   4 files changed, 15 insertions(+), 49 deletions(-)
> > 
> > diff --git a/arch/x86/include/asm/fpu/internal.h
> > b/arch/x86/include/asm/fpu/internal.h index
> > 35ca184e83e1..6bdceba90f17 100644 ---
> > a/arch/x86/include/asm/fpu/internal.h +++
> > b/arch/x86/include/asm/fpu/internal.h @@ -13,7 +13,6 @@
> >   #include <linux/compat.h>
> >   #include <linux/sched.h>
> >   #include <linux/slab.h>
> > -#include <linux/kconfig.h>
> >   #include <linux/ipipe.h>
> >   
> >   #include <asm/user.h>
> > @@ -190,24 +189,12 @@ static inline int copy_user_to_fregs(struct
> > fregs_state __user *fx) return user_insn(frstor %[fx], "=m" (*fx),
> > [fx] "m" (*fx)); }
> >   
> > -#ifdef CONFIG_IPIPE
> > -static inline union fpregs_state *active_fpstate(struct fpu *fpu)
> > -{
> > -	return fpu->active_state;
> > -}
> > -#else
> > -static inline union fpregs_state *active_fpstate(struct fpu *fpu)
> > -{
> > -	return &fpu->state;
> > -}
> > -#endif
> > -
> >   static inline void copy_fxregs_to_kernel(struct fpu *fpu)
> >   {
> >   	if (IS_ENABLED(CONFIG_X86_32))
> > -		asm volatile( "fxsave %[fx]" : [fx]
> > "=m" (active_fpstate(fpu)->fxsave));
> > +		asm volatile( "fxsave %[fx]" : [fx]
> > "=m" (fpu->state.fxsave)); else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
> > -		asm volatile("fxsaveq %[fx]" : [fx]
> > "=m" (active_fpstate(fpu)->fxsave));
> > +		asm volatile("fxsaveq %[fx]" : [fx]
> > "=m" (fpu->state.fxsave)); else {
> >   		/* Using "rex64; fxsave %0" is broken because, if
> > the memory
> >   		 * operand uses any extended registers for
> > addressing, a second @@ -231,8 +218,8 @@ static inline void
> > copy_fxregs_to_kernel(struct fpu *fpu)
> >   		 * registers.
> >   		 */
> >   		asm volatile( "rex64/fxsave (%[fx])"
> > -			      : "=m" (active_fpstate(fpu)->fxsave)
> > -			      : [fx]
> > "R" (&active_fpstate(fpu)->fxsave));
> > +                             : "=m" (fpu->state.fxsave)
> > +                             : [fx] "R" (&fpu->state.fxsave));
> >   	}
> >   }
> >   
> > @@ -441,7 +428,7 @@ static inline int copy_user_to_xregs(struct
> > xregs_state __user *buf, u64 mask) static inline int
> > copy_fpregs_to_fpstate(struct fpu *fpu) {
> >   	if (likely(use_xsave())) {
> > -		copy_xregs_to_kernel(&active_fpstate(fpu)->xsave);
> > +		copy_xregs_to_kernel(&fpu->state.xsave);
> >   		return 1;
> >   	}
> >   
> > @@ -454,7 +441,7 @@ static inline int copy_fpregs_to_fpstate(struct
> > fpu *fpu)
> >   	 * Legacy FPU register saving, FNSAVE always clears FPU
> > registers,
> >   	 * so we have to mark them inactive:
> >   	 */
> > -	asm volatile("fnsave %[fp]; fwait" : [fp]
> > "=m" (active_fpstate(fpu)->fsave));
> > +	asm volatile("fnsave %[fp]; fwait" : [fp]
> > "=m" (fpu->state.fsave)); 
> >   	return 0;
> >   }
> > @@ -609,8 +596,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct
> > fpu *new_fpu, int cpu)
> >   	 * If the task has used the math, pre-load the FPU on
> > xsave processors
> >   	 * or if the past 5 consecutive context-switches used
> > math. */
> > -	fpu.preload = !IS_ENABLED(CONFIG_IPIPE) &&
> > -		      static_cpu_has(X86_FEATURE_FPU) &&
> > +	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
> >   		      new_fpu->fpstate_active &&
> >   		      (use_eager_fpu() || new_fpu->counter > 5);
> >   
> > @@ -660,7 +646,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct
> > fpu *new_fpu, int cpu) */
> >   static inline void switch_fpu_finish(struct fpu *new_fpu,
> > fpu_switch_t fpu_switch) {
> > -	if (!IS_ENABLED(CONFIG_IPIPE) && fpu_switch.preload)
> > +	if (fpu_switch.preload)
> >   		copy_kernel_to_fpregs(&new_fpu->state);
> >   }
> >   
> > diff --git a/arch/x86/include/asm/fpu/types.h
> > b/arch/x86/include/asm/fpu/types.h index 497c551469ec..48df486b02f9
> > 100644 --- a/arch/x86/include/asm/fpu/types.h
> > +++ b/arch/x86/include/asm/fpu/types.h
> > @@ -332,18 +332,6 @@ struct fpu {
> >   	 * deal with bursty apps that only use the FPU for a
> > short time: */
> >   	unsigned char			counter;
> > -
> > -#ifdef CONFIG_IPIPE
> > -	/*
> > -	 * @active_state
> > -	 *
> > -	 * An indirection pointer to reach the active state context
> > -	 * for the task.  This is used by co-kernels for dealing
> > with
> > -	 * preemption of kernel fpu contexts by their own tasks.
> > -	 */
> > -	union fpregs_state		*active_state;
> > -#endif
> > -	
> >   	/*
> >   	 * @state:
> >   	 *
> > diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
> > index d31a73b2ab76..7dd8518272ca 100644
> > --- a/arch/x86/kernel/fpu/core.c
> > +++ b/arch/x86/kernel/fpu/core.c
> > @@ -124,7 +124,7 @@ void __kernel_fpu_end(void)
> >   
> >   	flags = hard_cond_local_irq_save();
> >   	if (fpu->fpregs_active)
> > -		copy_kernel_to_fpregs(active_fpstate(fpu));
> > +		copy_kernel_to_fpregs(&fpu->state);
> >   	else
> >   		__fpregs_deactivate_hw();
> >   
> > @@ -192,7 +192,7 @@ void fpu__save(struct fpu *fpu)
> >   	if (fpu->fpregs_active) {
> >   		if (!copy_fpregs_to_fpstate(fpu)) {
> >   			if (use_eager_fpu())
> > -
> > copy_kernel_to_fpregs(active_fpstate(fpu));
> > +				copy_kernel_to_fpregs(&fpu->state);
> >   			else
> >   				fpregs_deactivate(fpu);
> >   		}
> > @@ -244,13 +244,8 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu
> > *src_fpu) dst_fpu->counter = 0;
> >   	dst_fpu->fpregs_active = 0;
> >   	dst_fpu->last_cpu = -1;
> > -#ifdef CONFIG_IPIPE
> > -	/* Must be set before FPU context is copied. */
> > -	dst_fpu->active_state = &dst_fpu->state;
> > -#endif
> >   
> > -	if (!IS_ENABLED(CONFIG_IPIPE) &&
> > -	    (!src_fpu->fpstate_active
> > || !static_cpu_has(X86_FEATURE_FPU)))
> > +	if (!src_fpu->fpstate_active
> > || !static_cpu_has(X86_FEATURE_FPU)) return 0;
> >   
> >   	WARN_ON_FPU(src_fpu != &current->thread.fpu);
> > @@ -283,7 +278,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu
> > *src_fpu) fpu_kernel_xstate_size);
> >   
> >   		if (use_eager_fpu())
> > -
> > copy_kernel_to_fpregs(active_fpstate(src_fpu));
> > +			copy_kernel_to_fpregs(&src_fpu->state);
> >   		else
> >   			fpregs_deactivate(src_fpu);
> >   	}
> > @@ -430,7 +425,7 @@ void fpu__current_fpstate_write_end(void)
> >   	 * an XRSTOR if they are active.
> >   	 */
> >   	if (fpregs_active())
> > -		copy_kernel_to_fpregs(active_fpstate(fpu));
> > +		copy_kernel_to_fpregs(&fpu->state);
> >   
> >   	/*
> >   	 * Our update is done and the fpregs/fpstate are in sync
> > @@ -460,7 +455,7 @@ void fpu__restore(struct fpu *fpu)
> >   	kernel_fpu_disable();
> >   	trace_x86_fpu_before_restore(fpu);
> >   	fpregs_activate(fpu);
> > -	copy_kernel_to_fpregs(active_fpstate(fpu));
> > +	copy_kernel_to_fpregs(&fpu->state);
> >   	fpu->counter++;
> >   	trace_x86_fpu_after_restore(fpu);
> >   	kernel_fpu_enable();
> > diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
> > index 3eee35d3d6f8..a1fc061d03e1 100644
> > --- a/arch/x86/kernel/fpu/init.c
> > +++ b/arch/x86/kernel/fpu/init.c
> > @@ -42,9 +42,6 @@ static void fpu__init_cpu_generic(void)
> >   		cr0 |= X86_CR0_EM;
> >   	write_cr0(cr0);
> >   
> > -#ifdef CONFIG_IPIPE
> > -	current->thread.fpu.active_state =
> > &current->thread.fpu.state; -#endif
> >   	/* Flush out any pending x87 state: */
> >   #ifdef CONFIG_MATH_EMULATION
> >   	if (!boot_cpu_has(X86_FEATURE_FPU))
> > @@ -329,7 +326,7 @@ static void __init
> > fpu__init_system_ctx_switch(void) eagerfpu = ENABLE;
> >   
> >   	if (IS_ENABLED(CONFIG_IPIPE))
> > -		eagerfpu = DISABLE;
> > +		eagerfpu = ENABLE;
> >   
> >   	if (eagerfpu == ENABLE)
> >   		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
> >   
> 
> This last hunk will conflict with a merge of 4.9.109+ which
> completely removes the whole section. Shouldn't we just remove that
> I-pipe-specific change here?

I would suggest removing it when doing the actual merge. It is in this
patch because it allowed me to test right after that commit, with a 
Xenomai that did not match the 108+.

Henning


> Jan
> 



^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager
  2018-09-26  8:45     ` Henning Schild
@ 2018-09-26  8:56       ` Jan Kiszka
  0 siblings, 0 replies; 12+ messages in thread
From: Jan Kiszka @ 2018-09-26  8:56 UTC (permalink / raw)
  To: Henning Schild; +Cc: xenomai

On 26.09.18 10:45, Henning Schild wrote:
> Am Tue, 25 Sep 2018 13:20:57 +0200
> schrieb Jan Kiszka <jan.kiszka@siemens.com>:
> 
>> On 14.09.18 18:14, Henning Schild wrote:
>>> Linux 4.14 dropped support for lazy fpu switching and in the 4.4
>>> and 4.9 series similar changes were backported.
>>> So fpu is eager for those versions. That simplifies things a lot
>>> and we can drop several changes from the IPIPE patch.
>>> On the Xenomai side the only thing we still have to care about is
>>> the kernel fpu state when we interrupt an in kernel fpu user. But
>>> we do that explicitly and can drop the indirection active_(fp)state.
>>>
>>> This patch basically drops most of the fpu specifics from the ipipe
>>> patch.
>>>
>>> This patch applies on ipipe-4.9.y and it has to be followed by a
>>> merge with
>>>> = 4.9.109. Cobalt will not compile if you are already eager and
>>>> still
>>> 4.9.108
>>>
>>> Signed-off-by: Henning Schild <henning.schild@siemens.com>
>>> ---
>>>    arch/x86/include/asm/fpu/internal.h | 30
>>> ++++++++--------------------- arch/x86/include/asm/fpu/types.h    |
>>> 12 ------------ arch/x86/kernel/fpu/core.c          | 17
>>> ++++++---------- arch/x86/kernel/fpu/init.c          |  5 +----
>>>    4 files changed, 15 insertions(+), 49 deletions(-)
>>>
>>> diff --git a/arch/x86/include/asm/fpu/internal.h
>>> b/arch/x86/include/asm/fpu/internal.h index
>>> 35ca184e83e1..6bdceba90f17 100644 ---
>>> a/arch/x86/include/asm/fpu/internal.h +++
>>> b/arch/x86/include/asm/fpu/internal.h @@ -13,7 +13,6 @@
>>>    #include <linux/compat.h>
>>>    #include <linux/sched.h>
>>>    #include <linux/slab.h>
>>> -#include <linux/kconfig.h>
>>>    #include <linux/ipipe.h>
>>>    
>>>    #include <asm/user.h>
>>> @@ -190,24 +189,12 @@ static inline int copy_user_to_fregs(struct
>>> fregs_state __user *fx) return user_insn(frstor %[fx], "=m" (*fx),
>>> [fx] "m" (*fx)); }
>>>    
>>> -#ifdef CONFIG_IPIPE
>>> -static inline union fpregs_state *active_fpstate(struct fpu *fpu)
>>> -{
>>> -	return fpu->active_state;
>>> -}
>>> -#else
>>> -static inline union fpregs_state *active_fpstate(struct fpu *fpu)
>>> -{
>>> -	return &fpu->state;
>>> -}
>>> -#endif
>>> -
>>>    static inline void copy_fxregs_to_kernel(struct fpu *fpu)
>>>    {
>>>    	if (IS_ENABLED(CONFIG_X86_32))
>>> -		asm volatile( "fxsave %[fx]" : [fx]
>>> "=m" (active_fpstate(fpu)->fxsave));
>>> +		asm volatile( "fxsave %[fx]" : [fx]
>>> "=m" (fpu->state.fxsave)); else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
>>> -		asm volatile("fxsaveq %[fx]" : [fx]
>>> "=m" (active_fpstate(fpu)->fxsave));
>>> +		asm volatile("fxsaveq %[fx]" : [fx]
>>> "=m" (fpu->state.fxsave)); else {
>>>    		/* Using "rex64; fxsave %0" is broken because, if
>>> the memory
>>>    		 * operand uses any extended registers for
>>> addressing, a second @@ -231,8 +218,8 @@ static inline void
>>> copy_fxregs_to_kernel(struct fpu *fpu)
>>>    		 * registers.
>>>    		 */
>>>    		asm volatile( "rex64/fxsave (%[fx])"
>>> -			      : "=m" (active_fpstate(fpu)->fxsave)
>>> -			      : [fx]
>>> "R" (&active_fpstate(fpu)->fxsave));
>>> +                             : "=m" (fpu->state.fxsave)
>>> +                             : [fx] "R" (&fpu->state.fxsave));
>>>    	}
>>>    }
>>>    
>>> @@ -441,7 +428,7 @@ static inline int copy_user_to_xregs(struct
>>> xregs_state __user *buf, u64 mask) static inline int
>>> copy_fpregs_to_fpstate(struct fpu *fpu) {
>>>    	if (likely(use_xsave())) {
>>> -		copy_xregs_to_kernel(&active_fpstate(fpu)->xsave);
>>> +		copy_xregs_to_kernel(&fpu->state.xsave);
>>>    		return 1;
>>>    	}
>>>    
>>> @@ -454,7 +441,7 @@ static inline int copy_fpregs_to_fpstate(struct
>>> fpu *fpu)
>>>    	 * Legacy FPU register saving, FNSAVE always clears FPU
>>> registers,
>>>    	 * so we have to mark them inactive:
>>>    	 */
>>> -	asm volatile("fnsave %[fp]; fwait" : [fp]
>>> "=m" (active_fpstate(fpu)->fsave));
>>> +	asm volatile("fnsave %[fp]; fwait" : [fp]
>>> "=m" (fpu->state.fsave));
>>>    	return 0;
>>>    }
>>> @@ -609,8 +596,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct
>>> fpu *new_fpu, int cpu)
>>>    	 * If the task has used the math, pre-load the FPU on
>>> xsave processors
>>>    	 * or if the past 5 consecutive context-switches used
>>> math. */
>>> -	fpu.preload = !IS_ENABLED(CONFIG_IPIPE) &&
>>> -		      static_cpu_has(X86_FEATURE_FPU) &&
>>> +	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
>>>    		      new_fpu->fpstate_active &&
>>>    		      (use_eager_fpu() || new_fpu->counter > 5);
>>>    
>>> @@ -660,7 +646,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct
>>> fpu *new_fpu, int cpu) */
>>>    static inline void switch_fpu_finish(struct fpu *new_fpu,
>>> fpu_switch_t fpu_switch) {
>>> -	if (!IS_ENABLED(CONFIG_IPIPE) && fpu_switch.preload)
>>> +	if (fpu_switch.preload)
>>>    		copy_kernel_to_fpregs(&new_fpu->state);
>>>    }
>>>    
>>> diff --git a/arch/x86/include/asm/fpu/types.h
>>> b/arch/x86/include/asm/fpu/types.h index 497c551469ec..48df486b02f9
>>> 100644 --- a/arch/x86/include/asm/fpu/types.h
>>> +++ b/arch/x86/include/asm/fpu/types.h
>>> @@ -332,18 +332,6 @@ struct fpu {
>>>    	 * deal with bursty apps that only use the FPU for a
>>> short time: */
>>>    	unsigned char			counter;
>>> -
>>> -#ifdef CONFIG_IPIPE
>>> -	/*
>>> -	 * @active_state
>>> -	 *
>>> -	 * An indirection pointer to reach the active state context
>>> -	 * for the task.  This is used by co-kernels for dealing
>>> with
>>> -	 * preemption of kernel fpu contexts by their own tasks.
>>> -	 */
>>> -	union fpregs_state		*active_state;
>>> -#endif
>>> -	
>>>    	/*
>>>    	 * @state:
>>>    	 *
>>> diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
>>> index d31a73b2ab76..7dd8518272ca 100644
>>> --- a/arch/x86/kernel/fpu/core.c
>>> +++ b/arch/x86/kernel/fpu/core.c
>>> @@ -124,7 +124,7 @@ void __kernel_fpu_end(void)
>>>    
>>>    	flags = hard_cond_local_irq_save();
>>>    	if (fpu->fpregs_active)
>>> -		copy_kernel_to_fpregs(active_fpstate(fpu));
>>> +		copy_kernel_to_fpregs(&fpu->state);
>>>    	else
>>>    		__fpregs_deactivate_hw();
>>>    
>>> @@ -192,7 +192,7 @@ void fpu__save(struct fpu *fpu)
>>>    	if (fpu->fpregs_active) {
>>>    		if (!copy_fpregs_to_fpstate(fpu)) {
>>>    			if (use_eager_fpu())
>>> -
>>> copy_kernel_to_fpregs(active_fpstate(fpu));
>>> +				copy_kernel_to_fpregs(&fpu->state);
>>>    			else
>>>    				fpregs_deactivate(fpu);
>>>    		}
>>> @@ -244,13 +244,8 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu
>>> *src_fpu) dst_fpu->counter = 0;
>>>    	dst_fpu->fpregs_active = 0;
>>>    	dst_fpu->last_cpu = -1;
>>> -#ifdef CONFIG_IPIPE
>>> -	/* Must be set before FPU context is copied. */
>>> -	dst_fpu->active_state = &dst_fpu->state;
>>> -#endif
>>>    
>>> -	if (!IS_ENABLED(CONFIG_IPIPE) &&
>>> -	    (!src_fpu->fpstate_active
>>> || !static_cpu_has(X86_FEATURE_FPU)))
>>> +	if (!src_fpu->fpstate_active
>>> || !static_cpu_has(X86_FEATURE_FPU)) return 0;
>>>    
>>>    	WARN_ON_FPU(src_fpu != &current->thread.fpu);
>>> @@ -283,7 +278,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu
>>> *src_fpu) fpu_kernel_xstate_size);
>>>    
>>>    		if (use_eager_fpu())
>>> -
>>> copy_kernel_to_fpregs(active_fpstate(src_fpu));
>>> +			copy_kernel_to_fpregs(&src_fpu->state);
>>>    		else
>>>    			fpregs_deactivate(src_fpu);
>>>    	}
>>> @@ -430,7 +425,7 @@ void fpu__current_fpstate_write_end(void)
>>>    	 * an XRSTOR if they are active.
>>>    	 */
>>>    	if (fpregs_active())
>>> -		copy_kernel_to_fpregs(active_fpstate(fpu));
>>> +		copy_kernel_to_fpregs(&fpu->state);
>>>    
>>>    	/*
>>>    	 * Our update is done and the fpregs/fpstate are in sync
>>> @@ -460,7 +455,7 @@ void fpu__restore(struct fpu *fpu)
>>>    	kernel_fpu_disable();
>>>    	trace_x86_fpu_before_restore(fpu);
>>>    	fpregs_activate(fpu);
>>> -	copy_kernel_to_fpregs(active_fpstate(fpu));
>>> +	copy_kernel_to_fpregs(&fpu->state);
>>>    	fpu->counter++;
>>>    	trace_x86_fpu_after_restore(fpu);
>>>    	kernel_fpu_enable();
>>> diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
>>> index 3eee35d3d6f8..a1fc061d03e1 100644
>>> --- a/arch/x86/kernel/fpu/init.c
>>> +++ b/arch/x86/kernel/fpu/init.c
>>> @@ -42,9 +42,6 @@ static void fpu__init_cpu_generic(void)
>>>    		cr0 |= X86_CR0_EM;
>>>    	write_cr0(cr0);
>>>    
>>> -#ifdef CONFIG_IPIPE
>>> -	current->thread.fpu.active_state =
>>> &current->thread.fpu.state; -#endif
>>>    	/* Flush out any pending x87 state: */
>>>    #ifdef CONFIG_MATH_EMULATION
>>>    	if (!boot_cpu_has(X86_FEATURE_FPU))
>>> @@ -329,7 +326,7 @@ static void __init
>>> fpu__init_system_ctx_switch(void) eagerfpu = ENABLE;
>>>    
>>>    	if (IS_ENABLED(CONFIG_IPIPE))
>>> -		eagerfpu = DISABLE;
>>> +		eagerfpu = ENABLE;
>>>    
>>>    	if (eagerfpu == ENABLE)
>>>    		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
>>>    
>>
>> This last hunk will conflict with a merge of 4.9.109+ which
>> completely removes the whole section. Shouldn't we just remove that
>> I-pipe-specific change here?
> 
> I would suggest to remove it when doing the actual merge. It is in this
> patch because it allowed me to test right after that commit, with a
> Xenomai that did not match the 108+.

Yeah, that's what I did, see 
https://gitlab.denx.de/Xenomai/ipipe-x86/commits/for-upstream/4.9

Jan

-- 
Siemens AG, Corporate Technology, CT RDA IOT SES-DE
Corporate Competence Center Embedded Linux


^ permalink raw reply	[flat|nested] 12+ messages in thread

* Re: [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager
  2018-09-14 15:10 ` [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager Henning Schild
@ 2018-09-14 15:13   ` Henning Schild
  0 siblings, 0 replies; 12+ messages in thread
From: Henning Schild @ 2018-09-14 15:13 UTC (permalink / raw)
  To: xenomai

Changes from v1:
  removed whitespace changes and changed commit message

Am Fri, 14 Sep 2018 17:10:15 +0200
schrieb Henning Schild <henning.schild@siemens.com>:

> Linux 4.14 dropped support for lazy fpu switching and in the 4.4 and
> 4.9 series similar changes where backported.
> So fpu is eager for those versions. That simplifies things a lot and
> we can drop several changes from the IPIPE patch.
> On the Xenomai side the only thing we still have to care about is the
> kernel fpu state when we interrupt an in kernel fpu user. But we do
> that explicit and can drop the indirection active_(fp)state.
> 
> This patch basically drops most of the fpu specifics from the ipipe
> patch.
> 
> This patch applies on ipipe-4.9.y and it has to be followed by a
> merge with
> >= 4.9.109. Cobalt will not compile if you are already eager and
> >still  
> 4.9.108
> 
> Signed-off-by: Henning Schild <henning.schild@siemens.com>
> ---
>  arch/x86/include/asm/fpu/internal.h | 30
> ++++++++--------------------- arch/x86/include/asm/fpu/types.h    |
> 12 ------------ arch/x86/kernel/fpu/core.c          | 17
> ++++++---------- arch/x86/kernel/fpu/init.c          |  5 +----
>  4 files changed, 15 insertions(+), 49 deletions(-)
> 
> diff --git a/arch/x86/include/asm/fpu/internal.h
> b/arch/x86/include/asm/fpu/internal.h index
> 35ca184e83e1..6bdceba90f17 100644 ---
> a/arch/x86/include/asm/fpu/internal.h +++
> b/arch/x86/include/asm/fpu/internal.h @@ -13,7 +13,6 @@
>  #include <linux/compat.h>
>  #include <linux/sched.h>
>  #include <linux/slab.h>
> -#include <linux/kconfig.h>
>  #include <linux/ipipe.h>
>  
>  #include <asm/user.h>
> @@ -190,24 +189,12 @@ static inline int copy_user_to_fregs(struct
> fregs_state __user *fx) return user_insn(frstor %[fx], "=m" (*fx),
> [fx] "m" (*fx)); }
>  
> -#ifdef CONFIG_IPIPE
> -static inline union fpregs_state *active_fpstate(struct fpu *fpu)
> -{
> -	return fpu->active_state;
> -}
> -#else
> -static inline union fpregs_state *active_fpstate(struct fpu *fpu)
> -{
> -	return &fpu->state;
> -}
> -#endif
> -
>  static inline void copy_fxregs_to_kernel(struct fpu *fpu)
>  {
>  	if (IS_ENABLED(CONFIG_X86_32))
> -		asm volatile( "fxsave %[fx]" : [fx]
> "=m" (active_fpstate(fpu)->fxsave));
> +		asm volatile( "fxsave %[fx]" : [fx]
> "=m" (fpu->state.fxsave)); else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
> -		asm volatile("fxsaveq %[fx]" : [fx]
> "=m" (active_fpstate(fpu)->fxsave));
> +		asm volatile("fxsaveq %[fx]" : [fx]
> "=m" (fpu->state.fxsave)); else {
>  		/* Using "rex64; fxsave %0" is broken because, if
> the memory
>  		 * operand uses any extended registers for
> addressing, a second @@ -231,8 +218,8 @@ static inline void
> copy_fxregs_to_kernel(struct fpu *fpu)
>  		 * registers.
>  		 */
>  		asm volatile( "rex64/fxsave (%[fx])"
> -			      : "=m" (active_fpstate(fpu)->fxsave)
> -			      : [fx]
> "R" (&active_fpstate(fpu)->fxsave));
> +                             : "=m" (fpu->state.fxsave)
> +                             : [fx] "R" (&fpu->state.fxsave));
>  	}
>  }
>  
> @@ -441,7 +428,7 @@ static inline int copy_user_to_xregs(struct
> xregs_state __user *buf, u64 mask) static inline int
> copy_fpregs_to_fpstate(struct fpu *fpu) {
>  	if (likely(use_xsave())) {
> -		copy_xregs_to_kernel(&active_fpstate(fpu)->xsave);
> +		copy_xregs_to_kernel(&fpu->state.xsave);
>  		return 1;
>  	}
>  
> @@ -454,7 +441,7 @@ static inline int copy_fpregs_to_fpstate(struct
> fpu *fpu)
>  	 * Legacy FPU register saving, FNSAVE always clears FPU
> registers,
>  	 * so we have to mark them inactive:
>  	 */
> -	asm volatile("fnsave %[fp]; fwait" : [fp]
> "=m" (active_fpstate(fpu)->fsave));
> +	asm volatile("fnsave %[fp]; fwait" : [fp]
> "=m" (fpu->state.fsave)); 
>  	return 0;
>  }
> @@ -609,8 +596,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct
> fpu *new_fpu, int cpu)
>  	 * If the task has used the math, pre-load the FPU on xsave
> processors
>  	 * or if the past 5 consecutive context-switches used math.
>  	 */
> -	fpu.preload = !IS_ENABLED(CONFIG_IPIPE) &&
> -		      static_cpu_has(X86_FEATURE_FPU) &&
> +	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
>  		      new_fpu->fpstate_active &&
>  		      (use_eager_fpu() || new_fpu->counter > 5);
>  
> @@ -660,7 +646,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct
> fpu *new_fpu, int cpu) */
>  static inline void switch_fpu_finish(struct fpu *new_fpu,
> fpu_switch_t fpu_switch) {
> -	if (!IS_ENABLED(CONFIG_IPIPE) && fpu_switch.preload)
> +	if (fpu_switch.preload)
>  		copy_kernel_to_fpregs(&new_fpu->state);
>  }
>  
> diff --git a/arch/x86/include/asm/fpu/types.h
> b/arch/x86/include/asm/fpu/types.h index 497c551469ec..48df486b02f9
> 100644 --- a/arch/x86/include/asm/fpu/types.h
> +++ b/arch/x86/include/asm/fpu/types.h
> @@ -332,18 +332,6 @@ struct fpu {
>  	 * deal with bursty apps that only use the FPU for a short
> time: */
>  	unsigned char			counter;
> -
> -#ifdef CONFIG_IPIPE
> -	/*
> -	 * @active_state
> -	 *
> -	 * An indirection pointer to reach the active state context
> -	 * for the task.  This is used by co-kernels for dealing with
> -	 * preemption of kernel fpu contexts by their own tasks.
> -	 */
> -	union fpregs_state		*active_state;
> -#endif
> -	
>  	/*
>  	 * @state:
>  	 *
> diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
> index d31a73b2ab76..7dd8518272ca 100644
> --- a/arch/x86/kernel/fpu/core.c
> +++ b/arch/x86/kernel/fpu/core.c
> @@ -124,7 +124,7 @@ void __kernel_fpu_end(void)
>  
>  	flags = hard_cond_local_irq_save();
>  	if (fpu->fpregs_active)
> -		copy_kernel_to_fpregs(active_fpstate(fpu));
> +		copy_kernel_to_fpregs(&fpu->state);
>  	else
>  		__fpregs_deactivate_hw();
>  
> @@ -192,7 +192,7 @@ void fpu__save(struct fpu *fpu)
>  	if (fpu->fpregs_active) {
>  		if (!copy_fpregs_to_fpstate(fpu)) {
>  			if (use_eager_fpu())
> -
> copy_kernel_to_fpregs(active_fpstate(fpu));
> +				copy_kernel_to_fpregs(&fpu->state);
>  			else
>  				fpregs_deactivate(fpu);
>  		}
> @@ -244,13 +244,8 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu
> *src_fpu) dst_fpu->counter = 0;
>  	dst_fpu->fpregs_active = 0;
>  	dst_fpu->last_cpu = -1;
> -#ifdef CONFIG_IPIPE
> -	/* Must be set before FPU context is copied. */
> -	dst_fpu->active_state = &dst_fpu->state;
> -#endif
>  
> -	if (!IS_ENABLED(CONFIG_IPIPE) &&
> -	    (!src_fpu->fpstate_active
> || !static_cpu_has(X86_FEATURE_FPU)))
> +	if (!src_fpu->fpstate_active
> || !static_cpu_has(X86_FEATURE_FPU)) return 0;
>  
>  	WARN_ON_FPU(src_fpu != &current->thread.fpu);
> @@ -283,7 +278,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu
> *src_fpu) fpu_kernel_xstate_size);
>  
>  		if (use_eager_fpu())
> -
> copy_kernel_to_fpregs(active_fpstate(src_fpu));
> +			copy_kernel_to_fpregs(&src_fpu->state);
>  		else
>  			fpregs_deactivate(src_fpu);
>  	}
> @@ -430,7 +425,7 @@ void fpu__current_fpstate_write_end(void)
>  	 * an XRSTOR if they are active.
>  	 */
>  	if (fpregs_active())
> -		copy_kernel_to_fpregs(active_fpstate(fpu));
> +		copy_kernel_to_fpregs(&fpu->state);
>  
>  	/*
>  	 * Our update is done and the fpregs/fpstate are in sync
> @@ -460,7 +455,7 @@ void fpu__restore(struct fpu *fpu)
>  	kernel_fpu_disable();
>  	trace_x86_fpu_before_restore(fpu);
>  	fpregs_activate(fpu);
> -	copy_kernel_to_fpregs(active_fpstate(fpu));
> +	copy_kernel_to_fpregs(&fpu->state);
>  	fpu->counter++;
>  	trace_x86_fpu_after_restore(fpu);
>  	kernel_fpu_enable();
> diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
> index 3eee35d3d6f8..a1fc061d03e1 100644
> --- a/arch/x86/kernel/fpu/init.c
> +++ b/arch/x86/kernel/fpu/init.c
> @@ -42,9 +42,6 @@ static void fpu__init_cpu_generic(void)
>  		cr0 |= X86_CR0_EM;
>  	write_cr0(cr0);
>  
> -#ifdef CONFIG_IPIPE
> -	current->thread.fpu.active_state =
> &current->thread.fpu.state; -#endif
>  	/* Flush out any pending x87 state: */
>  #ifdef CONFIG_MATH_EMULATION
>  	if (!boot_cpu_has(X86_FEATURE_FPU))
> @@ -329,7 +326,7 @@ static void __init
> fpu__init_system_ctx_switch(void) eagerfpu = ENABLE;
>  
>  	if (IS_ENABLED(CONFIG_IPIPE))
> -		eagerfpu = DISABLE;
> +		eagerfpu = ENABLE;
>  
>  	if (eagerfpu == ENABLE)
>  		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);



-- 
Siemens AG
Corporate Technology
CT RDA IOT SES-DE
Otto-Hahn-Ring 6
81739 Muenchen, Germany
Mobile: +49 172 8378927
mailto: henning.schild@siemens.com


^ permalink raw reply	[flat|nested] 12+ messages in thread

* [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager
  2018-09-14 15:10 [Xenomai] [PATCHv2 1/4] cobalt/x86: add support for eager fpu handling Henning Schild
@ 2018-09-14 15:10 ` Henning Schild
  2018-09-14 15:13   ` Henning Schild
  0 siblings, 1 reply; 12+ messages in thread
From: Henning Schild @ 2018-09-14 15:10 UTC (permalink / raw)
  To: xenomai

Linux 4.14 dropped support for lazy fpu switching and in the 4.4 and 4.9
series similar changes were backported.
So fpu is eager for those versions. That simplifies things a lot and we can
drop several changes from the IPIPE patch.
On the Xenomai side the only thing we still have to care about is the
kernel fpu state when we interrupt an in-kernel fpu user. But we do that
explicitly and can drop the indirection active_(fp)state.

This patch basically drops most of the fpu specifics from the ipipe
patch.

This patch applies on ipipe-4.9.y and it has to be followed by a merge with
>= 4.9.109. Cobalt will not compile if you are already eager and still
4.9.108

Signed-off-by: Henning Schild <henning.schild@siemens.com>
---
 arch/x86/include/asm/fpu/internal.h | 30 ++++++++---------------------
 arch/x86/include/asm/fpu/types.h    | 12 ------------
 arch/x86/kernel/fpu/core.c          | 17 ++++++----------
 arch/x86/kernel/fpu/init.c          |  5 +----
 4 files changed, 15 insertions(+), 49 deletions(-)

diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 35ca184e83e1..6bdceba90f17 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -13,7 +13,6 @@
 #include <linux/compat.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/kconfig.h>
 #include <linux/ipipe.h>
 
 #include <asm/user.h>
@@ -190,24 +189,12 @@ static inline int copy_user_to_fregs(struct fregs_state __user *fx)
 	return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
 }
 
-#ifdef CONFIG_IPIPE
-static inline union fpregs_state *active_fpstate(struct fpu *fpu)
-{
-	return fpu->active_state;
-}
-#else
-static inline union fpregs_state *active_fpstate(struct fpu *fpu)
-{
-	return &fpu->state;
-}
-#endif
-
 static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 {
 	if (IS_ENABLED(CONFIG_X86_32))
-		asm volatile( "fxsave %[fx]" : [fx] "=m" (active_fpstate(fpu)->fxsave));
+		asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
 	else if (IS_ENABLED(CONFIG_AS_FXSAVEQ))
-		asm volatile("fxsaveq %[fx]" : [fx] "=m" (active_fpstate(fpu)->fxsave));
+		asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
 	else {
 		/* Using "rex64; fxsave %0" is broken because, if the memory
 		 * operand uses any extended registers for addressing, a second
@@ -231,8 +218,8 @@ static inline void copy_fxregs_to_kernel(struct fpu *fpu)
 		 * registers.
 		 */
 		asm volatile( "rex64/fxsave (%[fx])"
-			      : "=m" (active_fpstate(fpu)->fxsave)
-			      : [fx] "R" (&active_fpstate(fpu)->fxsave));
+                             : "=m" (fpu->state.fxsave)
+                             : [fx] "R" (&fpu->state.fxsave));
 	}
 }
 
@@ -441,7 +428,7 @@ static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
 static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 {
 	if (likely(use_xsave())) {
-		copy_xregs_to_kernel(&active_fpstate(fpu)->xsave);
+		copy_xregs_to_kernel(&fpu->state.xsave);
 		return 1;
 	}
 
@@ -454,7 +441,7 @@ static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
 	 * Legacy FPU register saving, FNSAVE always clears FPU registers,
 	 * so we have to mark them inactive:
 	 */
-	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (active_fpstate(fpu)->fsave));
+	asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
 
 	return 0;
 }
@@ -609,8 +596,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 	 * If the task has used the math, pre-load the FPU on xsave processors
 	 * or if the past 5 consecutive context-switches used math.
 	 */
-	fpu.preload = !IS_ENABLED(CONFIG_IPIPE) &&
-		      static_cpu_has(X86_FEATURE_FPU) &&
+	fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
 		      new_fpu->fpstate_active &&
 		      (use_eager_fpu() || new_fpu->counter > 5);
 
@@ -660,7 +646,7 @@ switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
  */
 static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
 {
-	if (!IS_ENABLED(CONFIG_IPIPE) && fpu_switch.preload)
+	if (fpu_switch.preload)
 		copy_kernel_to_fpregs(&new_fpu->state);
 }
 
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 497c551469ec..48df486b02f9 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -332,18 +332,6 @@ struct fpu {
 	 * deal with bursty apps that only use the FPU for a short time:
 	 */
 	unsigned char			counter;
-
-#ifdef CONFIG_IPIPE
-	/*
-	 * @active_state
-	 *
-	 * An indirection pointer to reach the active state context
-	 * for the task.  This is used by co-kernels for dealing with
-	 * preemption of kernel fpu contexts by their own tasks.
-	 */
-	union fpregs_state		*active_state;
-#endif
-	
 	/*
 	 * @state:
 	 *
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index d31a73b2ab76..7dd8518272ca 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -124,7 +124,7 @@ void __kernel_fpu_end(void)
 
 	flags = hard_cond_local_irq_save();
 	if (fpu->fpregs_active)
-		copy_kernel_to_fpregs(active_fpstate(fpu));
+		copy_kernel_to_fpregs(&fpu->state);
 	else
 		__fpregs_deactivate_hw();
 
@@ -192,7 +192,7 @@ void fpu__save(struct fpu *fpu)
 	if (fpu->fpregs_active) {
 		if (!copy_fpregs_to_fpstate(fpu)) {
 			if (use_eager_fpu())
-				copy_kernel_to_fpregs(active_fpstate(fpu));
+				copy_kernel_to_fpregs(&fpu->state);
 			else
 				fpregs_deactivate(fpu);
 		}
@@ -244,13 +244,8 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 	dst_fpu->counter = 0;
 	dst_fpu->fpregs_active = 0;
 	dst_fpu->last_cpu = -1;
-#ifdef CONFIG_IPIPE
-	/* Must be set before FPU context is copied. */
-	dst_fpu->active_state = &dst_fpu->state;
-#endif
 
-	if (!IS_ENABLED(CONFIG_IPIPE) &&
-	    (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU)))
+	if (!src_fpu->fpstate_active || !static_cpu_has(X86_FEATURE_FPU))
 		return 0;
 
 	WARN_ON_FPU(src_fpu != &current->thread.fpu);
@@ -283,7 +278,7 @@ int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
 		       fpu_kernel_xstate_size);
 
 		if (use_eager_fpu())
-			copy_kernel_to_fpregs(active_fpstate(src_fpu));
+			copy_kernel_to_fpregs(&src_fpu->state);
 		else
 			fpregs_deactivate(src_fpu);
 	}
@@ -430,7 +425,7 @@ void fpu__current_fpstate_write_end(void)
 	 * an XRSTOR if they are active.
 	 */
 	if (fpregs_active())
-		copy_kernel_to_fpregs(active_fpstate(fpu));
+		copy_kernel_to_fpregs(&fpu->state);
 
 	/*
 	 * Our update is done and the fpregs/fpstate are in sync
@@ -460,7 +455,7 @@ void fpu__restore(struct fpu *fpu)
 	kernel_fpu_disable();
 	trace_x86_fpu_before_restore(fpu);
 	fpregs_activate(fpu);
-	copy_kernel_to_fpregs(active_fpstate(fpu));
+	copy_kernel_to_fpregs(&fpu->state);
 	fpu->counter++;
 	trace_x86_fpu_after_restore(fpu);
 	kernel_fpu_enable();
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 3eee35d3d6f8..a1fc061d03e1 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -42,9 +42,6 @@ static void fpu__init_cpu_generic(void)
 		cr0 |= X86_CR0_EM;
 	write_cr0(cr0);
 
-#ifdef CONFIG_IPIPE
-	current->thread.fpu.active_state = &current->thread.fpu.state;
-#endif
 	/* Flush out any pending x87 state: */
 #ifdef CONFIG_MATH_EMULATION
 	if (!boot_cpu_has(X86_FEATURE_FPU))
@@ -329,7 +326,7 @@ static void __init fpu__init_system_ctx_switch(void)
 		eagerfpu = ENABLE;
 
 	if (IS_ENABLED(CONFIG_IPIPE))
-		eagerfpu = DISABLE;
+		eagerfpu = ENABLE;
 
 	if (eagerfpu == ENABLE)
 		setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
-- 
2.19.0



^ permalink raw reply related	[flat|nested] 12+ messages in thread

end of thread, other threads:[~2018-09-26  8:56 UTC | newest]

Thread overview: 12+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-09-14 16:14 [Xenomai] [PATCHv2 1/4] cobalt/x86: add support for eager fpu handling Henning Schild
2018-09-14 16:14 ` [Xenomai] [PATCH] gitignore: add build output for in-tree builds Henning Schild
2018-09-14 16:16   ` Henning Schild
2018-09-14 16:14 ` [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager Henning Schild
2018-09-25 11:20   ` Jan Kiszka
2018-09-26  8:45     ` Henning Schild
2018-09-26  8:56       ` Jan Kiszka
2018-09-14 16:14 ` [Xenomai] [PATCHv2 2/4] cobalt/x86: add ipipe-4.4 eager fpu support Henning Schild
2018-09-14 16:14 ` [Xenomai] [PATCHv2 3/4] cobalt: fixup for kernel 4.14+ Henning Schild
2018-09-14 16:14 ` [Xenomai] [PATCHv2 4/4] cobalt/x86: add ipipe-4.14 eager fpu support Henning Schild
  -- strict thread matches above, loose matches on Subject: below --
2018-09-14 15:10 [Xenomai] [PATCHv2 1/4] cobalt/x86: add support for eager fpu handling Henning Schild
2018-09-14 15:10 ` [Xenomai] [IPIPE] [PATCHv2] x86: make fpu switching eager Henning Schild
2018-09-14 15:13   ` Henning Schild

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.