From: Palmer Dabbelt <palmer@dabbelt.com>
To: peterz@infradead.org, mingo@redhat.com, mcgrof@kernel.org,
	viro@zeniv.linux.org.uk, sfr@canb.auug.org.au,
	nicolas.dichtel@6wind.com, rmk+kernel@armlinux.org.uk,
	msalter@redhat.com, tklauser@distanz.ch, will.deacon@arm.com,
	james.hogan@imgtec.com, paul.gortmaker@windriver.com,
	linux@roeck-us.net, linux-kernel@vger.kernel.org,
	linux-arch@vger.kernel.org, albert@sifive.com
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Subject: [PATCH 5/9] RISC-V: Task implementation
Date: Wed, 28 Jun 2017 11:55:34 -0700	[thread overview]
Message-ID: <20170628185538.1804-6-palmer@dabbelt.com> (raw)
In-Reply-To: <20170628185538.1804-1-palmer@dabbelt.com>

This patch contains the implementation of tasks on RISC-V, most of which
is involved in task switching.

Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
---
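A quick aside for reviewers (placed after the "---" so it is not part of the
commit): the context switching below only works because "struct thread_info"
sits at offset 0 inside "struct task_struct" -- see the comment in
asm/current.h and the TASK_TI guard in __switch_to.  The stand-alone C sketch
that follows uses toy stand-in types rather than the real kernel structures,
and is only meant to demonstrate that layout invariant; it compiles and runs
on its own.

#include <stddef.h>
#include <stdio.h>

/* Toy stand-ins for the real kernel structures (illustration only). */
struct thread_info {
	unsigned long	flags;		/* low level flags */
	int		preempt_count;
	long		kernel_sp;
	long		user_sp;
};

struct task_struct {
	struct thread_info	thread_info;	/* must remain the first member */
	int			pid;
};

/* C-level analogue of the "#if TASK_TI != 0" / "#error" guard in __switch_to. */
_Static_assert(offsetof(struct task_struct, thread_info) == 0,
	       "thread_info must be at offset 0 of task_struct");

int main(void)
{
	struct task_struct task = { .pid = 42 };

	/*
	 * Because of the offset-0 invariant, a single pointer (tp on RISC-V)
	 * can be viewed as either structure, which is what get_current()
	 * and the entry.S accessors rely on.
	 */
	struct thread_info *ti = (struct thread_info *)&task;

	printf("pid=%d preempt_count=%d\n", task.pid, ti->preempt_count);
	return 0;
}

With tp always holding the current task_struct, the same trick lets
get_current() and the assembly in entry.S share a single pointer without any
extra loads on the hot paths.
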
 arch/riscv/include/asm/asm-offsets.h |   1 +
 arch/riscv/include/asm/current.h     |  43 ++++
 arch/riscv/include/asm/kprobes.h     |  22 ++
 arch/riscv/include/asm/processor.h   | 102 ++++++++
 arch/riscv/include/asm/switch_to.h   |  69 ++++++
 arch/riscv/include/asm/thread_info.h |  96 ++++++++
 arch/riscv/kernel/asm-offsets.c      | 316 +++++++++++++++++++++++++
 arch/riscv/kernel/entry.S            | 436 +++++++++++++++++++++++++++++++++++
 arch/riscv/kernel/process.c          | 131 +++++++++++
 9 files changed, 1216 insertions(+)
 create mode 100644 arch/riscv/include/asm/asm-offsets.h
 create mode 100644 arch/riscv/include/asm/current.h
 create mode 100644 arch/riscv/include/asm/kprobes.h
 create mode 100644 arch/riscv/include/asm/processor.h
 create mode 100644 arch/riscv/include/asm/switch_to.h
 create mode 100644 arch/riscv/include/asm/thread_info.h
 create mode 100644 arch/riscv/kernel/asm-offsets.c
 create mode 100644 arch/riscv/kernel/entry.S
 create mode 100644 arch/riscv/kernel/process.c

diff --git a/arch/riscv/include/asm/asm-offsets.h b/arch/riscv/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/riscv/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h
new file mode 100644
index 000000000000..032a24ba5308
--- /dev/null
+++ b/arch/riscv/include/asm/current.h
@@ -0,0 +1,43 @@
+/*
+ * Based on arm/arm64/include/asm/current.h
+ *
+ * Copyright (C) 2016 ARM
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+/* This only works because "struct thread_info" is at offset 0 from "struct
+ * task_struct".  This constraint seems to be necessary on other architectures
+ * as well, but __switch_to enforces it.  We can't check TASK_TI here because
+ * <asm/asm-offsets.h> includes this, and I can't get the definition of "struct
+ * task_struct" here due to some header ordering problems.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+	register struct task_struct *tp __asm__("tp");
+	return tp;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
new file mode 100644
index 000000000000..1190de7a0f74
--- /dev/null
+++ b/arch/riscv/include/asm/kprobes.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+
+#ifndef ASM_RISCV_KPROBES_H
+#define ASM_RISCV_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+#error "RISC-V doesn't skpport CONFIG_KPROBES"
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
new file mode 100644
index 000000000000..65aa014db9b4
--- /dev/null
+++ b/arch/riscv/include/asm/processor.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PROCESSOR_H
+#define _ASM_RISCV_PROCESSOR_H
+
+#include <linux/const.h>
+
+#include <asm/ptrace.h>
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE >> 1)
+
+#ifdef __KERNEL__
+#define STACK_TOP		TASK_SIZE
+#define STACK_TOP_MAX		STACK_TOP
+#define STACK_ALIGN		16
+#endif /* __KERNEL__ */
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+struct pt_regs;
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr()	({ __label__ _l; _l: &&_l; })
+
+/* CPU-specific state of a task */
+struct thread_struct {
+	/* Callee-saved registers */
+	unsigned long ra;
+	unsigned long sp;	/* Kernel mode stack */
+	unsigned long s[12];	/* s[0]: frame pointer */
+	struct __riscv_d_ext_state fstate;
+};
+
+#define INIT_THREAD {					\
+	.sp = sizeof(init_stack) + (long)&init_stack,	\
+}
+
+/* Return saved (kernel) PC of a blocked thread. */
+#define thread_saved_pc(t)	((t)->thread.ra)
+#define thread_saved_sp(t)	((t)->thread.sp)
+#define thread_saved_fp(t)	((t)->thread.s[0])
+
+#define task_pt_regs(tsk)						\
+	((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE		\
+			    - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
+
+#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->sepc)
+#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->sp)
+
+
+/* Do necessary setup to start up a newly executed thread. */
+extern void start_thread(struct pt_regs *regs,
+			unsigned long pc, unsigned long sp);
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+	int dummy;
+	/* In lieu of a halt instruction, induce a long-latency stall. */
+	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+	barrier();
+}
+
+static inline void wait_for_interrupt(void)
+{
+	__asm__ __volatile__ ("wfi");
+}
+
+struct device_node;
+extern int riscv_of_processor_hart(struct device_node *node);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
new file mode 100644
index 000000000000..dd6b05bff75b
--- /dev/null
+++ b/arch/riscv/include/asm/switch_to.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_SWITCH_TO_H
+#define _ASM_RISCV_SWITCH_TO_H
+
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+extern void __fstate_save(struct task_struct *save_to);
+extern void __fstate_restore(struct task_struct *restore_from);
+
+static inline void __fstate_clean(struct pt_regs *regs)
+{
+	regs->sstatus = (regs->sstatus & ~SR_FS) | SR_FS_CLEAN;
+}
+
+static inline void fstate_save(struct task_struct *task,
+			       struct pt_regs *regs)
+{
+	if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) {
+		__fstate_save(task);
+		__fstate_clean(regs);
+	}
+}
+
+static inline void fstate_restore(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	if ((regs->sstatus & SR_FS) != SR_FS_OFF) {
+		__fstate_restore(task);
+		__fstate_clean(regs);
+	}
+}
+
+static inline void __switch_to_aux(struct task_struct *prev,
+				   struct task_struct *next)
+{
+	struct pt_regs *regs;
+
+	regs = task_pt_regs(prev);
+	if (unlikely(regs->sstatus & SR_SD))
+		fstate_save(prev, regs);
+	fstate_restore(next, task_pt_regs(next));
+}
+
+extern struct task_struct *__switch_to(struct task_struct *,
+				       struct task_struct *);
+
+#define switch_to(prev, next, last)			\
+do {							\
+	struct task_struct *__prev = (prev);		\
+	struct task_struct *__next = (next);		\
+	__switch_to_aux(__prev, __next);		\
+	((last) = __switch_to(__prev, __next));		\
+} while (0)
+
+#endif /* _ASM_RISCV_SWITCH_TO_H */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
new file mode 100644
index 000000000000..f05b7d68c1ad
--- /dev/null
+++ b/arch/riscv/include/asm/thread_info.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_THREAD_INFO_H
+#define _ASM_RISCV_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+#include <linux/const.h>
+
+/* thread information allocation */
+#define THREAD_SIZE_ORDER	(1)
+#define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+typedef unsigned long mm_segment_t;
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - if the members of this struct change, the assembly constants
+ *   in asm-offsets.c must be updated accordingly
+ * - thread_info is included in task_struct at an offset of 0.  This means that
+ *   tp points to both thread_info and task_struct.
+ */
+struct thread_info {
+	unsigned long		flags;		/* low level flags */
+	int                     preempt_count;  /* 0=>preemptible, <0=>BUG */
+	mm_segment_t		addr_limit;
+	/* These stack pointers are overwritten on every system call or
+	 * exception.  SP is also saved to the stack so it can be recovered when
+	 * overwritten.
+	 */
+	long			kernel_sp;	/* Kernel stack pointer */
+	long			user_sp;	/* User stack pointer */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.flags		= 0,			\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
+	.addr_limit	= KERNEL_DS,		\
+}
+
+#define init_stack		(init_thread_union.stack)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ *   access
+ * - pending work-to-be-done flags are in lowest half-word
+ * - other flags in upper half-word(s)
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK	4	/* restore signal mask in do_signal() */
+#define TIF_MEMDIE		5	/* is terminating due to OOM killer */
+#define TIF_SYSCALL_TRACEPOINT  6       /* syscall tracepoint instrumentation */
+
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+
+#define _TIF_WORK_MASK \
+	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
new file mode 100644
index 000000000000..2ead5037528c
--- /dev/null
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/kbuild.h>
+#include <linux/sched.h>
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+
+void asm_offsets(void)
+{
+	OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
+	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
+	OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
+	OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
+	OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
+	OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
+	OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
+	OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
+	OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
+	OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
+	OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
+	OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
+	OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
+	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
+	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
+	OFFSET(TASK_STACK, task_struct, stack);
+	OFFSET(TASK_TI, task_struct, thread_info);
+	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
+	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
+	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+
+	OFFSET(TASK_THREAD_F0,  task_struct, thread.fstate.f[0]);
+	OFFSET(TASK_THREAD_F1,  task_struct, thread.fstate.f[1]);
+	OFFSET(TASK_THREAD_F2,  task_struct, thread.fstate.f[2]);
+	OFFSET(TASK_THREAD_F3,  task_struct, thread.fstate.f[3]);
+	OFFSET(TASK_THREAD_F4,  task_struct, thread.fstate.f[4]);
+	OFFSET(TASK_THREAD_F5,  task_struct, thread.fstate.f[5]);
+	OFFSET(TASK_THREAD_F6,  task_struct, thread.fstate.f[6]);
+	OFFSET(TASK_THREAD_F7,  task_struct, thread.fstate.f[7]);
+	OFFSET(TASK_THREAD_F8,  task_struct, thread.fstate.f[8]);
+	OFFSET(TASK_THREAD_F9,  task_struct, thread.fstate.f[9]);
+	OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
+	OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
+	OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
+	OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
+	OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
+	OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
+	OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
+	OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
+	OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
+	OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
+	OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
+	OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
+	OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
+	OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
+	OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
+	OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
+	OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
+	OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
+	OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
+	OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
+	OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
+	OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
+	OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
+
+	DEFINE(PT_SIZE, sizeof(struct pt_regs));
+	OFFSET(PT_SEPC, pt_regs, sepc);
+	OFFSET(PT_RA, pt_regs, ra);
+	OFFSET(PT_FP, pt_regs, s0);
+	OFFSET(PT_S0, pt_regs, s0);
+	OFFSET(PT_S1, pt_regs, s1);
+	OFFSET(PT_S2, pt_regs, s2);
+	OFFSET(PT_S3, pt_regs, s3);
+	OFFSET(PT_S4, pt_regs, s4);
+	OFFSET(PT_S5, pt_regs, s5);
+	OFFSET(PT_S6, pt_regs, s6);
+	OFFSET(PT_S7, pt_regs, s7);
+	OFFSET(PT_S8, pt_regs, s8);
+	OFFSET(PT_S9, pt_regs, s9);
+	OFFSET(PT_S10, pt_regs, s10);
+	OFFSET(PT_S11, pt_regs, s11);
+	OFFSET(PT_SP, pt_regs, sp);
+	OFFSET(PT_TP, pt_regs, tp);
+	OFFSET(PT_A0, pt_regs, a0);
+	OFFSET(PT_A1, pt_regs, a1);
+	OFFSET(PT_A2, pt_regs, a2);
+	OFFSET(PT_A3, pt_regs, a3);
+	OFFSET(PT_A4, pt_regs, a4);
+	OFFSET(PT_A5, pt_regs, a5);
+	OFFSET(PT_A6, pt_regs, a6);
+	OFFSET(PT_A7, pt_regs, a7);
+	OFFSET(PT_T0, pt_regs, t0);
+	OFFSET(PT_T1, pt_regs, t1);
+	OFFSET(PT_T2, pt_regs, t2);
+	OFFSET(PT_T3, pt_regs, t3);
+	OFFSET(PT_T4, pt_regs, t4);
+	OFFSET(PT_T5, pt_regs, t5);
+	OFFSET(PT_T6, pt_regs, t6);
+	OFFSET(PT_GP, pt_regs, gp);
+	OFFSET(PT_SSTATUS, pt_regs, sstatus);
+	OFFSET(PT_SBADADDR, pt_regs, sbadaddr);
+	OFFSET(PT_SCAUSE, pt_regs, scause);
+
+	/* THREAD_{F,X}* might be larger than an S-type offset can handle, but
+	 * these are used in performance-sensitive assembly so we can't resort
+	 * to loading the long immediate every time.
+	 */
+	DEFINE(TASK_THREAD_RA_RA,
+		  offsetof(struct task_struct, thread.ra)
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_SP_RA,
+		  offsetof(struct task_struct, thread.sp)
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S0_RA,
+		  offsetof(struct task_struct, thread.s[0])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S1_RA,
+		  offsetof(struct task_struct, thread.s[1])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S2_RA,
+		  offsetof(struct task_struct, thread.s[2])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S3_RA,
+		  offsetof(struct task_struct, thread.s[3])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S4_RA,
+		  offsetof(struct task_struct, thread.s[4])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S5_RA,
+		  offsetof(struct task_struct, thread.s[5])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S6_RA,
+		  offsetof(struct task_struct, thread.s[6])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S7_RA,
+		  offsetof(struct task_struct, thread.s[7])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S8_RA,
+		  offsetof(struct task_struct, thread.s[8])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S9_RA,
+		  offsetof(struct task_struct, thread.s[9])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S10_RA,
+		  offsetof(struct task_struct, thread.s[10])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S11_RA,
+		  offsetof(struct task_struct, thread.s[11])
+		- offsetof(struct task_struct, thread.ra)
+	);
+
+	DEFINE(TASK_THREAD_F0_F0,
+		  offsetof(struct task_struct, thread.fstate.f[0])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F1_F0,
+		  offsetof(struct task_struct, thread.fstate.f[1])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F2_F0,
+		  offsetof(struct task_struct, thread.fstate.f[2])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F3_F0,
+		  offsetof(struct task_struct, thread.fstate.f[3])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F4_F0,
+		  offsetof(struct task_struct, thread.fstate.f[4])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F5_F0,
+		  offsetof(struct task_struct, thread.fstate.f[5])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F6_F0,
+		  offsetof(struct task_struct, thread.fstate.f[6])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F7_F0,
+		  offsetof(struct task_struct, thread.fstate.f[7])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F8_F0,
+		  offsetof(struct task_struct, thread.fstate.f[8])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F9_F0,
+		  offsetof(struct task_struct, thread.fstate.f[9])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F10_F0,
+		  offsetof(struct task_struct, thread.fstate.f[10])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F11_F0,
+		  offsetof(struct task_struct, thread.fstate.f[11])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F12_F0,
+		  offsetof(struct task_struct, thread.fstate.f[12])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F13_F0,
+		  offsetof(struct task_struct, thread.fstate.f[13])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F14_F0,
+		  offsetof(struct task_struct, thread.fstate.f[14])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F15_F0,
+		  offsetof(struct task_struct, thread.fstate.f[15])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F16_F0,
+		  offsetof(struct task_struct, thread.fstate.f[16])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F17_F0,
+		  offsetof(struct task_struct, thread.fstate.f[17])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F18_F0,
+		  offsetof(struct task_struct, thread.fstate.f[18])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F19_F0,
+		  offsetof(struct task_struct, thread.fstate.f[19])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F20_F0,
+		  offsetof(struct task_struct, thread.fstate.f[20])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F21_F0,
+		  offsetof(struct task_struct, thread.fstate.f[21])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F22_F0,
+		  offsetof(struct task_struct, thread.fstate.f[22])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F23_F0,
+		  offsetof(struct task_struct, thread.fstate.f[23])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F24_F0,
+		  offsetof(struct task_struct, thread.fstate.f[24])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F25_F0,
+		  offsetof(struct task_struct, thread.fstate.f[25])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F26_F0,
+		  offsetof(struct task_struct, thread.fstate.f[26])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F27_F0,
+		  offsetof(struct task_struct, thread.fstate.f[27])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F28_F0,
+		  offsetof(struct task_struct, thread.fstate.f[28])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F29_F0,
+		  offsetof(struct task_struct, thread.fstate.f[29])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F30_F0,
+		  offsetof(struct task_struct, thread.fstate.f[30])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F31_F0,
+		  offsetof(struct task_struct, thread.fstate.f[31])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_FCSR_F0,
+		  offsetof(struct task_struct, thread.fstate.fcsr)
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+
+	/* The assembler needs access to THREAD_SIZE as well. */
+	DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
+
+	/* We allocate a pt_regs on the stack when entering the kernel.  This
+	 * ensures the alignment is sane.
+	 */
+	DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
+}
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
new file mode 100644
index 000000000000..aa4593c2eaef
--- /dev/null
+++ b/arch/riscv/kernel/entry.S
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+	.text
+	.altmacro
+
+/* Prepares to enter a system call or exception by saving all registers to the
+ * stack.
+ */
+	.macro SAVE_ALL
+	LOCAL _restore_kernel_tpsp
+	LOCAL _save_context
+
+	/* If coming from userspace, preserve the user thread pointer and load
+	   the kernel thread pointer.  If we came from the kernel, sscratch
+	   will contain 0, and we should continue on the current TP. */
+	csrrw tp, sscratch, tp
+	bnez tp, _save_context
+
+_restore_kernel_tpsp:
+	csrr tp, sscratch
+	REG_S sp, TASK_TI_KERNEL_SP(tp)
+_save_context:
+	REG_S sp, TASK_TI_USER_SP(tp)
+	REG_L sp, TASK_TI_KERNEL_SP(tp)
+	addi sp, sp, -(PT_SIZE_ON_STACK)
+	REG_S x1,  PT_RA(sp)
+	REG_S x3,  PT_GP(sp)
+	REG_S x5,  PT_T0(sp)
+	REG_S x6,  PT_T1(sp)
+	REG_S x7,  PT_T2(sp)
+	REG_S x8,  PT_S0(sp)
+	REG_S x9,  PT_S1(sp)
+	REG_S x10, PT_A0(sp)
+	REG_S x11, PT_A1(sp)
+	REG_S x12, PT_A2(sp)
+	REG_S x13, PT_A3(sp)
+	REG_S x14, PT_A4(sp)
+	REG_S x15, PT_A5(sp)
+	REG_S x16, PT_A6(sp)
+	REG_S x17, PT_A7(sp)
+	REG_S x18, PT_S2(sp)
+	REG_S x19, PT_S3(sp)
+	REG_S x20, PT_S4(sp)
+	REG_S x21, PT_S5(sp)
+	REG_S x22, PT_S6(sp)
+	REG_S x23, PT_S7(sp)
+	REG_S x24, PT_S8(sp)
+	REG_S x25, PT_S9(sp)
+	REG_S x26, PT_S10(sp)
+	REG_S x27, PT_S11(sp)
+	REG_S x28, PT_T3(sp)
+	REG_S x29, PT_T4(sp)
+	REG_S x30, PT_T5(sp)
+	REG_S x31, PT_T6(sp)
+
+	/* Disable FPU to detect illegal usage of
+	   floating point in kernel space */
+	li t0, SR_FS
+
+	REG_L s0, TASK_TI_USER_SP(tp)
+	csrrc s1, sstatus, t0
+	csrr s2, sepc
+	csrr s3, sbadaddr
+	csrr s4, scause
+	csrr s5, sscratch
+	REG_S s0, PT_SP(sp)
+	REG_S s1, PT_SSTATUS(sp)
+	REG_S s2, PT_SEPC(sp)
+	REG_S s3, PT_SBADADDR(sp)
+	REG_S s4, PT_SCAUSE(sp)
+	REG_S s5, PT_TP(sp)
+	.endm
+
+/* Prepares to return from a system call or exception by restoring all
+ * registers from the stack.
+ */
+	.macro RESTORE_ALL
+	REG_L a0, PT_SSTATUS(sp)
+	REG_L a2, PT_SEPC(sp)
+	csrw sstatus, a0
+	csrw sepc, a2
+
+	REG_L x1,  PT_RA(sp)
+	REG_L x3,  PT_GP(sp)
+	REG_L x4,  PT_TP(sp)
+	REG_L x5,  PT_T0(sp)
+	REG_L x6,  PT_T1(sp)
+	REG_L x7,  PT_T2(sp)
+	REG_L x8,  PT_S0(sp)
+	REG_L x9,  PT_S1(sp)
+	REG_L x10, PT_A0(sp)
+	REG_L x11, PT_A1(sp)
+	REG_L x12, PT_A2(sp)
+	REG_L x13, PT_A3(sp)
+	REG_L x14, PT_A4(sp)
+	REG_L x15, PT_A5(sp)
+	REG_L x16, PT_A6(sp)
+	REG_L x17, PT_A7(sp)
+	REG_L x18, PT_S2(sp)
+	REG_L x19, PT_S3(sp)
+	REG_L x20, PT_S4(sp)
+	REG_L x21, PT_S5(sp)
+	REG_L x22, PT_S6(sp)
+	REG_L x23, PT_S7(sp)
+	REG_L x24, PT_S8(sp)
+	REG_L x25, PT_S9(sp)
+	REG_L x26, PT_S10(sp)
+	REG_L x27, PT_S11(sp)
+	REG_L x28, PT_T3(sp)
+	REG_L x29, PT_T4(sp)
+	REG_L x30, PT_T5(sp)
+	REG_L x31, PT_T6(sp)
+
+	REG_L x2,  PT_SP(sp)
+	.endm
+
+ENTRY(handle_exception)
+	SAVE_ALL
+
+	/* Set sscratch register to 0, so that if a recursive exception
+	   occurs, the exception vector knows it came from the kernel */
+	csrw sscratch, x0
+
+	la gp, __global_pointer$
+
+	la ra, ret_from_exception
+	/* MSB of cause differentiates between
+	   interrupts and exceptions */
+	bge s4, zero, 1f
+
+	/* Handle interrupts */
+	slli a0, s4, 1
+	srli a0, a0, 1
+	move a1, sp /* pt_regs */
+	tail do_IRQ
+1:
+	/* Handle syscalls */
+	li t0, EXC_SYSCALL
+	beq s4, t0, handle_syscall
+
+	/* Handle other exceptions */
+	slli t0, s4, RISCV_LGPTR
+	la t1, excp_vect_table
+	la t2, excp_vect_table_end
+	move a0, sp /* pt_regs */
+	add t0, t1, t0
+	/* Check if exception code lies within bounds */
+	bgeu t0, t2, 1f
+	REG_L t0, 0(t0)
+	jr t0
+1:
+	tail do_trap_unknown
+
+handle_syscall:
+	/* Advance SEPC to avoid executing the original
+	   scall instruction on sret */
+	addi s2, s2, 0x4
+	REG_S s2, PT_SEPC(sp)
+	/* System calls run with interrupts enabled */
+	csrs sstatus, SR_IE
+	/* Trace syscalls, but only if requested by the user. */
+	REG_L t0, TASK_TI_FLAGS(tp)
+	andi t0, t0, _TIF_SYSCALL_TRACE
+	bnez t0, handle_syscall_trace_enter
+check_syscall_nr:
+	/* Check to make sure we don't jump to a bogus syscall number. */
+	li t0, __NR_syscalls
+	la s0, sys_ni_syscall
+	/* Syscall number held in a7 */
+	bgeu a7, t0, 1f
+	la s0, sys_call_table
+	slli t0, a7, RISCV_LGPTR
+	add s0, s0, t0
+	REG_L s0, 0(s0)
+1:
+	jalr s0
+
+ret_from_syscall:
+	/* Set user a0 to kernel a0 */
+	REG_S a0, PT_A0(sp)
+	/* Trace syscalls, but only if requested by the user. */
+	REG_L t0, TASK_TI_FLAGS(tp)
+	andi t0, t0, _TIF_SYSCALL_TRACE
+	bnez t0, handle_syscall_trace_exit
+
+ret_from_exception:
+	REG_L s0, PT_SSTATUS(sp)
+	csrc sstatus, SR_IE
+	andi s0, s0, SR_PS
+	bnez s0, restore_all
+
+resume_userspace:
+	/* Interrupts must be disabled here so flags are checked atomically */
+	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
+	andi s1, s0, _TIF_WORK_MASK
+	bnez s1, work_pending
+
+	/* Save unwound kernel stack pointer in thread_info */
+	addi s0, sp, PT_SIZE_ON_STACK
+	REG_S s0, TASK_TI_KERNEL_SP(tp)
+
+	/* Save TP into sscratch, so we can find the kernel data structures
+	 * again. */
+	csrw sscratch, tp
+
+restore_all:
+	RESTORE_ALL
+	sret
+
+work_pending:
+	/* Enter slow path for supplementary processing */
+	la ra, ret_from_exception
+	andi s1, s0, _TIF_NEED_RESCHED
+	bnez s1, work_resched
+work_notifysig:
+	/* Handle pending signals and notify-resume requests */
+	csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
+	move a0, sp /* pt_regs */
+	move a1, s0 /* current_thread_info->flags */
+	tail do_notify_resume
+work_resched:
+	tail schedule
+
+/* Slow paths for ptrace. */
+handle_syscall_trace_enter:
+	move a0, sp
+	call do_syscall_trace_enter
+	REG_L a0, PT_A0(sp)
+	REG_L a1, PT_A1(sp)
+	REG_L a2, PT_A2(sp)
+	REG_L a3, PT_A3(sp)
+	REG_L a4, PT_A4(sp)
+	REG_L a5, PT_A5(sp)
+	REG_L a6, PT_A6(sp)
+	REG_L a7, PT_A7(sp)
+	j check_syscall_nr
+handle_syscall_trace_exit:
+	move a0, sp
+	call do_syscall_trace_exit
+	j ret_from_exception
+
+END(handle_exception)
+
+ENTRY(ret_from_fork)
+	la ra, ret_from_exception
+	tail schedule_tail
+ENDPROC(ret_from_fork)
+
+ENTRY(ret_from_kernel_thread)
+	call schedule_tail
+	/* Call fn(arg) */
+	la ra, ret_from_exception
+	move a0, s1
+	jr s0
+ENDPROC(ret_from_kernel_thread)
+
+
+/*
+ * Integer register context switch
+ * The callee-saved registers must be saved and restored.
+ *
+ *   a0: previous task_struct (must be preserved across the switch)
+ *   a1: next task_struct
+ */
+ENTRY(__switch_to)
+	/* Save context into prev->thread */
+	li    a2,  TASK_THREAD_RA
+	add   a0, a0, a2
+	add   a2, a1, a2
+	REG_S ra,  TASK_THREAD_RA_RA(a0)
+	REG_S sp,  TASK_THREAD_SP_RA(a0)
+	REG_S s0,  TASK_THREAD_S0_RA(a0)
+	REG_S s1,  TASK_THREAD_S1_RA(a0)
+	REG_S s2,  TASK_THREAD_S2_RA(a0)
+	REG_S s3,  TASK_THREAD_S3_RA(a0)
+	REG_S s4,  TASK_THREAD_S4_RA(a0)
+	REG_S s5,  TASK_THREAD_S5_RA(a0)
+	REG_S s6,  TASK_THREAD_S6_RA(a0)
+	REG_S s7,  TASK_THREAD_S7_RA(a0)
+	REG_S s8,  TASK_THREAD_S8_RA(a0)
+	REG_S s9,  TASK_THREAD_S9_RA(a0)
+	REG_S s10, TASK_THREAD_S10_RA(a0)
+	REG_S s11, TASK_THREAD_S11_RA(a0)
+	/* Restore context from next->thread */
+	REG_L ra,  TASK_THREAD_RA_RA(a2)
+	REG_L sp,  TASK_THREAD_SP_RA(a2)
+	REG_L s0,  TASK_THREAD_S0_RA(a2)
+	REG_L s1,  TASK_THREAD_S1_RA(a2)
+	REG_L s2,  TASK_THREAD_S2_RA(a2)
+	REG_L s3,  TASK_THREAD_S3_RA(a2)
+	REG_L s4,  TASK_THREAD_S4_RA(a2)
+	REG_L s5,  TASK_THREAD_S5_RA(a2)
+	REG_L s6,  TASK_THREAD_S6_RA(a2)
+	REG_L s7,  TASK_THREAD_S7_RA(a2)
+	REG_L s8,  TASK_THREAD_S8_RA(a2)
+	REG_L s9,  TASK_THREAD_S9_RA(a2)
+	REG_L s10, TASK_THREAD_S10_RA(a2)
+	REG_L s11, TASK_THREAD_S11_RA(a2)
+#if TASK_TI != 0
+#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
+	addi tp, a1, TASK_TI
+#else
+	move tp, a1
+#endif
+	ret
+ENDPROC(__switch_to)
+
+ENTRY(__fstate_save)
+	li  a2,  TASK_THREAD_F0
+	add a0, a0, a2
+	li t1, SR_FS
+	csrs sstatus, t1
+	frcsr t0
+	fsd f0,  TASK_THREAD_F0_F0(a0)
+	fsd f1,  TASK_THREAD_F1_F0(a0)
+	fsd f2,  TASK_THREAD_F2_F0(a0)
+	fsd f3,  TASK_THREAD_F3_F0(a0)
+	fsd f4,  TASK_THREAD_F4_F0(a0)
+	fsd f5,  TASK_THREAD_F5_F0(a0)
+	fsd f6,  TASK_THREAD_F6_F0(a0)
+	fsd f7,  TASK_THREAD_F7_F0(a0)
+	fsd f8,  TASK_THREAD_F8_F0(a0)
+	fsd f9,  TASK_THREAD_F9_F0(a0)
+	fsd f10, TASK_THREAD_F10_F0(a0)
+	fsd f11, TASK_THREAD_F11_F0(a0)
+	fsd f12, TASK_THREAD_F12_F0(a0)
+	fsd f13, TASK_THREAD_F13_F0(a0)
+	fsd f14, TASK_THREAD_F14_F0(a0)
+	fsd f15, TASK_THREAD_F15_F0(a0)
+	fsd f16, TASK_THREAD_F16_F0(a0)
+	fsd f17, TASK_THREAD_F17_F0(a0)
+	fsd f18, TASK_THREAD_F18_F0(a0)
+	fsd f19, TASK_THREAD_F19_F0(a0)
+	fsd f20, TASK_THREAD_F20_F0(a0)
+	fsd f21, TASK_THREAD_F21_F0(a0)
+	fsd f22, TASK_THREAD_F22_F0(a0)
+	fsd f23, TASK_THREAD_F23_F0(a0)
+	fsd f24, TASK_THREAD_F24_F0(a0)
+	fsd f25, TASK_THREAD_F25_F0(a0)
+	fsd f26, TASK_THREAD_F26_F0(a0)
+	fsd f27, TASK_THREAD_F27_F0(a0)
+	fsd f28, TASK_THREAD_F28_F0(a0)
+	fsd f29, TASK_THREAD_F29_F0(a0)
+	fsd f30, TASK_THREAD_F30_F0(a0)
+	fsd f31, TASK_THREAD_F31_F0(a0)
+	sw t0, TASK_THREAD_FCSR_F0(a0)
+	csrc sstatus, t1
+	ret
+ENDPROC(__fstate_save)
+
+ENTRY(__fstate_restore)
+	li  a2,  TASK_THREAD_F0
+	add a0, a0, a2
+	li t1, SR_FS
+	lw t0, TASK_THREAD_FCSR_F0(a0)
+	csrs sstatus, t1
+	fld f0,  TASK_THREAD_F0_F0(a0)
+	fld f1,  TASK_THREAD_F1_F0(a0)
+	fld f2,  TASK_THREAD_F2_F0(a0)
+	fld f3,  TASK_THREAD_F3_F0(a0)
+	fld f4,  TASK_THREAD_F4_F0(a0)
+	fld f5,  TASK_THREAD_F5_F0(a0)
+	fld f6,  TASK_THREAD_F6_F0(a0)
+	fld f7,  TASK_THREAD_F7_F0(a0)
+	fld f8,  TASK_THREAD_F8_F0(a0)
+	fld f9,  TASK_THREAD_F9_F0(a0)
+	fld f10, TASK_THREAD_F10_F0(a0)
+	fld f11, TASK_THREAD_F11_F0(a0)
+	fld f12, TASK_THREAD_F12_F0(a0)
+	fld f13, TASK_THREAD_F13_F0(a0)
+	fld f14, TASK_THREAD_F14_F0(a0)
+	fld f15, TASK_THREAD_F15_F0(a0)
+	fld f16, TASK_THREAD_F16_F0(a0)
+	fld f17, TASK_THREAD_F17_F0(a0)
+	fld f18, TASK_THREAD_F18_F0(a0)
+	fld f19, TASK_THREAD_F19_F0(a0)
+	fld f20, TASK_THREAD_F20_F0(a0)
+	fld f21, TASK_THREAD_F21_F0(a0)
+	fld f22, TASK_THREAD_F22_F0(a0)
+	fld f23, TASK_THREAD_F23_F0(a0)
+	fld f24, TASK_THREAD_F24_F0(a0)
+	fld f25, TASK_THREAD_F25_F0(a0)
+	fld f26, TASK_THREAD_F26_F0(a0)
+	fld f27, TASK_THREAD_F27_F0(a0)
+	fld f28, TASK_THREAD_F28_F0(a0)
+	fld f29, TASK_THREAD_F29_F0(a0)
+	fld f30, TASK_THREAD_F30_F0(a0)
+	fld f31, TASK_THREAD_F31_F0(a0)
+	fscsr t0
+	csrc sstatus, t1
+	ret
+ENDPROC(__fstate_restore)
+
+
+	.section ".rodata"
+	/* Exception vector table */
+ENTRY(excp_vect_table)
+	RISCV_PTR do_trap_insn_misaligned
+	RISCV_PTR do_trap_insn_fault
+	RISCV_PTR do_trap_insn_illegal
+	RISCV_PTR do_trap_break
+	RISCV_PTR do_trap_load_misaligned
+	RISCV_PTR do_trap_load_fault
+	RISCV_PTR do_trap_store_misaligned
+	RISCV_PTR do_trap_store_fault
+	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
+	RISCV_PTR do_trap_ecall_s
+	RISCV_PTR do_trap_unknown
+	RISCV_PTR do_trap_ecall_m
+	RISCV_PTR do_page_fault   /* instruction page fault */
+	RISCV_PTR do_page_fault   /* load page fault */
+	RISCV_PTR do_trap_unknown
+	RISCV_PTR do_page_fault   /* store page fault */
+excp_vect_table_end:
+END(excp_vect_table)
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
new file mode 100644
index 000000000000..b13d3ea3bf79
--- /dev/null
+++ b/arch/riscv/kernel/process.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ *  Chen Liqin <liqin.chen@sunplusct.com>
+ *  Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tick.h>
+#include <linux/ptrace.h>
+
+#include <asm/unistd.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/csr.h>
+#include <asm/string.h>
+#include <asm/switch_to.h>
+
+extern asmlinkage void ret_from_fork(void);
+extern asmlinkage void ret_from_kernel_thread(void);
+
+void arch_cpu_idle(void)
+{
+	wait_for_interrupt();
+	local_irq_enable();
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	show_regs_print_info(KERN_DEFAULT);
+
+	printk(KERN_CONT "sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
+		regs->sepc, regs->ra, regs->sp);
+	printk(KERN_CONT " gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
+		regs->gp, regs->tp, regs->t0);
+	printk(KERN_CONT " t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
+		regs->t1, regs->t2, regs->s0);
+	printk(KERN_CONT " s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
+		regs->s1, regs->a0, regs->a1);
+	printk(KERN_CONT " a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
+		regs->a2, regs->a3, regs->a4);
+	printk(KERN_CONT " a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
+		regs->a5, regs->a6, regs->a7);
+	printk(KERN_CONT " s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
+		regs->s2, regs->s3, regs->s4);
+	printk(KERN_CONT " s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
+		regs->s5, regs->s6, regs->s7);
+	printk(KERN_CONT " s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
+		regs->s8, regs->s9, regs->s10);
+	printk(KERN_CONT " s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
+		regs->s11, regs->t3, regs->t4);
+	printk(KERN_CONT " t5 : " REG_FMT " t6 : " REG_FMT "\n",
+		regs->t5, regs->t6);
+
+	printk(KERN_CONT "sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n",
+		regs->sstatus, regs->sbadaddr, regs->scause);
+}
+
+void start_thread(struct pt_regs *regs, unsigned long pc,
+	unsigned long sp)
+{
+	regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
+#ifndef CONFIG_RV_PUM
+	regs->sstatus |= SR_SUM;
+#endif
+	regs->sepc = pc;
+	regs->sp = sp;
+	set_fs(USER_DS);
+}
+
+void flush_thread(void)
+{
+	/* Reset FPU context
+	 *	frm: round to nearest, ties to even (IEEE default)
+	 *	fflags: accrued exceptions cleared
+	 */
+	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	fstate_save(src, task_pt_regs(src));
+	*dst = *src;
+	return 0;
+}
+
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+	unsigned long arg, struct task_struct *p)
+{
+	struct pt_regs *childregs = task_pt_regs(p);
+
+	/* p->thread holds context to be restored by __switch_to() */
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		/* Kernel thread */
+		const register unsigned long gp __asm__ ("gp");
+		memset(childregs, 0, sizeof(struct pt_regs));
+		childregs->gp = gp;
+		childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
+
+		p->thread.ra = (unsigned long)ret_from_kernel_thread;
+		p->thread.s[0] = usp; /* fn */
+		p->thread.s[1] = arg;
+	} else {
+		*childregs = *(current_pt_regs());
+		if (usp) /* User fork */
+			childregs->sp = usp;
+		if (clone_flags & CLONE_SETTLS)
+			childregs->tp = childregs->a5;
+		childregs->a0 = 0; /* Return value of fork() */
+		p->thread.ra = (unsigned long)ret_from_fork;
+	}
+	p->thread.sp = (unsigned long)childregs; /* kernel sp */
+	return 0;
+}
-- 
2.13.0

WARNING: multiple messages have this Message-ID (diff)
From: Palmer Dabbelt <palmer@dabbelt.com>
To: peterz@infradead.org, mingo@redhat.com, mcgrof@kernel.org,
	viro@zeniv.linux.org.uk, sfr@canb.auug.org.au,
	nicolas.dichtel@6wind.com, rmk+kernel@armlinux.org.uk,
	msalter@redhat.com, tklauser@distanz.ch, will.deacon@arm.com,
	james.hogan@imgtec.com, paul.gortmaker@windriver.com,
	linux@roeck-us.net, linux-kernel@vger.kernel.org,
	linux-arch@vger.kernel.org, albert@sifive.com
Cc: Palmer Dabbelt <palmer@dabbelt.com>
Subject: [PATCH 5/9] RISC-V: Task implementation
Date: Wed, 28 Jun 2017 11:55:34 -0700	[thread overview]
Message-ID: <20170628185538.1804-6-palmer@dabbelt.com> (raw)
In-Reply-To: <20170628185538.1804-1-palmer@dabbelt.com>

This patch contains the implementation of tasks on RISC-V, most of which
is involved in task switching.

Signed-off-by: Palmer Dabbelt <palmer@dabbelt.com>
---
 arch/riscv/include/asm/asm-offsets.h |   1 +
 arch/riscv/include/asm/current.h     |  43 ++++
 arch/riscv/include/asm/kprobes.h     |  22 ++
 arch/riscv/include/asm/processor.h   | 102 ++++++++
 arch/riscv/include/asm/switch_to.h   |  69 ++++++
 arch/riscv/include/asm/thread_info.h |  96 ++++++++
 arch/riscv/kernel/asm-offsets.c      | 316 +++++++++++++++++++++++++
 arch/riscv/kernel/entry.S            | 436 +++++++++++++++++++++++++++++++++++
 arch/riscv/kernel/process.c          | 131 +++++++++++
 9 files changed, 1216 insertions(+)
 create mode 100644 arch/riscv/include/asm/asm-offsets.h
 create mode 100644 arch/riscv/include/asm/current.h
 create mode 100644 arch/riscv/include/asm/kprobes.h
 create mode 100644 arch/riscv/include/asm/processor.h
 create mode 100644 arch/riscv/include/asm/switch_to.h
 create mode 100644 arch/riscv/include/asm/thread_info.h
 create mode 100644 arch/riscv/kernel/asm-offsets.c
 create mode 100644 arch/riscv/kernel/entry.S
 create mode 100644 arch/riscv/kernel/process.c

diff --git a/arch/riscv/include/asm/asm-offsets.h b/arch/riscv/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/riscv/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h
new file mode 100644
index 000000000000..032a24ba5308
--- /dev/null
+++ b/arch/riscv/include/asm/current.h
@@ -0,0 +1,43 @@
+/*
+ * Based on arm/arm64/include/asm/current.h
+ *
+ * Copyright (C) 2016 ARM
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+
+#ifndef __ASM_CURRENT_H
+#define __ASM_CURRENT_H
+
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+/* This only works because "struct thread_info" is at offset 0 from "struct
+ * task_struct".  This constraint seems to be necessary on other architectures
+ * as well, but __switch_to enforces it.  We can't check TASK_TI here because
+ * <asm/asm-offsets.h> includes this, and I can't get the definition of "struct
+ * task_struct" here due to some header ordering problems.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+	register struct task_struct *tp __asm__("tp");
+	return tp;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_CURRENT_H */
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
new file mode 100644
index 000000000000..1190de7a0f74
--- /dev/null
+++ b/arch/riscv/include/asm/kprobes.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+
+#ifndef ASM_RISCV_KPROBES_H
+#define ASM_RISCV_KPROBES_H
+
+#ifdef CONFIG_KPROBES
+#error "RISC-V doesn't skpport CONFIG_KPROBES"
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
new file mode 100644
index 000000000000..65aa014db9b4
--- /dev/null
+++ b/arch/riscv/include/asm/processor.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_PROCESSOR_H
+#define _ASM_RISCV_PROCESSOR_H
+
+#include <linux/const.h>
+
+#include <asm/ptrace.h>
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE	PAGE_ALIGN(TASK_SIZE >> 1)
+
+#ifdef __KERNEL__
+#define STACK_TOP		TASK_SIZE
+#define STACK_TOP_MAX		STACK_TOP
+#define STACK_ALIGN		16
+#endif /* __KERNEL__ */
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+struct pt_regs;
+
+/*
+ * Default implementation of macro that returns current
+ * instruction pointer ("program counter").
+ */
+#define current_text_addr()	({ __label__ _l; _l: &&_l; })
+
+/* CPU-specific state of a task */
+struct thread_struct {
+	/* Callee-saved registers */
+	unsigned long ra;
+	unsigned long sp;	/* Kernel mode stack */
+	unsigned long s[12];	/* s[0]: frame pointer */
+	struct __riscv_d_ext_state fstate;
+};
+
+#define INIT_THREAD {					\
+	.sp = sizeof(init_stack) + (long)&init_stack,	\
+}
+
+/* Return saved (kernel) PC of a blocked thread. */
+#define thread_saved_pc(t)	((t)->thread.ra)
+#define thread_saved_sp(t)	((t)->thread.sp)
+#define thread_saved_fp(t)	((t)->thread.s[0])
+
+#define task_pt_regs(tsk)						\
+	((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE		\
+			    - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
+
+#define KSTK_EIP(tsk)		(task_pt_regs(tsk)->sepc)
+#define KSTK_ESP(tsk)		(task_pt_regs(tsk)->sp)
+
+
+/* Do necessary setup to start up a newly executed thread. */
+extern void start_thread(struct pt_regs *regs,
+			unsigned long pc, unsigned long sp);
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+	int dummy;
+	/* In lieu of a halt instruction, induce a long-latency stall. */
+	__asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+	barrier();
+}
+
+static inline void wait_for_interrupt(void)
+{
+	__asm__ __volatile__ ("wfi");
+}
+
+struct device_node;
+extern int riscv_of_processor_hart(struct device_node *node);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
new file mode 100644
index 000000000000..dd6b05bff75b
--- /dev/null
+++ b/arch/riscv/include/asm/switch_to.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_SWITCH_TO_H
+#define _ASM_RISCV_SWITCH_TO_H
+
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+extern void __fstate_save(struct task_struct *save_to);
+extern void __fstate_restore(struct task_struct *restore_from);
+
+static inline void __fstate_clean(struct pt_regs *regs)
+{
+	regs->sstatus |= (regs->sstatus & ~(SR_FS)) | SR_FS_CLEAN;
+}
+
+static inline void fstate_save(struct task_struct *task,
+			       struct pt_regs *regs)
+{
+	if ((regs->sstatus & SR_FS) == SR_FS_DIRTY) {
+		__fstate_save(task);
+		__fstate_clean(regs);
+	}
+}
+
+static inline void fstate_restore(struct task_struct *task,
+				  struct pt_regs *regs)
+{
+	if ((regs->sstatus & SR_FS) != SR_FS_OFF) {
+		__fstate_restore(task);
+		__fstate_clean(regs);
+	}
+}
+
+static inline void __switch_to_aux(struct task_struct *prev,
+				   struct task_struct *next)
+{
+	struct pt_regs *regs;
+
+	regs = task_pt_regs(prev);
+	if (unlikely(regs->sstatus & SR_SD))
+		fstate_save(prev, regs);
+	fstate_restore(next, task_pt_regs(next));
+}
+
+extern struct task_struct *__switch_to(struct task_struct *,
+				       struct task_struct *);
+
+#define switch_to(prev, next, last)			\
+do {							\
+	struct task_struct *__prev = (prev);		\
+	struct task_struct *__next = (next);		\
+	__switch_to_aux(__prev, __next);		\
+	((last) = __switch_to(__prev, __next));		\
+} while (0)
+
+#endif /* _ASM_RISCV_SWITCH_TO_H */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
new file mode 100644
index 000000000000..f05b7d68c1ad
--- /dev/null
+++ b/arch/riscv/include/asm/thread_info.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RISCV_THREAD_INFO_H
+#define _ASM_RISCV_THREAD_INFO_H
+
+#ifdef __KERNEL__
+
+#include <asm/page.h>
+#include <linux/const.h>
+
+/* thread information allocation */
+#define THREAD_SIZE_ORDER	(1)
+#define THREAD_SIZE		(PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+typedef unsigned long mm_segment_t;
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - if the members of this struct changes, the assembly constants
+ *   in asm-offsets.c must be updated accordingly
+ * - thread_info is included in task_struct at an offset of 0.  This means that
+ *   tp points to both thread_info and task_struct.
+ */
+struct thread_info {
+	unsigned long		flags;		/* low level flags */
+	int                     preempt_count;  /* 0=>preemptible, <0=>BUG */
+	mm_segment_t		addr_limit;
+	/* These stack pointers are overwritten on every system call or
+	 * exception.  SP is also saved to the stack it can be recovered when
+	 * overwritten.
+	 */
+	long			kernel_sp;	/* Kernel stack pointer */
+	long			user_sp;	/* User stack pointer */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk)			\
+{						\
+	.flags		= 0,			\
+	.preempt_count	= INIT_PREEMPT_COUNT,	\
+	.addr_limit	= KERNEL_DS,		\
+}
+
+#define init_stack		(init_thread_union.stack)
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ *   access
+ * - pending work-to-be-done flags are in lowest half-word
+ * - other flags in upper half-word(s)
+ */
+#define TIF_SYSCALL_TRACE	0	/* syscall trace active */
+#define TIF_NOTIFY_RESUME	1	/* callback before returning to user */
+#define TIF_SIGPENDING		2	/* signal pending */
+#define TIF_NEED_RESCHED	3	/* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK	4	/* restore signal mask in do_signal() */
+#define TIF_MEMDIE		5	/* is terminating due to OOM killer */
+#define TIF_SYSCALL_TRACEPOINT  6       /* syscall tracepoint instrumentation */
+
+#define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
+
+#define _TIF_WORK_MASK \
+	(_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED)
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
new file mode 100644
index 000000000000..2ead5037528c
--- /dev/null
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -0,0 +1,316 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/kbuild.h>
+#include <linux/sched.h>
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+
+void asm_offsets(void)
+{
+	OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
+	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
+	OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
+	OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
+	OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
+	OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
+	OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
+	OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
+	OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
+	OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
+	OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
+	OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
+	OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
+	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
+	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
+	OFFSET(TASK_STACK, task_struct, stack);
+	OFFSET(TASK_TI, task_struct, thread_info);
+	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
+	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
+	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+
+	OFFSET(TASK_THREAD_F0,  task_struct, thread.fstate.f[0]);
+	OFFSET(TASK_THREAD_F1,  task_struct, thread.fstate.f[1]);
+	OFFSET(TASK_THREAD_F2,  task_struct, thread.fstate.f[2]);
+	OFFSET(TASK_THREAD_F3,  task_struct, thread.fstate.f[3]);
+	OFFSET(TASK_THREAD_F4,  task_struct, thread.fstate.f[4]);
+	OFFSET(TASK_THREAD_F5,  task_struct, thread.fstate.f[5]);
+	OFFSET(TASK_THREAD_F6,  task_struct, thread.fstate.f[6]);
+	OFFSET(TASK_THREAD_F7,  task_struct, thread.fstate.f[7]);
+	OFFSET(TASK_THREAD_F8,  task_struct, thread.fstate.f[8]);
+	OFFSET(TASK_THREAD_F9,  task_struct, thread.fstate.f[9]);
+	OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
+	OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
+	OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
+	OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
+	OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
+	OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
+	OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
+	OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
+	OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
+	OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
+	OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
+	OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
+	OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
+	OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
+	OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
+	OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
+	OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
+	OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
+	OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
+	OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
+	OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
+	OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
+	OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
+
+	DEFINE(PT_SIZE, sizeof(struct pt_regs));
+	OFFSET(PT_SEPC, pt_regs, sepc);
+	OFFSET(PT_RA, pt_regs, ra);
+	OFFSET(PT_FP, pt_regs, s0);
+	OFFSET(PT_S0, pt_regs, s0);
+	OFFSET(PT_S1, pt_regs, s1);
+	OFFSET(PT_S2, pt_regs, s2);
+	OFFSET(PT_S3, pt_regs, s3);
+	OFFSET(PT_S4, pt_regs, s4);
+	OFFSET(PT_S5, pt_regs, s5);
+	OFFSET(PT_S6, pt_regs, s6);
+	OFFSET(PT_S7, pt_regs, s7);
+	OFFSET(PT_S8, pt_regs, s8);
+	OFFSET(PT_S9, pt_regs, s9);
+	OFFSET(PT_S10, pt_regs, s10);
+	OFFSET(PT_S11, pt_regs, s11);
+	OFFSET(PT_SP, pt_regs, sp);
+	OFFSET(PT_TP, pt_regs, tp);
+	OFFSET(PT_A0, pt_regs, a0);
+	OFFSET(PT_A1, pt_regs, a1);
+	OFFSET(PT_A2, pt_regs, a2);
+	OFFSET(PT_A3, pt_regs, a3);
+	OFFSET(PT_A4, pt_regs, a4);
+	OFFSET(PT_A5, pt_regs, a5);
+	OFFSET(PT_A6, pt_regs, a6);
+	OFFSET(PT_A7, pt_regs, a7);
+	OFFSET(PT_T0, pt_regs, t0);
+	OFFSET(PT_T1, pt_regs, t1);
+	OFFSET(PT_T2, pt_regs, t2);
+	OFFSET(PT_T3, pt_regs, t3);
+	OFFSET(PT_T4, pt_regs, t4);
+	OFFSET(PT_T5, pt_regs, t5);
+	OFFSET(PT_T6, pt_regs, t6);
+	OFFSET(PT_GP, pt_regs, gp);
+	OFFSET(PT_SSTATUS, pt_regs, sstatus);
+	OFFSET(PT_SBADADDR, pt_regs, sbadaddr);
+	OFFSET(PT_SCAUSE, pt_regs, scause);
+
+	/* The THREAD_{F,X}* offsets might be larger than an S-type immediate
+	 * can encode, but they are used in performance-sensitive assembly, so
+	 * we can't afford to load the full offset as a long immediate every
+	 * time.  Instead, emit each offset relative to a nearby base member
+	 * (thread.ra for the integer state, thread.fstate.f[0] for the FP
+	 * state) so the assembly only materializes the base once.
+	 */
+	DEFINE(TASK_THREAD_RA_RA,
+		  offsetof(struct task_struct, thread.ra)
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_SP_RA,
+		  offsetof(struct task_struct, thread.sp)
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S0_RA,
+		  offsetof(struct task_struct, thread.s[0])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S1_RA,
+		  offsetof(struct task_struct, thread.s[1])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S2_RA,
+		  offsetof(struct task_struct, thread.s[2])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S3_RA,
+		  offsetof(struct task_struct, thread.s[3])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S4_RA,
+		  offsetof(struct task_struct, thread.s[4])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S5_RA,
+		  offsetof(struct task_struct, thread.s[5])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S6_RA,
+		  offsetof(struct task_struct, thread.s[6])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S7_RA,
+		  offsetof(struct task_struct, thread.s[7])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S8_RA,
+		  offsetof(struct task_struct, thread.s[8])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S9_RA,
+		  offsetof(struct task_struct, thread.s[9])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S10_RA,
+		  offsetof(struct task_struct, thread.s[10])
+		- offsetof(struct task_struct, thread.ra)
+	);
+	DEFINE(TASK_THREAD_S11_RA,
+		  offsetof(struct task_struct, thread.s[11])
+		- offsetof(struct task_struct, thread.ra)
+	);
+
+	DEFINE(TASK_THREAD_F0_F0,
+		  offsetof(struct task_struct, thread.fstate.f[0])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F1_F0,
+		  offsetof(struct task_struct, thread.fstate.f[1])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F2_F0,
+		  offsetof(struct task_struct, thread.fstate.f[2])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F3_F0,
+		  offsetof(struct task_struct, thread.fstate.f[3])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F4_F0,
+		  offsetof(struct task_struct, thread.fstate.f[4])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F5_F0,
+		  offsetof(struct task_struct, thread.fstate.f[5])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F6_F0,
+		  offsetof(struct task_struct, thread.fstate.f[6])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F7_F0,
+		  offsetof(struct task_struct, thread.fstate.f[7])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F8_F0,
+		  offsetof(struct task_struct, thread.fstate.f[8])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F9_F0,
+		  offsetof(struct task_struct, thread.fstate.f[9])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F10_F0,
+		  offsetof(struct task_struct, thread.fstate.f[10])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F11_F0,
+		  offsetof(struct task_struct, thread.fstate.f[11])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F12_F0,
+		  offsetof(struct task_struct, thread.fstate.f[12])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F13_F0,
+		  offsetof(struct task_struct, thread.fstate.f[13])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F14_F0,
+		  offsetof(struct task_struct, thread.fstate.f[14])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F15_F0,
+		  offsetof(struct task_struct, thread.fstate.f[15])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F16_F0,
+		  offsetof(struct task_struct, thread.fstate.f[16])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F17_F0,
+		  offsetof(struct task_struct, thread.fstate.f[17])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F18_F0,
+		  offsetof(struct task_struct, thread.fstate.f[18])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F19_F0,
+		  offsetof(struct task_struct, thread.fstate.f[19])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F20_F0,
+		  offsetof(struct task_struct, thread.fstate.f[20])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F21_F0,
+		  offsetof(struct task_struct, thread.fstate.f[21])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F22_F0,
+		  offsetof(struct task_struct, thread.fstate.f[22])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F23_F0,
+		  offsetof(struct task_struct, thread.fstate.f[23])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F24_F0,
+		  offsetof(struct task_struct, thread.fstate.f[24])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F25_F0,
+		  offsetof(struct task_struct, thread.fstate.f[25])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F26_F0,
+		  offsetof(struct task_struct, thread.fstate.f[26])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F27_F0,
+		  offsetof(struct task_struct, thread.fstate.f[27])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F28_F0,
+		  offsetof(struct task_struct, thread.fstate.f[28])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F29_F0,
+		  offsetof(struct task_struct, thread.fstate.f[29])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F30_F0,
+		  offsetof(struct task_struct, thread.fstate.f[30])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_F31_F0,
+		  offsetof(struct task_struct, thread.fstate.f[31])
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+	DEFINE(TASK_THREAD_FCSR_F0,
+		  offsetof(struct task_struct, thread.fstate.fcsr)
+		- offsetof(struct task_struct, thread.fstate.f[0])
+	);
+
+	/* The assembler needs access to THREAD_SIZE as well. */
+	DEFINE(ASM_THREAD_SIZE, THREAD_SIZE);
+
+	/* We allocate a pt_regs on the stack when entering the kernel.  Round
+	 * its size up to STACK_ALIGN so the stack pointer stays correctly
+	 * aligned after the frame is carved out.
+	 */
+	DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
+}
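
To make the *_RA and *_F0 constants above a bit more concrete: RISC-V loads
and stores only carry a 12-bit signed immediate, so the absolute offset of a
thread member inside task_struct may not fit, while its distance from a nearby
base member always will.  The stand-alone program below is not part of the
patch and uses a hypothetical layout (task_like, thread_ctx and other_state
are made up); only the offsetof() arithmetic mirrors what asm-offsets.c emits.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-in for task_struct: a large blob precedes the thread
 * context so the absolute offsets exceed the +/-2047 range of an S-type
 * immediate while the relative ones stay tiny. */
struct thread_ctx {
	unsigned long ra;
	unsigned long sp;
	unsigned long s[12];
};

struct task_like {
	char other_state[6000];		/* whatever precedes .thread */
	struct thread_ctx thread;
};

int main(void)
{
	/* Absolute offset: too big for a 12-bit signed store immediate. */
	printf("TASK_THREAD_S11    = %zu\n",
	       offsetof(struct task_like, thread.s[11]));

	/* Relative offset from thread.ra: always small, so __switch_to can
	 * add the base once and then use short immediates, which is exactly
	 * what the TASK_THREAD_*_RA constants provide. */
	printf("TASK_THREAD_S11_RA = %zu\n",
	       offsetof(struct task_like, thread.s[11]) -
	       offsetof(struct task_like, thread.ra));
	return 0;
}
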
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
new file mode 100644
index 000000000000..aa4593c2eaef
--- /dev/null
+++ b/arch/riscv/kernel/entry.S
@@ -0,0 +1,436 @@
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ *   This program is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License
+ *   as published by the Free Software Foundation, version 2.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+	.text
+	.altmacro
+
+/* Prepares to enter a system call or exception by saving all registers to the
+ * stack.
+ */
+	.macro SAVE_ALL
+	LOCAL _restore_kernel_tpsp
+	LOCAL _save_context
+
+	/* If coming from userspace, preserve the user thread pointer and load
+	   the kernel thread pointer.  If we came from the kernel, sscratch
+	   will contain 0, and we should continue on the current TP. */
+	csrrw tp, sscratch, tp
+	bnez tp, _save_context
+
+_restore_kernel_tpsp:
+	csrr tp, sscratch
+	REG_S sp, TASK_TI_KERNEL_SP(tp)
+_save_context:
+	REG_S sp, TASK_TI_USER_SP(tp)
+	REG_L sp, TASK_TI_KERNEL_SP(tp)
+	addi sp, sp, -(PT_SIZE_ON_STACK)
+	REG_S x1,  PT_RA(sp)
+	REG_S x3,  PT_GP(sp)
+	REG_S x5,  PT_T0(sp)
+	REG_S x6,  PT_T1(sp)
+	REG_S x7,  PT_T2(sp)
+	REG_S x8,  PT_S0(sp)
+	REG_S x9,  PT_S1(sp)
+	REG_S x10, PT_A0(sp)
+	REG_S x11, PT_A1(sp)
+	REG_S x12, PT_A2(sp)
+	REG_S x13, PT_A3(sp)
+	REG_S x14, PT_A4(sp)
+	REG_S x15, PT_A5(sp)
+	REG_S x16, PT_A6(sp)
+	REG_S x17, PT_A7(sp)
+	REG_S x18, PT_S2(sp)
+	REG_S x19, PT_S3(sp)
+	REG_S x20, PT_S4(sp)
+	REG_S x21, PT_S5(sp)
+	REG_S x22, PT_S6(sp)
+	REG_S x23, PT_S7(sp)
+	REG_S x24, PT_S8(sp)
+	REG_S x25, PT_S9(sp)
+	REG_S x26, PT_S10(sp)
+	REG_S x27, PT_S11(sp)
+	REG_S x28, PT_T3(sp)
+	REG_S x29, PT_T4(sp)
+	REG_S x30, PT_T5(sp)
+	REG_S x31, PT_T6(sp)
+
+	/* Disable FPU to detect illegal usage of
+	   floating point in kernel space */
+	li t0, SR_FS
+
+	REG_L s0, TASK_TI_USER_SP(tp)
+	csrrc s1, sstatus, t0
+	csrr s2, sepc
+	csrr s3, sbadaddr
+	csrr s4, scause
+	csrr s5, sscratch
+	REG_S s0, PT_SP(sp)
+	REG_S s1, PT_SSTATUS(sp)
+	REG_S s2, PT_SEPC(sp)
+	REG_S s3, PT_SBADADDR(sp)
+	REG_S s4, PT_SCAUSE(sp)
+	REG_S s5, PT_TP(sp)
+	.endm
+
+/* Prepares to return from a system call or exception by restoring all
+ * registers from the stack.
+ */
+	.macro RESTORE_ALL
+	REG_L a0, PT_SSTATUS(sp)
+	REG_L a2, PT_SEPC(sp)
+	csrw sstatus, a0
+	csrw sepc, a2
+
+	REG_L x1,  PT_RA(sp)
+	REG_L x3,  PT_GP(sp)
+	REG_L x4,  PT_TP(sp)
+	REG_L x5,  PT_T0(sp)
+	REG_L x6,  PT_T1(sp)
+	REG_L x7,  PT_T2(sp)
+	REG_L x8,  PT_S0(sp)
+	REG_L x9,  PT_S1(sp)
+	REG_L x10, PT_A0(sp)
+	REG_L x11, PT_A1(sp)
+	REG_L x12, PT_A2(sp)
+	REG_L x13, PT_A3(sp)
+	REG_L x14, PT_A4(sp)
+	REG_L x15, PT_A5(sp)
+	REG_L x16, PT_A6(sp)
+	REG_L x17, PT_A7(sp)
+	REG_L x18, PT_S2(sp)
+	REG_L x19, PT_S3(sp)
+	REG_L x20, PT_S4(sp)
+	REG_L x21, PT_S5(sp)
+	REG_L x22, PT_S6(sp)
+	REG_L x23, PT_S7(sp)
+	REG_L x24, PT_S8(sp)
+	REG_L x25, PT_S9(sp)
+	REG_L x26, PT_S10(sp)
+	REG_L x27, PT_S11(sp)
+	REG_L x28, PT_T3(sp)
+	REG_L x29, PT_T4(sp)
+	REG_L x30, PT_T5(sp)
+	REG_L x31, PT_T6(sp)
+
+	REG_L x2,  PT_SP(sp)
+	.endm
+
+ENTRY(handle_exception)
+	SAVE_ALL
+
+	/* Set sscratch register to 0, so that if a recursive exception
+	   occurs, the exception vector knows it came from the kernel */
+	csrw sscratch, x0
+
+	la gp, __global_pointer$
+
+	la ra, ret_from_exception
+	/* MSB of cause differentiates between
+	   interrupts and exceptions */
+	bge s4, zero, 1f
+
+	/* Handle interrupts */
+	slli a0, s4, 1
+	srli a0, a0, 1
+	move a1, sp /* pt_regs */
+	tail do_IRQ
+1:
+	/* Handle syscalls */
+	li t0, EXC_SYSCALL
+	beq s4, t0, handle_syscall
+
+	/* Handle other exceptions */
+	slli t0, s4, RISCV_LGPTR
+	la t1, excp_vect_table
+	la t2, excp_vect_table_end
+	move a0, sp /* pt_regs */
+	add t0, t1, t0
+	/* Check if exception code lies within bounds */
+	bgeu t0, t2, 1f
+	REG_L t0, 0(t0)
+	jr t0
+1:
+	tail do_trap_unknown
+
+handle_syscall:
+	/* Advance SEPC to avoid executing the original
+	   scall instruction on sret */
+	addi s2, s2, 0x4
+	REG_S s2, PT_SEPC(sp)
+	/* System calls run with interrupts enabled */
+	csrs sstatus, SR_IE
+	/* Trace syscalls, but only if requested by the user. */
+	REG_L t0, TASK_TI_FLAGS(tp)
+	andi t0, t0, _TIF_SYSCALL_TRACE
+	bnez t0, handle_syscall_trace_enter
+check_syscall_nr:
+	/* Check to make sure we don't jump to a bogus syscall number. */
+	li t0, __NR_syscalls
+	la s0, sys_ni_syscall
+	/* Syscall number held in a7 */
+	bgeu a7, t0, 1f
+	la s0, sys_call_table
+	slli t0, a7, RISCV_LGPTR
+	add s0, s0, t0
+	REG_L s0, 0(s0)
+1:
+	jalr s0
+
+ret_from_syscall:
+	/* Store the syscall return value into the user's saved a0 */
+	REG_S a0, PT_A0(sp)
+	/* Trace syscalls, but only if requested by the user. */
+	REG_L t0, TASK_TI_FLAGS(tp)
+	andi t0, t0, _TIF_SYSCALL_TRACE
+	bnez t0, handle_syscall_trace_exit
+
+ret_from_exception:
+	REG_L s0, PT_SSTATUS(sp)
+	csrc sstatus, SR_IE
+	andi s0, s0, SR_PS
+	bnez s0, restore_all
+
+resume_userspace:
+	/* Interrupts must be disabled here so flags are checked atomically */
+	REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
+	andi s1, s0, _TIF_WORK_MASK
+	bnez s1, work_pending
+
+	/* Save unwound kernel stack pointer in thread_info */
+	addi s0, sp, PT_SIZE_ON_STACK
+	REG_S s0, TASK_TI_KERNEL_SP(tp)
+
+	/* Save TP into sscratch, so the exception entry code can find the
+	 * kernel data structures again on the next trap from userspace. */
+	csrw sscratch, tp
+
+restore_all:
+	RESTORE_ALL
+	sret
+
+work_pending:
+	/* Enter slow path for supplementary processing */
+	la ra, ret_from_exception
+	andi s1, s0, _TIF_NEED_RESCHED
+	bnez s1, work_resched
+work_notifysig:
+	/* Handle pending signals and notify-resume requests */
+	csrs sstatus, SR_IE /* Enable interrupts for do_notify_resume() */
+	move a0, sp /* pt_regs */
+	move a1, s0 /* current_thread_info->flags */
+	tail do_notify_resume
+work_resched:
+	tail schedule
+
+/* Slow paths for ptrace. */
+handle_syscall_trace_enter:
+	move a0, sp
+	call do_syscall_trace_enter
+	REG_L a0, PT_A0(sp)
+	REG_L a1, PT_A1(sp)
+	REG_L a2, PT_A2(sp)
+	REG_L a3, PT_A3(sp)
+	REG_L a4, PT_A4(sp)
+	REG_L a5, PT_A5(sp)
+	REG_L a6, PT_A6(sp)
+	REG_L a7, PT_A7(sp)
+	j check_syscall_nr
+handle_syscall_trace_exit:
+	move a0, sp
+	call do_syscall_trace_exit
+	j ret_from_exception
+
+END(handle_exception)
+
+ENTRY(ret_from_fork)
+	la ra, ret_from_exception
+	tail schedule_tail
+ENDPROC(ret_from_fork)
+
+ENTRY(ret_from_kernel_thread)
+	call schedule_tail
+	/* Call fn(arg) */
+	la ra, ret_from_exception
+	move a0, s1
+	jr s0
+ENDPROC(ret_from_kernel_thread)
+
+
+/*
+ * Integer register context switch
+ * The callee-saved registers must be saved and restored.
+ *
+ *   a0: previous task_struct (must be preserved across the switch)
+ *   a1: next task_struct
+ */
+ENTRY(__switch_to)
+	/* Save context into prev->thread */
+	li    a2,  TASK_THREAD_RA
+	add   a0, a0, a2
+	add   a2, a1, a2
+	REG_S ra,  TASK_THREAD_RA_RA(a0)
+	REG_S sp,  TASK_THREAD_SP_RA(a0)
+	REG_S s0,  TASK_THREAD_S0_RA(a0)
+	REG_S s1,  TASK_THREAD_S1_RA(a0)
+	REG_S s2,  TASK_THREAD_S2_RA(a0)
+	REG_S s3,  TASK_THREAD_S3_RA(a0)
+	REG_S s4,  TASK_THREAD_S4_RA(a0)
+	REG_S s5,  TASK_THREAD_S5_RA(a0)
+	REG_S s6,  TASK_THREAD_S6_RA(a0)
+	REG_S s7,  TASK_THREAD_S7_RA(a0)
+	REG_S s8,  TASK_THREAD_S8_RA(a0)
+	REG_S s9,  TASK_THREAD_S9_RA(a0)
+	REG_S s10, TASK_THREAD_S10_RA(a0)
+	REG_S s11, TASK_THREAD_S11_RA(a0)
+	/* Restore context from next->thread */
+	REG_L ra,  TASK_THREAD_RA_RA(a2)
+	REG_L sp,  TASK_THREAD_SP_RA(a2)
+	REG_L s0,  TASK_THREAD_S0_RA(a2)
+	REG_L s1,  TASK_THREAD_S1_RA(a2)
+	REG_L s2,  TASK_THREAD_S2_RA(a2)
+	REG_L s3,  TASK_THREAD_S3_RA(a2)
+	REG_L s4,  TASK_THREAD_S4_RA(a2)
+	REG_L s5,  TASK_THREAD_S5_RA(a2)
+	REG_L s6,  TASK_THREAD_S6_RA(a2)
+	REG_L s7,  TASK_THREAD_S7_RA(a2)
+	REG_L s8,  TASK_THREAD_S8_RA(a2)
+	REG_L s9,  TASK_THREAD_S9_RA(a2)
+	REG_L s10, TASK_THREAD_S10_RA(a2)
+	REG_L s11, TASK_THREAD_S11_RA(a2)
+#if TASK_TI != 0
+#error "TASK_TI != 0: tp will contain a 'struct thread_info', not a 'struct task_struct' so get_current() won't work."
+	addi tp, a1, TASK_TI
+#else
+	move tp, a1
+#endif
+	ret
+ENDPROC(__switch_to)
+
+ENTRY(__fstate_save)
+	li  a2,  TASK_THREAD_F0
+	add a0, a0, a2
+	li t1, SR_FS
+	csrs sstatus, t1
+	frcsr t0
+	fsd f0,  TASK_THREAD_F0_F0(a0)
+	fsd f1,  TASK_THREAD_F1_F0(a0)
+	fsd f2,  TASK_THREAD_F2_F0(a0)
+	fsd f3,  TASK_THREAD_F3_F0(a0)
+	fsd f4,  TASK_THREAD_F4_F0(a0)
+	fsd f5,  TASK_THREAD_F5_F0(a0)
+	fsd f6,  TASK_THREAD_F6_F0(a0)
+	fsd f7,  TASK_THREAD_F7_F0(a0)
+	fsd f8,  TASK_THREAD_F8_F0(a0)
+	fsd f9,  TASK_THREAD_F9_F0(a0)
+	fsd f10, TASK_THREAD_F10_F0(a0)
+	fsd f11, TASK_THREAD_F11_F0(a0)
+	fsd f12, TASK_THREAD_F12_F0(a0)
+	fsd f13, TASK_THREAD_F13_F0(a0)
+	fsd f14, TASK_THREAD_F14_F0(a0)
+	fsd f15, TASK_THREAD_F15_F0(a0)
+	fsd f16, TASK_THREAD_F16_F0(a0)
+	fsd f17, TASK_THREAD_F17_F0(a0)
+	fsd f18, TASK_THREAD_F18_F0(a0)
+	fsd f19, TASK_THREAD_F19_F0(a0)
+	fsd f20, TASK_THREAD_F20_F0(a0)
+	fsd f21, TASK_THREAD_F21_F0(a0)
+	fsd f22, TASK_THREAD_F22_F0(a0)
+	fsd f23, TASK_THREAD_F23_F0(a0)
+	fsd f24, TASK_THREAD_F24_F0(a0)
+	fsd f25, TASK_THREAD_F25_F0(a0)
+	fsd f26, TASK_THREAD_F26_F0(a0)
+	fsd f27, TASK_THREAD_F27_F0(a0)
+	fsd f28, TASK_THREAD_F28_F0(a0)
+	fsd f29, TASK_THREAD_F29_F0(a0)
+	fsd f30, TASK_THREAD_F30_F0(a0)
+	fsd f31, TASK_THREAD_F31_F0(a0)
+	sw t0, TASK_THREAD_FCSR_F0(a0)
+	csrc sstatus, t1
+	ret
+ENDPROC(__fstate_save)
+
+ENTRY(__fstate_restore)
+	li  a2,  TASK_THREAD_F0
+	add a0, a0, a2
+	li t1, SR_FS
+	lw t0, TASK_THREAD_FCSR_F0(a0)
+	csrs sstatus, t1
+	fld f0,  TASK_THREAD_F0_F0(a0)
+	fld f1,  TASK_THREAD_F1_F0(a0)
+	fld f2,  TASK_THREAD_F2_F0(a0)
+	fld f3,  TASK_THREAD_F3_F0(a0)
+	fld f4,  TASK_THREAD_F4_F0(a0)
+	fld f5,  TASK_THREAD_F5_F0(a0)
+	fld f6,  TASK_THREAD_F6_F0(a0)
+	fld f7,  TASK_THREAD_F7_F0(a0)
+	fld f8,  TASK_THREAD_F8_F0(a0)
+	fld f9,  TASK_THREAD_F9_F0(a0)
+	fld f10, TASK_THREAD_F10_F0(a0)
+	fld f11, TASK_THREAD_F11_F0(a0)
+	fld f12, TASK_THREAD_F12_F0(a0)
+	fld f13, TASK_THREAD_F13_F0(a0)
+	fld f14, TASK_THREAD_F14_F0(a0)
+	fld f15, TASK_THREAD_F15_F0(a0)
+	fld f16, TASK_THREAD_F16_F0(a0)
+	fld f17, TASK_THREAD_F17_F0(a0)
+	fld f18, TASK_THREAD_F18_F0(a0)
+	fld f19, TASK_THREAD_F19_F0(a0)
+	fld f20, TASK_THREAD_F20_F0(a0)
+	fld f21, TASK_THREAD_F21_F0(a0)
+	fld f22, TASK_THREAD_F22_F0(a0)
+	fld f23, TASK_THREAD_F23_F0(a0)
+	fld f24, TASK_THREAD_F24_F0(a0)
+	fld f25, TASK_THREAD_F25_F0(a0)
+	fld f26, TASK_THREAD_F26_F0(a0)
+	fld f27, TASK_THREAD_F27_F0(a0)
+	fld f28, TASK_THREAD_F28_F0(a0)
+	fld f29, TASK_THREAD_F29_F0(a0)
+	fld f30, TASK_THREAD_F30_F0(a0)
+	fld f31, TASK_THREAD_F31_F0(a0)
+	fscsr t0
+	csrc sstatus, t1
+	ret
+ENDPROC(__fstate_restore)
+
+
+	.section ".rodata"
+	/* Exception vector table */
+ENTRY(excp_vect_table)
+	RISCV_PTR do_trap_insn_misaligned
+	RISCV_PTR do_trap_insn_fault
+	RISCV_PTR do_trap_insn_illegal
+	RISCV_PTR do_trap_break
+	RISCV_PTR do_trap_load_misaligned
+	RISCV_PTR do_trap_load_fault
+	RISCV_PTR do_trap_store_misaligned
+	RISCV_PTR do_trap_store_fault
+	RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
+	RISCV_PTR do_trap_ecall_s
+	RISCV_PTR do_trap_unknown
+	RISCV_PTR do_trap_ecall_m
+	RISCV_PTR do_page_fault   /* instruction page fault */
+	RISCV_PTR do_page_fault   /* load page fault */
+	RISCV_PTR do_trap_unknown
+	RISCV_PTR do_page_fault   /* store page fault */
+excp_vect_table_end:
+END(excp_vect_table)
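
For readers less used to assembly: the excp_vect_table dispatch in
handle_exception (scale scause by the pointer size, bounds-check against
excp_vect_table_end, load the handler pointer and jump to it, falling back to
do_trap_unknown) is equivalent to the stand-alone C sketch below.  It is not
part of the patch, only models a trimmed four-entry table, and the handler
names here are placeholders.

#include <stdio.h>

static void trap_breakpoint(void) { puts("breakpoint"); }
static void trap_unknown(void)    { puts("unknown trap"); }

/* Trimmed, four-entry stand-in for excp_vect_table. */
static void (*const vect_table[])(void) = {
	trap_unknown,		/* 0: instruction address misaligned */
	trap_unknown,		/* 1: instruction access fault */
	trap_unknown,		/* 2: illegal instruction */
	trap_breakpoint,	/* 3: breakpoint */
};

static void dispatch(unsigned long cause)
{
	/* "bgeu t0, t2, 1f": fall back when the cause is out of range. */
	if (cause >= sizeof(vect_table) / sizeof(vect_table[0]))
		trap_unknown();
	else
		/* "REG_L t0, 0(t0); jr t0": indirect jump via the table. */
		vect_table[cause]();
}

int main(void)
{
	dispatch(3);	/* breakpoint */
	dispatch(42);	/* out of range -> unknown trap */
	return 0;
}
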
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
new file mode 100644
index 000000000000..b13d3ea3bf79
--- /dev/null
+++ b/arch/riscv/kernel/process.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ *  Chen Liqin <liqin.chen@sunplusct.com>
+ *  Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see the file COPYING, or write
+ * to the Free Software Foundation, Inc.,
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tick.h>
+#include <linux/ptrace.h>
+
+#include <asm/unistd.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/csr.h>
+#include <asm/string.h>
+#include <asm/switch_to.h>
+
+extern asmlinkage void ret_from_fork(void);
+extern asmlinkage void ret_from_kernel_thread(void);
+
+void arch_cpu_idle(void)
+{
+	wait_for_interrupt();
+	local_irq_enable();
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	show_regs_print_info(KERN_DEFAULT);
+
+	printk(KERN_CONT "sepc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
+		regs->sepc, regs->ra, regs->sp);
+	printk(KERN_CONT " gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
+		regs->gp, regs->tp, regs->t0);
+	printk(KERN_CONT " t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
+		regs->t1, regs->t2, regs->s0);
+	printk(KERN_CONT " s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
+		regs->s1, regs->a0, regs->a1);
+	printk(KERN_CONT " a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
+		regs->a2, regs->a3, regs->a4);
+	printk(KERN_CONT " a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
+		regs->a5, regs->a6, regs->a7);
+	printk(KERN_CONT " s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
+		regs->s2, regs->s3, regs->s4);
+	printk(KERN_CONT " s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
+		regs->s5, regs->s6, regs->s7);
+	printk(KERN_CONT " s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
+		regs->s8, regs->s9, regs->s10);
+	printk(KERN_CONT " s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
+		regs->s11, regs->t3, regs->t4);
+	printk(KERN_CONT " t5 : " REG_FMT " t6 : " REG_FMT "\n",
+		regs->t5, regs->t6);
+
+	printk(KERN_CONT "sstatus: " REG_FMT " sbadaddr: " REG_FMT " scause: " REG_FMT "\n",
+		regs->sstatus, regs->sbadaddr, regs->scause);
+}
+
+void start_thread(struct pt_regs *regs, unsigned long pc,
+	unsigned long sp)
+{
+	regs->sstatus = SR_PIE /* User mode, irqs on */ | SR_FS_INITIAL;
+#ifndef CONFIG_RV_PUM
+	regs->sstatus |= SR_SUM;
+#endif
+	regs->sepc = pc;
+	regs->sp = sp;
+	set_fs(USER_DS);
+}
+
+void flush_thread(void)
+{
+	/* Reset FPU context
+	 *	frm: round to nearest, ties to even (IEEE default)
+	 *	fflags: accrued exceptions cleared
+	 */
+	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+	fstate_save(src, task_pt_regs(src));
+	*dst = *src;
+	return 0;
+}
+
+int copy_thread(unsigned long clone_flags, unsigned long usp,
+	unsigned long arg, struct task_struct *p)
+{
+	struct pt_regs *childregs = task_pt_regs(p);
+
+	/* p->thread holds context to be restored by __switch_to() */
+	if (unlikely(p->flags & PF_KTHREAD)) {
+		/* Kernel thread */
+		const register unsigned long gp __asm__ ("gp");
+		memset(childregs, 0, sizeof(struct pt_regs));
+		childregs->gp = gp;
+		childregs->sstatus = SR_PS | SR_PIE; /* Supervisor, irqs on */
+
+		p->thread.ra = (unsigned long)ret_from_kernel_thread;
+		p->thread.s[0] = usp; /* fn */
+		p->thread.s[1] = arg;
+	} else {
+		*childregs = *(current_pt_regs());
+		if (usp) /* User fork */
+			childregs->sp = usp;
+		if (clone_flags & CLONE_SETTLS)
+			childregs->tp = childregs->a5;
+		childregs->a0 = 0; /* Return value of fork() */
+		p->thread.ra = (unsigned long)ret_from_fork;
+	}
+	p->thread.sp = (unsigned long)childregs; /* kernel sp */
+	return 0;
+}
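
The interplay between copy_thread() and the two trampolines in entry.S can be
summarized as: the child's saved thread.ra decides where it resumes after its
first __switch_to, either ret_from_kernel_thread (which turns the saved
s[0]/s[1] back into fn(arg)) or ret_from_fork (which returns to userspace with
a0 == 0).  The rough user-space model below is not the kernel's
implementation; thread_model, kthread_payload and the *_model functions are
invented purely for illustration.

#include <stdio.h>

/* One "task": the saved return address plus the two callee-saved slots that
 * ret_from_kernel_thread turns back into fn(arg). */
struct thread_model {
	void (*ra)(struct thread_model *t);	/* saved thread.ra */
	void (*fn)(unsigned long arg);		/* saved thread.s[0] */
	unsigned long arg;			/* saved thread.s[1] */
};

static void kthread_payload(unsigned long arg)
{
	printf("kernel thread body runs with arg=%lu\n", arg);
}

/* Analogue of ret_from_kernel_thread: call fn(arg). */
static void ret_from_kernel_thread_model(struct thread_model *t)
{
	t->fn(t->arg);
}

/* Analogue of ret_from_fork: "return" to userspace with a0 == 0. */
static void ret_from_fork_model(struct thread_model *t)
{
	(void)t;
	printf("forked child sees fork() == 0\n");
}

int main(void)
{
	struct thread_model kthread = {
		.ra  = ret_from_kernel_thread_model,
		.fn  = kthread_payload,
		.arg = 42,
	};
	struct thread_model child = { .ra = ret_from_fork_model };

	/* The first time each task is scheduled, __switch_to "returns"
	 * through the saved ra; model that by calling it directly. */
	kthread.ra(&kthread);
	child.ra(&child);
	return 0;
}
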
-- 
2.13.0


Thread overview: 284+ messages
2017-05-23  0:41 RISC-V Linux Port v1 Palmer Dabbelt
2017-05-23  0:41 ` [PATCH 1/7] RISC-V: Top-Level Makefile for riscv{32,64} Palmer Dabbelt
2017-05-23 11:30   ` Arnd Bergmann
2017-05-27  0:57     ` Palmer Dabbelt
2017-05-29 10:50       ` Arnd Bergmann
2017-06-06  4:56         ` Palmer Dabbelt
2017-06-06 17:39           ` Karsten Merker
2017-06-06 17:57             ` Palmer Dabbelt
2017-05-23  0:41 ` [PATCH 2/7] RISC-V: arch/riscv Makefile and Kconfigs Palmer Dabbelt
2017-05-23  1:27   ` Olof Johansson
2017-05-23  1:31     ` Randy Dunlap
2017-05-23  4:49       ` Palmer Dabbelt
2017-05-23  4:49     ` Palmer Dabbelt
2017-05-23  5:16       ` [patches] " Olof Johansson
2017-05-23 21:07         ` Benjamin Herrenschmidt
2017-05-23  5:23   ` Olof Johansson
2017-05-23 15:29     ` Palmer Dabbelt
2017-05-23 10:51   ` Geert Uytterhoeven
2017-05-25  1:59     ` Palmer Dabbelt
2017-05-23 11:46   ` Arnd Bergmann
2017-05-27  0:57     ` Palmer Dabbelt
2017-05-29 11:17       ` Arnd Bergmann
2017-06-06  4:56         ` Palmer Dabbelt
2017-06-06  9:20           ` Arnd Bergmann
2017-06-06 20:38             ` Palmer Dabbelt
2017-05-23  0:41 ` [PATCH 3/7] RISC-V: Device Tree Documentation Palmer Dabbelt
2017-05-23 12:03   ` Arnd Bergmann
2017-05-27  0:57     ` Palmer Dabbelt
2017-05-23  0:41 ` [PATCH 4/7] RISC-V: arch/riscv/include Palmer Dabbelt
2017-05-23 12:55   ` Arnd Bergmann
2017-05-23 21:23     ` Benjamin Herrenschmidt
2017-06-03  2:00       ` Palmer Dabbelt
2017-06-01  0:56     ` Palmer Dabbelt
2017-06-01  9:00       ` Arnd Bergmann
2017-06-06  4:56         ` Palmer Dabbelt
2017-06-06  8:54           ` Arnd Bergmann
2017-06-06 19:07             ` Palmer Dabbelt
2017-05-23  0:41 ` [PATCH 5/7] RISC-V: arch/riscv/lib Palmer Dabbelt
2017-05-23 10:47   ` Geert Uytterhoeven
2017-05-23 22:07     ` Palmer Dabbelt
2017-05-23 11:19   ` Arnd Bergmann
2017-05-25  1:59     ` Palmer Dabbelt
2017-05-26  9:06       ` Arnd Bergmann
2017-06-06  4:56         ` Palmer Dabbelt
2017-06-06  9:31           ` Arnd Bergmann
2017-06-06 20:53             ` Palmer Dabbelt
2017-06-07  7:35               ` Arnd Bergmann
2017-06-23 23:24                 ` Palmer Dabbelt
2017-05-23  0:41 ` [PATCH 6/7] RISC-V: arch/riscv/kernel Palmer Dabbelt
2017-05-23  2:11   ` Olof Johansson
2017-05-25  1:59     ` Palmer Dabbelt
2017-05-25 19:51       ` Arnd Bergmann
2017-06-06  4:56         ` Palmer Dabbelt
2017-06-06  9:03           ` Arnd Bergmann
2017-06-06 20:38             ` Palmer Dabbelt
2017-05-23 13:35   ` Arnd Bergmann
2017-06-02 23:56     ` Palmer Dabbelt
2017-06-06  9:01       ` Arnd Bergmann
2017-06-06 20:37         ` Palmer Dabbelt
2017-05-25 17:05   ` Pavel Machek
2017-06-03  3:32     ` Palmer Dabbelt
2017-05-23  0:41 ` [PATCH 7/7] RISC-V: arch/riscv/mm Palmer Dabbelt
2017-05-23  1:28   ` Randy Dunlap
2017-05-23  2:17     ` Olof Johansson
2017-05-23  3:36       ` Palmer Dabbelt
2017-05-23  1:16 ` RISC-V Linux Port v1 Olof Johansson
2017-05-23  1:25   ` Randy Dunlap
2017-05-23  3:36     ` Palmer Dabbelt
2017-05-23  3:36   ` Palmer Dabbelt
2017-05-23  6:45     ` Tobias Klauser
2017-05-23 15:44       ` Palmer Dabbelt
2017-05-23  2:16 ` Randy Dunlap
2017-05-23  4:49   ` Palmer Dabbelt
2017-06-06 22:59 ` RISC-V Linux Port v2 Palmer Dabbelt
2017-06-06 22:59   ` Palmer Dabbelt
2017-06-06 22:59   ` [PATCH 01/17] drivers: support PCIe in RISCV Palmer Dabbelt
2017-06-06 22:59     ` Palmer Dabbelt
2017-06-07  7:17     ` Geert Uytterhoeven
2017-06-07 14:25     ` Christoph Hellwig
     [not found]       ` <CAMgXwTjXZ5dsxmJ2FyWhCRWo-3nyvKUDfhfV0nNC+oakF=AEsA@mail.gmail.com>
2017-06-07 17:40         ` Olof Johansson
2017-06-23 21:47           ` [patches] " Palmer Dabbelt
2017-06-23 21:47             ` Palmer Dabbelt
2017-06-06 22:59   ` [PATCH 02/17] pcie-xilinx: add missing 5th legacy interrupt Palmer Dabbelt
2017-06-06 22:59   ` Palmer Dabbelt
2017-06-06 22:59     ` Palmer Dabbelt
2017-06-07  7:18     ` Geert Uytterhoeven
2017-06-07  9:24     ` Marc Zyngier
2017-06-07 19:03       ` Wesley Terpstra
2017-06-06 22:59   ` [PATCH 03/17] base: fix order of OF initialization Palmer Dabbelt
2017-06-06 22:59     ` Palmer Dabbelt
2017-06-07  7:07     ` Geert Uytterhoeven
2017-06-07  9:35       ` Mark Rutland
2017-06-07  9:35         ` Mark Rutland
2017-06-07 18:39         ` Wesley Terpstra
2017-06-07 21:10           ` Benjamin Herrenschmidt
2017-06-08  3:49           ` Frank Rowand
2017-06-08  9:05             ` Mark Rutland
2017-06-08  9:05               ` Mark Rutland
2017-06-08  9:05               ` Mark Rutland
2017-06-09  0:37               ` Frank Rowand
2017-06-06 22:59   ` [PATCH 04/17] Documentation: atomic_ops.txt is core-api/atomic_ops.rst Palmer Dabbelt
2017-06-06 22:59     ` Palmer Dabbelt
2017-06-07  7:19     ` Geert Uytterhoeven
2017-06-07  9:20     ` Will Deacon
2017-06-06 22:59   ` [PATCH 05/17] MAINTAINERS: Add RISC-V Palmer Dabbelt
2017-06-06 22:59   ` Palmer Dabbelt
2017-06-06 22:59     ` Palmer Dabbelt
2017-06-06 22:59   ` [PATCH 06/17] pci: Add generic pcibios_{fixup_bus,align_resource} Palmer Dabbelt
2017-06-06 22:59     ` Palmer Dabbelt
2017-06-07  7:19     ` Geert Uytterhoeven
2017-06-07  8:01       ` Arnd Bergmann
2017-06-24  2:01         ` Palmer Dabbelt
2017-06-24  2:01           ` Palmer Dabbelt
2017-06-08  8:12       ` Christoph Hellwig
2017-06-08  8:35         ` Arnd Bergmann
2017-06-24  2:01           ` Palmer Dabbelt
2017-06-24  2:01             ` Palmer Dabbelt
2017-06-06 22:59   ` [PATCH 07/17] lib: Add shared copies of some GCC library routines Palmer Dabbelt
2017-06-06 22:59     ` Palmer Dabbelt
2017-06-06 22:59   ` Palmer Dabbelt
2017-06-06 22:59   ` [PATCH 08/17] dts: include documentation for the RISC-V interrupt controllers Palmer Dabbelt
2017-06-06 22:59     ` Palmer Dabbelt
2017-06-07  7:11     ` Geert Uytterhoeven
2017-06-07 10:13       ` Mark Rutland
2017-06-07 18:57         ` Wesley Terpstra
2017-06-07 19:57           ` Rob Herring
2017-06-07 20:31             ` Wesley Terpstra
2017-06-08 10:52           ` Mark Rutland
2017-06-09 21:46             ` Wesley Terpstra
2017-06-09 21:46               ` Wesley Terpstra
2017-06-09 21:58             ` Wesley Terpstra
2017-06-19 14:30               ` Mark Rutland
2017-06-07 22:27     ` Luis R. Rodriguez
2017-06-06 22:59   ` Palmer Dabbelt
2017-06-06 22:59   ` [PATCH 09/17] clocksource/timer-riscv: New RISC-V Clocksource Palmer Dabbelt
2017-06-06 22:59     ` Palmer Dabbelt
2017-06-07  7:12     ` Geert Uytterhoeven
2017-06-07  7:25       ` Arnd Bergmann
2017-06-23 23:24         ` Palmer Dabbelt
2017-06-23 23:24           ` Palmer Dabbelt
2017-06-07  9:43     ` Marc Zyngier
2017-06-24  2:02       ` Palmer Dabbelt
2017-06-24  2:02         ` Palmer Dabbelt
2017-06-06 23:00   ` [PATCH 10/17] irqchip: New RISC-V PLIC Driver Palmer Dabbelt
2017-06-06 23:00     ` Palmer Dabbelt
2017-06-07  7:13     ` Geert Uytterhoeven
2017-06-07  7:55       ` Arnd Bergmann
2017-06-24  0:45         ` Palmer Dabbelt
2017-06-24  0:45           ` Palmer Dabbelt
2017-06-07 10:52     ` Marc Zyngier
2017-06-09 13:47       ` Will Deacon
2017-06-27  1:09         ` Palmer Dabbelt
2017-06-27  1:09           ` Palmer Dabbelt
2017-06-25 20:49       ` Palmer Dabbelt
2017-06-25 20:49         ` Palmer Dabbelt
2017-06-06 23:00   ` Palmer Dabbelt
2017-06-06 23:00   ` [PATCH 11/17] irqchip: RISC-V Local Interrupt Controller Driver Palmer Dabbelt
2017-06-06 23:00     ` Palmer Dabbelt
2017-06-07  7:14     ` Geert Uytterhoeven
2017-06-06 23:00   ` [PATCH 12/17] tty: New RISC-V SBI Console Driver Palmer Dabbelt
2017-06-06 23:00     ` Palmer Dabbelt
2017-06-07  7:15     ` Geert Uytterhoeven
2017-06-07  7:58       ` Arnd Bergmann
2017-06-24  0:45         ` Palmer Dabbelt
2017-06-24  0:45           ` Palmer Dabbelt
2017-06-06 23:00   ` Palmer Dabbelt
2017-06-06 23:00   ` [PATCH 13/17] RISC-V: Add include subdirectory Palmer Dabbelt
2017-06-06 23:00     ` Palmer Dabbelt
2017-06-07  8:12     ` Arnd Bergmann
2017-06-24  2:01       ` Palmer Dabbelt
2017-06-24  2:01         ` Palmer Dabbelt
2017-06-24 15:42         ` Benjamin Herrenschmidt
2017-06-24 21:32           ` [patches] " Palmer Dabbelt
2017-06-24 21:32             ` Palmer Dabbelt
2017-06-25  3:01             ` Benjamin Herrenschmidt
2017-06-07 11:54     ` Peter Zijlstra
2017-06-07 12:25       ` Peter Zijlstra
2017-06-07 12:06     ` Peter Zijlstra
2017-06-07 12:18       ` Peter Zijlstra
2017-06-07 12:36       ` Peter Zijlstra
2017-06-07 12:58         ` Peter Zijlstra
2017-06-07 13:16           ` Will Deacon
2017-06-26 20:07             ` Palmer Dabbelt
2017-06-26 20:07               ` Palmer Dabbelt
2017-06-27  0:07               ` Daniel Lustig
2017-06-27  8:48                 ` Will Deacon
2017-06-07 16:35           ` Peter Zijlstra
2017-06-26 20:07           ` Palmer Dabbelt
2017-06-26 20:07             ` Palmer Dabbelt
2017-06-07 12:42     ` Peter Zijlstra
2017-06-07 13:17     ` Peter Zijlstra
2017-06-09  8:16       ` Peter Zijlstra
2017-06-26 20:07       ` Palmer Dabbelt
2017-06-26 20:07         ` Palmer Dabbelt
2017-06-06 23:00   ` [PATCH 14/17] RISC-V: lib files Palmer Dabbelt
2017-06-06 23:00     ` Palmer Dabbelt
2017-06-06 23:00   ` [PATCH 15/17] RISC-V: Add mm subdirectory Palmer Dabbelt
2017-06-06 23:00     ` Palmer Dabbelt
2017-06-06 23:00   ` [PATCH 16/17] RISC-V: Add kernel subdirectory Palmer Dabbelt
2017-06-06 23:00     ` Palmer Dabbelt
2017-06-06 23:00   ` Palmer Dabbelt
2017-06-06 23:00   ` [PATCH 17/17] RISC-V: Makefile and Kconfig Palmer Dabbelt
2017-06-06 23:00   ` Palmer Dabbelt
2017-06-06 23:00     ` Palmer Dabbelt
2017-06-07  9:23   ` RISC-V Linux Port v2 Will Deacon
2017-06-07 21:54     ` Palmer Dabbelt
2017-06-07 21:54       ` Palmer Dabbelt
2017-06-08 10:26       ` Will Deacon
2017-06-08 18:16         ` Palmer Dabbelt
2017-06-08 18:16           ` Palmer Dabbelt
2017-06-28 18:55   ` RISC-V Linux Port v3 Palmer Dabbelt
2017-06-28 18:55     ` Palmer Dabbelt
2017-06-28 18:55     ` [PATCH 1/9] RISC-V: Init and Halt Code Palmer Dabbelt
2017-06-28 18:55       ` Palmer Dabbelt
2017-06-29  9:44       ` Geert Uytterhoeven
2017-06-29  9:44         ` Geert Uytterhoeven
2017-06-29 22:52         ` Palmer Dabbelt
2017-06-29 22:52           ` Palmer Dabbelt
2017-06-28 18:55     ` Palmer Dabbelt
2017-06-28 18:55     ` [PATCH 2/9] RISC-V: Atomic and Locking Code Palmer Dabbelt
2017-06-28 18:55       ` Palmer Dabbelt
2017-06-28 18:55     ` [PATCH 3/9] RISC-V: Generic library routines and assembly Palmer Dabbelt
2017-06-28 18:55       ` Palmer Dabbelt
2017-06-28 18:55     ` [PATCH 4/9] RISC-V: ELF and module implementation Palmer Dabbelt
2017-06-28 18:55     ` Palmer Dabbelt
2017-06-28 18:55       ` Palmer Dabbelt
2017-06-28 18:55     ` Palmer Dabbelt [this message]
2017-06-28 18:55       ` [PATCH 5/9] RISC-V: Task implementation Palmer Dabbelt
2017-06-28 23:32       ` James Hogan
2017-06-28 23:32         ` James Hogan
2017-06-29 22:52         ` Palmer Dabbelt
2017-06-29 22:52           ` Palmer Dabbelt
2017-06-29  8:22       ` Tobias Klauser
2017-06-29 22:52         ` Palmer Dabbelt
2017-06-29 22:52           ` Palmer Dabbelt
2017-06-28 18:55     ` Palmer Dabbelt
2017-06-28 18:55     ` [PATCH 6/9] RISC-V: Device, timer, IRQs, and the SBI Palmer Dabbelt
2017-06-28 18:55       ` Palmer Dabbelt
2017-06-29  8:39       ` Tobias Klauser
2017-06-29 22:52         ` Palmer Dabbelt
2017-06-29 22:52           ` Palmer Dabbelt
2017-06-30  7:57           ` Tobias Klauser
2017-06-28 18:55     ` [PATCH 7/9] RISC-V: Paging and MMU Palmer Dabbelt
2017-06-28 18:55       ` Palmer Dabbelt
2017-06-28 23:09       ` James Hogan
2017-06-28 23:09         ` James Hogan
2017-06-29 22:11         ` Palmer Dabbelt
2017-06-29 22:11           ` Palmer Dabbelt
2017-06-28 18:55     ` Palmer Dabbelt
2017-06-28 18:55     ` [PATCH 8/9] RISC-V: User-facing API Palmer Dabbelt
2017-06-28 18:55       ` Palmer Dabbelt
2017-06-28 21:49       ` Thomas Gleixner
2017-06-28 21:52         ` Thomas Gleixner
2017-06-29 17:22         ` Palmer Dabbelt
2017-06-29 17:22           ` Palmer Dabbelt
2017-06-28 22:42       ` James Hogan
2017-06-28 22:42         ` James Hogan
2017-06-29 21:42         ` Palmer Dabbelt
2017-06-29 21:42           ` Palmer Dabbelt
2017-07-03 23:06           ` James Hogan
2017-07-03 23:06             ` James Hogan
2017-07-05 16:49             ` Palmer Dabbelt
2017-07-05 16:49               ` Palmer Dabbelt
2017-06-28 18:55     ` [PATCH 9/9] RISC-V: Build Infastructure Palmer Dabbelt
2017-06-28 18:55       ` Palmer Dabbelt
2017-06-28 21:05       ` Karsten Merker
2017-06-28 21:13         ` Palmer Dabbelt
2017-06-28 21:13           ` Palmer Dabbelt
2017-06-28 21:25       ` James Hogan
2017-06-28 21:25         ` James Hogan
2017-06-29 16:29         ` Palmer Dabbelt
2017-06-29 16:29           ` Palmer Dabbelt
2017-06-28 22:54       ` James Hogan
2017-06-28 22:54         ` James Hogan
2017-06-29 22:11         ` Palmer Dabbelt
2017-06-29 22:11           ` Palmer Dabbelt
2017-06-06 22:59 ` RISC-V Linux Port v2 Palmer Dabbelt
2017-06-07  7:29 ` David Howells
2017-06-07 21:54   ` Palmer Dabbelt
2017-06-07 21:54     ` Palmer Dabbelt
2017-06-07 21:54     ` Palmer Dabbelt
2017-07-04 19:50 RISC-V Linux Port v4 Palmer Dabbelt
2017-07-04 19:50 ` [PATCH 5/9] RISC-V: Task implementation Palmer Dabbelt
2017-07-04 19:50 ` Palmer Dabbelt
2017-07-04 19:50   ` Palmer Dabbelt
