From: Anup Patel <Anup.Patel@wdc.com>
To: Palmer Dabbelt <palmer@sifive.com>,
	Paul Walmsley <paul.walmsley@sifive.com>,
	Paolo Bonzini <pbonzini@redhat.com>, Radim K <rkrcmar@redhat.com>
Cc: Daniel Lezcano <daniel.lezcano@linaro.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Atish Patra <Atish.Patra@wdc.com>,
	Alistair Francis <Alistair.Francis@wdc.com>,
	Damien Le Moal <Damien.LeMoal@wdc.com>,
	Christoph Hellwig <hch@infradead.org>,
	Anup Patel <anup@brainfault.org>,
	"kvm@vger.kernel.org" <kvm@vger.kernel.org>,
	"linux-riscv@lists.infradead.org"
	<linux-riscv@lists.infradead.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	Anup Patel <Anup.Patel@wdc.com>
Subject: [RFC PATCH 05/16] RISC-V: KVM: Implement VCPU interrupts and requests handling
Date: Mon, 29 Jul 2019 11:56:53 +0000
Message-ID: <20190729115544.17895-6-anup.patel@wdc.com>
In-Reply-To: <20190729115544.17895-1-anup.patel@wdc.com>

This patch implements VCPU interrupts and VCPU requests, both of which
are asynchronous events.

VCPU interrupts can be set/unset using the KVM_INTERRUPT ioctl from
user-space. In the future, the in-kernel IRQCHIP emulation will use the
kvm_riscv_vcpu_set_interrupt() and kvm_riscv_vcpu_unset_interrupt()
functions to set/unset VCPU interrupts.

Important VCPU requests implemented by this patch are:
KVM_REQ_IRQ_PENDING - set whenever a VCPU interrupt is pending
KVM_REQ_SLEEP       - set whenever the VCPU goes to the sleep state
KVM_REQ_VCPU_RESET  - set whenever a VCPU reset is requested

The WFI trap-and-emulate handling (added later in this series) will use
the KVM_REQ_SLEEP request and the kvm_riscv_vcpu_has_interrupt()
function.
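
As a rough sketch of what that could look like (a hypothetical
function, not the actual handler from the later patch):

/*
 * Hypothetical WFI emulation: if the guest executes WFI while no
 * enabled interrupt is pending, block the VCPU until it is kicked
 * or woken up again.
 */
static void wfi_trap_emulate(struct kvm_vcpu *vcpu)
{
	if (!kvm_riscv_vcpu_has_interrupt(vcpu)) {
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
	}
}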

The KVM_REQ_VCPU_RESET request will be used by the SBI emulation (added
later in this series) to power up a VCPU that is in the power-off
state. User-space can use the KVM_GET_MP_STATE/KVM_SET_MP_STATE ioctls
to get/set the power state of a VCPU.
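
For example, user-space could stop and later restart a VCPU as follows
(a sketch with hypothetical helper names; vcpu_fd is again a VCPU file
descriptor, error handling omitted):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Power the VCPU off (STOPPED) or back on (RUNNABLE). */
static int vcpu_set_stopped(int vcpu_fd, int stopped)
{
	struct kvm_mp_state mp_state = {
		.mp_state = stopped ? KVM_MP_STATE_STOPPED
				    : KVM_MP_STATE_RUNNABLE,
	};

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
}

/* Return 1 if the VCPU is powered off, 0 if runnable, -1 on error. */
static int vcpu_is_stopped(int vcpu_fd)
{
	struct kvm_mp_state mp_state;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp_state) < 0)
		return -1;

	return mp_state.mp_state == KVM_MP_STATE_STOPPED;
}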

Signed-off-by: Anup Patel <anup.patel@wdc.com>
---
 arch/riscv/include/asm/kvm_host.h |  13 +++
 arch/riscv/include/uapi/asm/kvm.h |   3 +
 arch/riscv/kvm/vcpu.c             | 174 +++++++++++++++++++++++++++---
 3 files changed, 177 insertions(+), 13 deletions(-)

diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 244eabe62710..aa89f1922da1 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -125,6 +125,13 @@ struct kvm_vcpu_arch {
 	/* CPU CSR context upon Guest VCPU reset */
 	struct kvm_vcpu_csr guest_reset_csr;
 
+	/* VCPU interrupts */
+	raw_spinlock_t irqs_lock;
+	unsigned long irqs_pending;
+
+	/* VCPU power-off state */
+	bool power_off;
+
 	/* Don't run the VCPU (blocked) */
 	bool pause;
 };
@@ -146,6 +153,12 @@ int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 static inline void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch) {}
 
+int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
+int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
+bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+
 void kvm_riscv_halt_guest(struct kvm *kvm);
 void kvm_riscv_resume_guest(struct kvm *kvm);
 
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index d15875818b6e..6dbc056d58ba 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -18,6 +18,9 @@
 
 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1
 
+#define KVM_INTERRUPT_SET	-1U
+#define KVM_INTERRUPT_UNSET	-2U
+
 /* for KVM_GET_REGS and KVM_SET_REGS */
 struct kvm_regs {
 };
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index 1ae806f28c0e..c6f57caa95f0 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -42,6 +42,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 
 static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 {
+	unsigned long f;
 	struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
 	struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr;
 	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
@@ -50,6 +51,10 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu)
 	memcpy(csr, reset_csr, sizeof(*csr));
 
 	memcpy(cntx, reset_cntx, sizeof(*cntx));
+
+	raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+	vcpu->arch.irqs_pending = 0;
+	raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
 }
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
@@ -103,6 +108,9 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	cntx->hstatus |= HSTATUS_SP2P;
 	cntx->hstatus |= HSTATUS_SPV;
 
+	/* Setup VCPU irqs lock */
+	raw_spin_lock_init(&vcpu->arch.irqs_lock);
+
 	/* Setup reset state of HEDELEG and HIDELEG CSRs */
 	csr = &vcpu->arch.guest_reset_csr;
 	csr->hedeleg = 0;
@@ -131,8 +139,15 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 
 int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
 {
-	/* TODO: */
-	return 0;
+	int ret;
+	unsigned long f, irqs;
+
+	raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+	irqs = vcpu->arch.irqs_pending & vcpu->arch.guest_csr.vsie;
+	ret = (irqs & (1UL << IRQ_S_TIMER)) ? 1 : 0;
+	raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+
+	return ret;
 }
 
 void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
@@ -145,20 +160,18 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 {
-	/* TODO: */
-	return 0;
+	return (kvm_riscv_vcpu_has_interrupt(vcpu) &&
+		!vcpu->arch.power_off && !vcpu->arch.pause);
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
-	/* TODO: */
-	return 0;
+	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
 }
 
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
-	/* TODO: */
-	return false;
+	return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false;
 }
 
 bool kvm_arch_has_vcpu_debugfs(void)
@@ -179,7 +192,21 @@ vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
 long kvm_arch_vcpu_async_ioctl(struct file *filp,
 			       unsigned int ioctl, unsigned long arg)
 {
-	/* TODO; */
+	struct kvm_vcpu *vcpu = filp->private_data;
+	void __user *argp = (void __user *)arg;
+
+	if (ioctl == KVM_INTERRUPT) {
+		struct kvm_interrupt irq;
+
+		if (copy_from_user(&irq, argp, sizeof(irq)))
+			return -EFAULT;
+
+		if (irq.irq == KVM_INTERRUPT_SET)
+			return kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_S_EXT);
+		else
+			return kvm_riscv_vcpu_unset_interrupt(vcpu, IRQ_S_EXT);
+	}
+
 	return -ENOIOCTLCMD;
 }
 
@@ -228,18 +255,113 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	return -EINVAL;
 }
 
+static void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu)
+{
+	unsigned long f;
+
+	raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+	if (vcpu->arch.irqs_pending ^ vcpu->arch.guest_csr.vsip) {
+		csr_write(CSR_VSIP, vcpu->arch.irqs_pending);
+		vcpu->arch.guest_csr.vsip = vcpu->arch.irqs_pending;
+	}
+	raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+}
+
+static void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.guest_csr.vsip = csr_read(CSR_VSIP);
+	vcpu->arch.guest_csr.vsie = csr_read(CSR_VSIE);
+}
+
+int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+	unsigned long f;
+
+	if (irq != IRQ_S_SOFT &&
+	    irq != IRQ_S_TIMER &&
+	    irq != IRQ_S_EXT)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+	vcpu->arch.irqs_pending |= (1UL << irq);
+	raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+
+	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
+	kvm_vcpu_kick(vcpu);
+
+	return 0;
+}
+
+int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq)
+{
+	unsigned long f;
+
+	if (irq != IRQ_S_SOFT &&
+	    irq != IRQ_S_TIMER &&
+	    irq != IRQ_S_EXT)
+		return -EINVAL;
+
+	raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+	vcpu->arch.irqs_pending &= ~(1UL << irq);
+	raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+
+	return 0;
+}
+
+bool kvm_riscv_vcpu_has_interrupt(struct kvm_vcpu *vcpu)
+{
+	bool ret = false;
+	unsigned long f;
+
+	raw_spin_lock_irqsave(&vcpu->arch.irqs_lock, f);
+	if (vcpu->arch.irqs_pending & vcpu->arch.guest_csr.vsie)
+		ret = true;
+	raw_spin_unlock_irqrestore(&vcpu->arch.irqs_lock, f);
+
+	return ret;
+}
+
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.power_off = true;
+	kvm_make_request(KVM_REQ_SLEEP, vcpu);
+	kvm_vcpu_kick(vcpu);
+}
+
+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.power_off = false;
+	kvm_vcpu_wake_up(vcpu);
+}
+
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	/* TODO: */
+	if (vcpu->arch.power_off)
+		mp_state->mp_state = KVM_MP_STATE_STOPPED;
+	else
+		mp_state->mp_state = KVM_MP_STATE_RUNNABLE;
+
 	return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
-	/* TODO: */
-	return 0;
+	int ret = 0;
+
+	switch (mp_state->mp_state) {
+	case KVM_MP_STATE_RUNNABLE:
+		vcpu->arch.power_off = false;
+		break;
+	case KVM_MP_STATE_STOPPED:
+		kvm_riscv_vcpu_power_off(vcpu);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -263,8 +385,25 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 
 static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 {
+	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
+
 	if (kvm_request_pending(vcpu)) {
-		/* TODO: */
+		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
+			swait_event_interruptible_exclusive(*wq,
+						((!vcpu->arch.power_off) &&
+						(!vcpu->arch.pause)));
+
+			if (vcpu->arch.power_off || vcpu->arch.pause) {
+				/*
+				 * Awaken to handle a signal, request to
+				 * sleep again later.
+				 */
+				kvm_make_request(KVM_REQ_SLEEP, vcpu);
+			}
+		}
+
+		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
+			kvm_riscv_reset_vcpu(vcpu);
 
 		/*
 		 * Clear IRQ_PENDING requests that were made to guarantee
@@ -317,6 +456,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 			run->exit_reason = KVM_EXIT_INTR;
 		}
 
+		/*
+		 * We might have got VCPU interrupts updated asynchronously
+		 * so update it in HW.
+		 */
+		kvm_riscv_vcpu_flush_interrupts(vcpu);
+
 		/*
 		 * Ensure we set mode to IN_GUEST_MODE after we disable
 		 * interrupts and before the final VCPU requests check.
@@ -347,6 +492,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 		scause = csr_read(CSR_SCAUSE);
 		stval = csr_read(CSR_STVAL);
 
+		/* Syncup interrupts state with HW */
+		kvm_riscv_vcpu_sync_interrupts(vcpu);
+
 		/*
 		 * We may have taken a host interrupt in VS/VU-mode (i.e.
 		 * while executing the guest). This interrupt is still
-- 
2.17.1


