From: Gavin Shan <gshan@redhat.com>
To: kvmarm@lists.cs.columbia.edu
Cc: maz@kernel.org, shan.gavin@gmail.com, pbonzini@redhat.com
Subject: [PATCH 14/18] arm64/kvm: Implement event handler
Date: Mon, 17 Aug 2020 20:05:27 +1000	[thread overview]
Message-ID: <20200817100531.83045-15-gshan@redhat.com> (raw)
In-Reply-To: <20200817100531.83045-1-gshan@redhat.com>

This implements the event handler with the help of the KVM SDEI vCPU
event, which is represented by "struct kvm_sdei_vcpu_event". A shared
event is delivered to all VMs where it has been registered and enabled;
within each VM, the target vCPU is selected according to the routing
mode, as sketched below. A private event is delivered to the vCPUs
that are running or suspended on the current CPU.
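
For reference, the vCPU selection for a shared event reduces to
roughly the following, a condensed sketch of kvm_sdei_shared_handler()
from the hunk below, with the locking elided:

   kvm_for_each_vcpu(i, vcpu, kevent->kvm) {
           /* RM_ANY: pick the first unmasked vCPU */
           if (kevent->route_mode == SDEI_EVENT_REGISTER_RM_ANY) {
                   if (!vcpu->arch.sdei_masked) {
                           target = vcpu;
                           break;
                   }
           /* Otherwise pick the vCPU whose MPIDR affinity matches
            * the affinity given at registration, if it is unmasked.
            */
           } else if (kevent->route_affinity == kvm_vcpu_get_mpidr_aff(vcpu)) {
                   target = !vcpu->arch.sdei_masked ? vcpu : NULL;
                   break;
           }
   }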

A KVM_REQ_SDEI request is made to the vCPU whenever it receives a new
event, no matter what type the event is. With that, kvm_sdei_deliver()
is called from check_vcpu_requests(), before the vCPU reenters the
guest, to inject the SDEI event. The behaviour is defined in the SDEI
specification (v1.0):

   * x0 to x17 are saved.
   * The interrupted PC and PState are saved.
   * x0/x1/x2/x3 are set to the event number, the event parameter,
     the interrupted PC and the interrupted PState respectively.
   * PSTATE is modified as follows: DAIF=0b1111, EL=ELc, nRW=0,
     SP=1 (see the sketch after this list).
   * PC is set to the address of the event handler.
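
In terms of the PSR_* bits used by this patch, the PSTATE change maps
onto the following condensed excerpt of kvm_sdei_deliver() from the
hunk below (a sketch, not the complete function; regs holds the
interrupted context):

   pstate = regs->pstate;
   /* DAIF=0b1111: mask debug, SError, IRQ and FIQ */
   pstate |= (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT);
   /* EL=ELc, SP=1: run the handler at EL1 on SP_EL1 */
   pstate &= ~PSR_MODE_MASK;
   pstate |= PSR_MODE_EL1h;
   /* nRW=0: AArch64 execution state */
   pstate &= ~PSR_MODE32_BIT;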

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 arch/arm64/include/asm/kvm_host.h |   1 +
 arch/arm64/include/asm/kvm_sdei.h |   2 +
 arch/arm64/kvm/arm.c              |   4 +
 arch/arm64/kvm/sdei.c             | 240 +++++++++++++++++++++++++++++-
 4 files changed, 246 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2a8cfb3895f7..ba8cdc304b81 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -45,6 +45,7 @@
 #define KVM_REQ_VCPU_RESET	KVM_ARCH_REQ(2)
 #define KVM_REQ_RECORD_STEAL	KVM_ARCH_REQ(3)
 #define KVM_REQ_RELOAD_GICv4	KVM_ARCH_REQ(4)
+#define KVM_REQ_SDEI		KVM_ARCH_REQ(5)
 
 #define KVM_DIRTY_LOG_MANUAL_CAPS   (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
 				     KVM_DIRTY_LOG_INITIALLY_SET)
diff --git a/arch/arm64/include/asm/kvm_sdei.h b/arch/arm64/include/asm/kvm_sdei.h
index 6cbf4015a371..70e613941577 100644
--- a/arch/arm64/include/asm/kvm_sdei.h
+++ b/arch/arm64/include/asm/kvm_sdei.h
@@ -104,8 +104,10 @@ static inline bool kvm_sdei_num_is_valid(unsigned long num)
 }
 
 int kvm_sdei_hypercall(struct kvm_vcpu *vcpu);
+void kvm_sdei_deliver(struct kvm_vcpu *vcpu);
 void kvm_sdei_init(void);
 void kvm_sdei_create_vcpu(struct kvm_vcpu *vcpu);
+void kvm_sdei_vcpu_load(struct kvm_vcpu *vcpu);
 void kvm_sdei_destroy_vm(struct kvm *kvm);
 
 #endif /* __ARM64_KVM_SDEI_H__ */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index bb539b51cd57..a79a4343bac6 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -356,6 +356,7 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	vcpu->cpu = cpu;
 
+	kvm_sdei_vcpu_load(vcpu);
 	kvm_vgic_load(vcpu);
 	kvm_timer_vcpu_load(vcpu);
 	kvm_vcpu_load_sysregs(vcpu);
@@ -623,6 +624,9 @@ static void check_vcpu_requests(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
 			kvm_reset_vcpu(vcpu);
 
+		if (kvm_check_request(KVM_REQ_SDEI, vcpu))
+			kvm_sdei_deliver(vcpu);
+
 		/*
 		 * Clear IRQ_PENDING requests that were made to guarantee
 		 * that a VCPU sees new virtual interrupts.
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index 2d5e44bb5497..52d0f0809a37 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -151,11 +151,242 @@ static unsigned long kvm_sdei_hypercall_version(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
-static int kvm_sdei_handler(u32 num, struct pt_regs *regs, void *arg)
+void kvm_sdei_deliver(struct kvm_vcpu *vcpu)
+{
+	struct kvm_sdei_event *event;
+	struct kvm_sdei_kvm_event *kevent;
+	struct kvm_sdei_vcpu_event *tmp, *vevent = NULL;
+	struct user_pt_regs *regs;
+	unsigned long num, type, priority, pstate;
+	bool handle_critical;
+	int index, i;
+
+	spin_lock(&vcpu->arch.sdei_lock);
+
+	/* A critical event can't be preempted */
+	if (vcpu->arch.sdei_critical_event)
+		goto unlock;
+
+	/* Find a suitable event to deliver */
+	handle_critical = !!vcpu->arch.sdei_normal_event;
+	list_for_each_entry(tmp, &vcpu->arch.sdei_events, link) {
+		event = tmp->event->event;
+		priority = event->priv ? event->priv->priority :
+					 event->event->priority;
+		if (!handle_critical ||
+		    (priority == SDEI_EVENT_PRIORITY_CRITICAL)) {
+			vevent = tmp;
+			kevent = vevent->event;
+			break;
+		}
+	}
+
+	if (!vevent)
+		goto unlock;
+
+	/* Save registers: x0 -> x17, PC, PState */
+	if (priority == SDEI_EVENT_PRIORITY_CRITICAL) {
+		vcpu->arch.sdei_critical_event = vevent;
+		regs = &vcpu->arch.sdei_critical_regs;
+	} else {
+		vcpu->arch.sdei_normal_event = vevent;
+		regs = &vcpu->arch.sdei_normal_regs;
+	}
+
+	for (i = 0; i < 18; i++)
+		regs->regs[i] = vcpu_get_reg(vcpu, i);
+
+	regs->pc = *vcpu_pc(vcpu);
+	regs->pstate = *vcpu_cpsr(vcpu);
+
+	/* Inject SDEI event: x0 -> x3, PC, PState */
+	num = event->priv ? event->priv->num : event->event->event_num;
+	type = event->priv ? event->priv->type : event->event->type;
+	index = (type == SDEI_EVENT_TYPE_PRIVATE) ? vcpu->vcpu_idx : 0;
+	for (i = 0; i < 18; i++)
+		vcpu_set_reg(vcpu, i, 0);
+
+	vcpu_set_reg(vcpu, 0, num);
+	vcpu_set_reg(vcpu, 1, kevent->params[index]);
+	vcpu_set_reg(vcpu, 2, regs->pc);
+	vcpu_set_reg(vcpu, 3, regs->pstate);
+
+	pstate = regs->pstate;
+	pstate |= (PSR_D_BIT | PSR_A_BIT | PSR_I_BIT | PSR_F_BIT);
+	pstate &= ~PSR_MODE_MASK;
+	pstate |= PSR_MODE_EL1h;
+	pstate &= ~PSR_MODE32_BIT;
+
+	vcpu_write_spsr(vcpu, regs->pstate);
+	*vcpu_cpsr(vcpu) = pstate;
+	*vcpu_pc(vcpu) = kevent->entries[index];
+
+	/* Notifier */
+	if (event->priv && event->priv->notifier)
+		event->priv->notifier(vcpu, num, KVM_SDEI_STATE_DELIVERED);
+
+unlock:
+	spin_unlock(&vcpu->arch.sdei_lock);
+}
+
+static int kvm_sdei_queue_event(struct kvm_vcpu *vcpu,
+				struct kvm_sdei_kvm_event *kevent)
+{
+	struct kvm_sdei_vcpu_event *e, *vevent = NULL;
+
+	lockdep_assert_held(&kevent->event->lock);
+	lockdep_assert_held(&vcpu->arch.sdei_lock);
+
+	list_for_each_entry(e, &vcpu->arch.sdei_events, link) {
+		if (e->event == kevent) {
+			vevent = e;
+			break;
+		}
+	}
+
+	/*
+	 * If the vCPU event already exists, we just need to increase
+	 * its count. Otherwise, a new one has to be created.
+	 */
+	if (vevent) {
+		vevent->users++;
+		kevent->users++;
+		kvm_make_request(KVM_REQ_SDEI, vcpu);
+		return 0;
+	}
+
+	vevent = kzalloc(sizeof(*vevent), GFP_ATOMIC);
+	if (!vevent) {
+		pr_warn("%s: Unable to alloc memory (%lu, %u-%d)\n",
+			__func__, kevent->event->num,
+			kevent->kvm->userspace_pid, vcpu->vcpu_idx);
+		return -ENOMEM;
+	}
+
+	vevent->event = kevent;
+	vevent->users = 1;
+	kevent->users++;
+	list_add_tail(&vevent->link, &vcpu->arch.sdei_events);
+	kvm_make_request(KVM_REQ_SDEI, vcpu);
+
+	return 0;
+}
+
+/*
+ * Queue the shared event to the target VMs where the event has been
+ * registered and enabled. For a particular VM, the event is delivered
+ * to the first unmasked vCPU if strict routing isn't specified;
+ * otherwise, it is delivered to the vCPU matching the routing affinity.
+ *
+ * If the vCPU event already exists, we just increase its count.
+ * Otherwise, a new one is created and queued to the target vCPU.
+ */
+static int kvm_sdei_shared_handler(struct kvm_sdei_event *event)
+{
+	struct kvm_sdei_kvm_event *kevent, *n;
+	struct kvm_vcpu *target, *vcpu;
+	unsigned long affinity;
+	int i;
+
+	spin_lock(&event->lock);
+
+	rbtree_postorder_for_each_entry_safe(kevent, n,
+					     &event->root, node) {
+		if (!test_bit(0, kevent->registered) ||
+		    !test_bit(0, kevent->enabled))
+			continue;
+
+		/*
+		 * Select the target vCPU according to the routing
+		 * mode and affinity.
+		 */
+		target = NULL;
+		kvm_for_each_vcpu(i, vcpu, kevent->kvm) {
+			affinity = kvm_vcpu_get_mpidr_aff(vcpu);
+			spin_lock(&vcpu->arch.sdei_lock);
+
+			if (kevent->route_mode == SDEI_EVENT_REGISTER_RM_ANY) {
+				if (!vcpu->arch.sdei_masked) {
+					target = vcpu;
+					spin_unlock(&vcpu->arch.sdei_lock);
+					break;
+				}
+			} else if (kevent->route_affinity == affinity) {
+				target = !vcpu->arch.sdei_masked ? vcpu : NULL;
+				spin_unlock(&vcpu->arch.sdei_lock);
+				break;
+			}
+
+			spin_unlock(&vcpu->arch.sdei_lock);
+		}
+
+		if (!target)
+			continue;
+
+		spin_lock(&target->arch.sdei_lock);
+		kvm_sdei_queue_event(target, kevent);
+		spin_unlock(&target->arch.sdei_lock);
+	}
+
+	spin_unlock(&event->lock);
+
+	return 0;
+}
+
+/*
+ * A private SDEI event is delivered to the vCPUs that are
+ * running or suspended on the current CPU.
+ */
+static int kvm_sdei_private_handler(struct kvm_sdei_event *event)
 {
+	struct kvm_sdei_kvm_event *kevent, *n;
+	struct kvm_vcpu *vcpu;
+	int i;
+
+	spin_lock(&event->lock);
+
+	rbtree_postorder_for_each_entry_safe(kevent, n,
+					     &event->root, node) {
+		if (bitmap_empty(kevent->registered, KVM_MAX_VCPUS) ||
+		    bitmap_empty(kevent->enabled, KVM_MAX_VCPUS))
+			continue;
+
+		kvm_for_each_vcpu(i, vcpu, kevent->kvm) {
+			if (!test_bit(vcpu->vcpu_idx, kevent->registered) ||
+			    !test_bit(vcpu->vcpu_idx, kevent->enabled))
+				continue;
+
+			spin_lock(&vcpu->arch.sdei_lock);
+
+			if (vcpu->arch.sdei_masked ||
+			    vcpu->arch.sdei_cpu != smp_processor_id()) {
+				spin_unlock(&vcpu->arch.sdei_lock);
+				continue;
+			}
+
+			kvm_sdei_queue_event(vcpu, kevent);
+
+			spin_unlock(&vcpu->arch.sdei_lock);
+		}
+	}
+
+	spin_unlock(&event->lock);
+
 	return 0;
 }
 
+static int kvm_sdei_handler(u32 num, struct pt_regs *regs, void *arg)
+{
+	struct kvm_sdei_event *event = (struct kvm_sdei_event *)arg;
+	unsigned long type = (event->priv) ? event->priv->type :
+					     event->event->type;
+
+	if (type == SDEI_EVENT_TYPE_SHARED)
+		return kvm_sdei_shared_handler(event);
+
+	return kvm_sdei_private_handler(event);
+}
+
 static unsigned long kvm_sdei_hypercall_register(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -826,6 +1057,13 @@ void kvm_sdei_create_vcpu(struct kvm_vcpu *vcpu)
 	INIT_LIST_HEAD(&vcpu->arch.sdei_events);
 }
 
+void kvm_sdei_vcpu_load(struct kvm_vcpu *vcpu)
+{
+	spin_lock(&vcpu->arch.sdei_lock);
+	vcpu->arch.sdei_cpu = smp_processor_id();
+	spin_unlock(&vcpu->arch.sdei_lock);
+}
+
 void kvm_sdei_destroy_vm(struct kvm *kvm)
 {
 	unsigned int types = ((1 << SDEI_EVENT_TYPE_PRIVATE) |
-- 
2.23.0

Thread overview: 20+ messages
2020-08-17 10:05 [PATCH 00/18] Support SDEI Virtualization Gavin Shan
2020-08-17 10:05 ` [PATCH 01/18] drivers/firmware/sdei: Retrieve event signaled property on registration Gavin Shan
2020-08-17 10:05 ` [PATCH 02/18] drivers/firmware/sdei: Add sdei_event_get_info() Gavin Shan
2020-08-17 10:05 ` [PATCH 03/18] arm/smccc: Introduce template for inline functions Gavin Shan
2020-08-17 10:05 ` [PATCH 04/18] arm64/kvm: Add SDEI virtualization infrastructure Gavin Shan
2020-08-17 10:05 ` [PATCH 05/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_VERSION hypercall Gavin Shan
2020-08-17 10:05 ` [PATCH 06/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_EVENT_REGISTER Gavin Shan
2020-08-17 10:05 ` [PATCH 07/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_EVENT_{ENABLE, DISABLE} hypercall Gavin Shan
2020-08-17 10:05 ` [PATCH 08/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_EVENT_UNREGISTER hypercall Gavin Shan
2020-08-17 10:05 ` [PATCH 09/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_EVENT_STATUS hypercall Gavin Shan
2020-08-17 10:05 ` [PATCH 10/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_EVENT_GET_INFO hypercall Gavin Shan
2020-08-17 10:05 ` [PATCH 11/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_EVENT_ROUTING_SET hypercall Gavin Shan
2020-08-17 10:05 ` [PATCH 12/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_PE_{MASK, UNMASK} hypercall Gavin Shan
2020-08-17 10:05 ` [PATCH 13/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_{PRIVATE, SHARED}_RESET hypercall Gavin Shan
2020-08-17 10:05 ` Gavin Shan [this message]
2020-08-17 10:05 ` [PATCH 15/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercall Gavin Shan
2020-08-17 10:05 ` [PATCH 16/18] arm64/kvm: Support SDEI_1_0_FN_SDEI_EVENT_CONTEXT hypercall Gavin Shan
2020-08-17 10:05 ` [PATCH 17/18] arm64/kvm: Expose SDEI capability Gavin Shan
2020-08-17 10:05 ` [PATCH 18/18] kvm/selftests: Add SDEI test case Gavin Shan
2020-08-17 11:01 ` [PATCH 00/18] Support SDEI Virtualization Gavin Shan
