All of lore.kernel.org
 help / color / mirror / Atom feed
From: Gavin Shan <gshan@redhat.com>
To: kvmarm@lists.cs.columbia.edu
Cc: linux-kernel@vger.kernel.org, eauger@redhat.com,
	shannon.zhaosl@gmail.com, maz@kernel.org,
	Jonathan.Cameron@huawei.com, will@kernel.org,
	pbonzini@redhat.com, james.morse@arm.com, mark.rutland@arm.com,
	drjones@redhat.com, vkuznets@redhat.com, shan.gavin@gmail.com
Subject: [PATCH v5 19/22] KVM: arm64: Support SDEI ioctl commands on vCPU
Date: Tue, 22 Mar 2022 16:07:07 +0800	[thread overview]
Message-ID: <20220322080710.51727-20-gshan@redhat.com> (raw)
In-Reply-To: <20220322080710.51727-1-gshan@redhat.com>

This supports ioctl commands on a vCPU to manage the various objects.
It's primarily used by the VMM to accomplish migration. The ioctl
commands introduced by this patch are listed below:

   * KVM_SDEI_CMD_GET_VCPU_EVENT_COUNT
     Return the total count of vCPU events that have been queued
     on the target vCPU.

   * KVM_SDEI_CMD_GET_VCPU_EVENT
   * KVM_SDEI_CMD_SET_VCPU_EVENT
     Get or set vCPU events.

   * KVM_SDEI_CMD_GET_VCPU_STATE
   * KVM_SDEI_CMD_SET_VCPU_STATE
     Get or set vCPU state.

   * KVM_SDEI_CMD_INJECT_EVENT
     Inject SDEI event.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 arch/arm64/include/asm/kvm_sdei.h            |   1 +
 arch/arm64/include/uapi/asm/kvm_sdei_state.h |   9 +
 arch/arm64/kvm/arm.c                         |   3 +
 arch/arm64/kvm/sdei.c                        | 299 +++++++++++++++++++
 4 files changed, 312 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_sdei.h b/arch/arm64/include/asm/kvm_sdei.h
index 64f00cc79162..ea4f222cf73d 100644
--- a/arch/arm64/include/asm/kvm_sdei.h
+++ b/arch/arm64/include/asm/kvm_sdei.h
@@ -180,6 +180,7 @@ int kvm_sdei_inject_event(struct kvm_vcpu *vcpu,
 int kvm_sdei_cancel_event(struct kvm_vcpu *vcpu, unsigned long num);
 void kvm_sdei_deliver_event(struct kvm_vcpu *vcpu);
 long kvm_sdei_vm_ioctl(struct kvm *kvm, unsigned long arg);
+long kvm_sdei_vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long arg);
 void kvm_sdei_destroy_vcpu(struct kvm_vcpu *vcpu);
 void kvm_sdei_destroy_vm(struct kvm *kvm);
 
diff --git a/arch/arm64/include/uapi/asm/kvm_sdei_state.h b/arch/arm64/include/uapi/asm/kvm_sdei_state.h
index 2bd6d11627bc..149451c5584f 100644
--- a/arch/arm64/include/uapi/asm/kvm_sdei_state.h
+++ b/arch/arm64/include/uapi/asm/kvm_sdei_state.h
@@ -75,6 +75,12 @@ struct kvm_sdei_vcpu_state {
 #define KVM_SDEI_CMD_GET_REGISTERED_EVENT_COUNT	4
 #define KVM_SDEI_CMD_GET_REGISTERED_EVENT	5
 #define KVM_SDEI_CMD_SET_REGISTERED_EVENT	6
+#define KVM_SDEI_CMD_GET_VCPU_EVENT_COUNT	7
+#define KVM_SDEI_CMD_GET_VCPU_EVENT		8
+#define KVM_SDEI_CMD_SET_VCPU_EVENT		9
+#define KVM_SDEI_CMD_GET_VCPU_STATE		10
+#define KVM_SDEI_CMD_SET_VCPU_STATE		11
+#define KVM_SDEI_CMD_INJECT_EVENT		12
 
 struct kvm_sdei_cmd {
 	__u32                                           cmd;
@@ -85,6 +91,9 @@ struct kvm_sdei_cmd {
 	union {
 		struct kvm_sdei_exposed_event_state     *exposed_event_state;
 		struct kvm_sdei_registered_event_state  *registered_event_state;
+		struct kvm_sdei_vcpu_event_state	*vcpu_event_state;
+		struct kvm_sdei_vcpu_state		*vcpu_state;
+		__u64					num;
 	};
 };
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index ebfd504a1c08..3f532e1c4a95 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1387,6 +1387,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 
 		return kvm_arm_vcpu_finalize(vcpu, what);
 	}
+	case KVM_ARM_SDEI_COMMAND: {
+		return kvm_sdei_vcpu_ioctl(vcpu, arg);
+	}
 	default:
 		r = -EINVAL;
 	}
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index d9cf494990a9..06895ac73c24 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -1567,6 +1567,305 @@ long kvm_sdei_vm_ioctl(struct kvm *kvm, unsigned long arg)
 	return ret;
 }
 
+static long vcpu_ioctl_get_vcpu_event(struct kvm_vcpu *vcpu,
+				      struct kvm_sdei_cmd *cmd)
+{
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_vcpu_event *vcpu_event;
+	struct kvm_sdei_vcpu_event_state *state;
+	void __user *user_state = (void __user *)(cmd->vcpu_event_state);
+	unsigned int count, i;
+	long ret = 0;
+
+	if (!cmd->count)
+		return 0;
+
+	state = kcalloc(cmd->count, sizeof(*state), GFP_KERNEL_ACCOUNT);
+	if (!state)
+		return -ENOMEM;
+
+	i = 0;
+	count = cmd->count;
+	list_for_each_entry(vcpu_event, &vsdei->critical_events, link) {
+		state[i++] = vcpu_event->state;
+		if (!--count)
+			break;
+	}
+
+	if (count) {
+		list_for_each_entry(vcpu_event, &vsdei->normal_events, link) {
+			state[i++] = vcpu_event->state;
+			if (!--count)
+				break;
+		}
+	}
+
+	if (copy_to_user(user_state, state, sizeof(*state) * cmd->count))
+		ret = -EFAULT;
+
+	kfree(state);
+	return ret;
+}
+
+static long vcpu_ioctl_set_vcpu_event(struct kvm_vcpu *vcpu,
+				      struct kvm_sdei_cmd *cmd)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_exposed_event *exposed_event;
+	struct kvm_sdei_registered_event *registered_event;
+	struct kvm_sdei_vcpu_event *vcpu_event;
+	struct kvm_sdei_vcpu_event_state *state;
+	void __user *user_state = (void __user *)(cmd->vcpu_event_state);
+	unsigned int vcpu_event_count, i, j;
+	long ret = 0;
+
+	if (!cmd->count)
+		return 0;
+
+	state = kcalloc(cmd->count, sizeof(*state), GFP_KERNEL_ACCOUNT);
+	if (!state)
+		return -ENOMEM;
+
+	if (copy_from_user(state, user_state, sizeof(*state) * cmd->count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	vcpu_event_count = vsdei->critical_event_count +
+			   vsdei->normal_event_count;
+	for (i = 0; i < cmd->count; i++) {
+		if (!kvm_sdei_is_supported(state[i].num)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		/* Check if the event has been exposed */
+		exposed_event = find_exposed_event(kvm, state[i].num);
+		if (!exposed_event) {
+			ret = -ENOENT;
+			goto out;
+		}
+
+		/* Check if the event has been registered */
+		registered_event = find_registered_event(kvm, state[i].num);
+		if (!registered_event) {
+			ret = -ENOENT;
+			goto out;
+		}
+
+		/*
+		 * Calculate the total count of the vcpu event instances.
+		 * We needn't a new vcpu event instance if it is existing
+		 * or a duplicated event.
+		 */
+		vcpu_event = find_vcpu_event(vcpu, state[i].num);
+		if (vcpu_event)
+			continue;
+
+		for (j = 0; j < cmd->count; j++) {
+			if (j != i && state[j].num == state[i].num)
+				break;
+		}
+
+		if (j >= cmd->count || i < j)
+			vcpu_event_count++;
+	}
+
+	/*
+	 * Check if the required count of vcpu event instances exceeds
+	 * the limit.
+	 */
+	if (vcpu_event_count > KVM_SDEI_MAX_EVENTS) {
+		ret = -ERANGE;
+		goto out;
+	}
+
+	for (i = 0; i < cmd->count; i++) {
+		/* The vcpu event might have been existing */
+		vcpu_event = find_vcpu_event(vcpu, state[i].num);
+		if (vcpu_event) {
+			vcpu_event->state.event_count += state[i].event_count;
+			continue;
+		}
+
+		vcpu_event = kzalloc(sizeof(*vcpu_event), GFP_KERNEL_ACCOUNT);
+		if (!vcpu_event) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		registered_event = find_registered_event(kvm, state[i].num);
+		exposed_event = registered_event->exposed_event;
+
+		vcpu_event->state            = state[i];
+		vcpu_event->registered_event = registered_event;
+		vcpu_event->vcpu             = vcpu;
+
+		registered_event->vcpu_event_count++;
+		if (kvm_sdei_is_critical(exposed_event->state.priority)) {
+			list_add_tail(&vcpu_event->link,
+				      &vsdei->critical_events);
+			vsdei->critical_event_count++;
+		} else {
+			list_add_tail(&vcpu_event->link,
+				      &vsdei->normal_events);
+			vsdei->normal_event_count++;
+		}
+	}
+
+out:
+	kfree(state);
+	return ret;
+}
+
+static long vcpu_ioctl_set_vcpu_state(struct kvm_vcpu *vcpu,
+				      struct kvm_sdei_cmd *cmd)
+{
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_vcpu_event *critical_vcpu_event = NULL;
+	struct kvm_sdei_vcpu_event *normal_vcpu_event = NULL;
+	struct kvm_sdei_vcpu_state *state;
+	void __user *user_state = (void __user *)(cmd->vcpu_state);
+	long ret = 0;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL_ACCOUNT);
+	if (!state)
+		return -ENOMEM;
+
+	if (copy_from_user(state, user_state, sizeof(*state))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (kvm_sdei_is_supported(state->critical_num)) {
+		critical_vcpu_event = find_vcpu_event(vcpu,
+						      state->critical_num);
+		if (!critical_vcpu_event) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (kvm_sdei_is_supported(state->normal_num)) {
+		normal_vcpu_event = find_vcpu_event(vcpu, state->normal_num);
+		if (!normal_vcpu_event) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	vsdei->state          = *state;
+	vsdei->critical_event = critical_vcpu_event;
+	vsdei->normal_event   = normal_vcpu_event;
+
+	/*
+	 * To deliver the vCPU events if we don't have a valid handler
+	 * running. Otherwise, the vCPU events should be delivered when
+	 * the running handler is completed.
+	 */
+	if (!vsdei->critical_event && !vsdei->normal_event &&
+	    (vsdei->critical_event_count + vsdei->normal_event_count) > 0)
+		kvm_make_request(KVM_REQ_SDEI, vcpu);
+
+out:
+	kfree(state);
+	return ret;
+}
+
+static long vcpu_ioctl_inject_event(struct kvm_vcpu *vcpu,
+				    struct kvm_sdei_cmd *cmd)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_exposed_event *exposed_event;
+	struct kvm_sdei_registered_event *registered_event;
+	int index;
+
+	if (!kvm_sdei_is_supported(cmd->num))
+		return -EINVAL;
+
+	registered_event = find_registered_event(kvm, cmd->num);
+	if (!registered_event)
+		return -ENOENT;
+
+	exposed_event = registered_event->exposed_event;
+	index = kvm_sdei_vcpu_index(vcpu, exposed_event);
+	if (!kvm_sdei_is_registered(registered_event, index) ||
+	    !kvm_sdei_is_enabled(registered_event, index) ||
+	    kvm_sdei_is_unregister_pending(registered_event, index))
+		return -EPERM;
+
+	if (vsdei->state.masked)
+		return -EPERM;
+
+	return do_inject_event(vcpu, registered_event, false);
+}
+
+long kvm_sdei_vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long arg)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_cmd *cmd = NULL;
+	void __user *argp = (void __user *)arg;
+	long ret = 0;
+
+	if (!(ksdei && vsdei)) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL_ACCOUNT);
+	if (!cmd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(cmd, argp, sizeof(*cmd))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	spin_lock(&ksdei->lock);
+	spin_lock(&vsdei->lock);
+
+	switch (cmd->cmd) {
+	case KVM_SDEI_CMD_GET_VCPU_EVENT_COUNT:
+		cmd->count = vsdei->critical_event_count +
+			     vsdei->normal_event_count;
+		if (copy_to_user(argp, cmd, sizeof(*cmd)))
+			ret = -EFAULT;
+		break;
+	case KVM_SDEI_CMD_GET_VCPU_EVENT:
+		ret = vcpu_ioctl_get_vcpu_event(vcpu, cmd);
+		break;
+	case KVM_SDEI_CMD_SET_VCPU_EVENT:
+		ret = vcpu_ioctl_set_vcpu_event(vcpu, cmd);
+		break;
+	case KVM_SDEI_CMD_GET_VCPU_STATE:
+		if (copy_to_user(cmd->vcpu_state, &vsdei->state,
+				 sizeof(vsdei->state)))
+			ret = -EFAULT;
+		break;
+	case KVM_SDEI_CMD_SET_VCPU_STATE:
+		ret = vcpu_ioctl_set_vcpu_state(vcpu, cmd);
+		break;
+	case KVM_SDEI_CMD_INJECT_EVENT:
+		ret = vcpu_ioctl_inject_event(vcpu, cmd);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	spin_unlock(&vsdei->lock);
+	spin_unlock(&ksdei->lock);
+
+out:
+	kfree(cmd);
+	return ret;
+}
+
 void kvm_sdei_destroy_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-- 
2.23.0


WARNING: multiple messages have this Message-ID (diff)
From: Gavin Shan <gshan@redhat.com>
To: kvmarm@lists.cs.columbia.edu
Cc: maz@kernel.org, linux-kernel@vger.kernel.org, eauger@redhat.com,
	shan.gavin@gmail.com, Jonathan.Cameron@huawei.com,
	pbonzini@redhat.com, vkuznets@redhat.com, will@kernel.org
Subject: [PATCH v5 19/22] KVM: arm64: Support SDEI ioctl commands on vCPU
Date: Tue, 22 Mar 2022 16:07:07 +0800	[thread overview]
Message-ID: <20220322080710.51727-20-gshan@redhat.com> (raw)
In-Reply-To: <20220322080710.51727-1-gshan@redhat.com>

This supports ioctl commands on a vCPU to manage the various objects.
It's primarily used by the VMM to accomplish migration. The ioctl
commands introduced by this patch are listed below:

   * KVM_SDEI_CMD_GET_VCPU_EVENT_COUNT
     Return the total count of vCPU events that have been queued
     on the target vCPU.

   * KVM_SDEI_CMD_GET_VCPU_EVENT
   * KVM_SDEI_CMD_SET_VCPU_EVENT
     Get or set vCPU events.

   * KVM_SDEI_CMD_GET_VCPU_STATE
   * KVM_SDEI_CMD_SET_VCPU_STATE
     Get or set vCPU state.

   * KVM_SDEI_CMD_INJECT_EVENT
     Inject SDEI event.

Signed-off-by: Gavin Shan <gshan@redhat.com>
---
 arch/arm64/include/asm/kvm_sdei.h            |   1 +
 arch/arm64/include/uapi/asm/kvm_sdei_state.h |   9 +
 arch/arm64/kvm/arm.c                         |   3 +
 arch/arm64/kvm/sdei.c                        | 299 +++++++++++++++++++
 4 files changed, 312 insertions(+)

diff --git a/arch/arm64/include/asm/kvm_sdei.h b/arch/arm64/include/asm/kvm_sdei.h
index 64f00cc79162..ea4f222cf73d 100644
--- a/arch/arm64/include/asm/kvm_sdei.h
+++ b/arch/arm64/include/asm/kvm_sdei.h
@@ -180,6 +180,7 @@ int kvm_sdei_inject_event(struct kvm_vcpu *vcpu,
 int kvm_sdei_cancel_event(struct kvm_vcpu *vcpu, unsigned long num);
 void kvm_sdei_deliver_event(struct kvm_vcpu *vcpu);
 long kvm_sdei_vm_ioctl(struct kvm *kvm, unsigned long arg);
+long kvm_sdei_vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long arg);
 void kvm_sdei_destroy_vcpu(struct kvm_vcpu *vcpu);
 void kvm_sdei_destroy_vm(struct kvm *kvm);
 
diff --git a/arch/arm64/include/uapi/asm/kvm_sdei_state.h b/arch/arm64/include/uapi/asm/kvm_sdei_state.h
index 2bd6d11627bc..149451c5584f 100644
--- a/arch/arm64/include/uapi/asm/kvm_sdei_state.h
+++ b/arch/arm64/include/uapi/asm/kvm_sdei_state.h
@@ -75,6 +75,12 @@ struct kvm_sdei_vcpu_state {
 #define KVM_SDEI_CMD_GET_REGISTERED_EVENT_COUNT	4
 #define KVM_SDEI_CMD_GET_REGISTERED_EVENT	5
 #define KVM_SDEI_CMD_SET_REGISTERED_EVENT	6
+#define KVM_SDEI_CMD_GET_VCPU_EVENT_COUNT	7
+#define KVM_SDEI_CMD_GET_VCPU_EVENT		8
+#define KVM_SDEI_CMD_SET_VCPU_EVENT		9
+#define KVM_SDEI_CMD_GET_VCPU_STATE		10
+#define KVM_SDEI_CMD_SET_VCPU_STATE		11
+#define KVM_SDEI_CMD_INJECT_EVENT		12
 
 struct kvm_sdei_cmd {
 	__u32                                           cmd;
@@ -85,6 +91,9 @@ struct kvm_sdei_cmd {
 	union {
 		struct kvm_sdei_exposed_event_state     *exposed_event_state;
 		struct kvm_sdei_registered_event_state  *registered_event_state;
+		struct kvm_sdei_vcpu_event_state	*vcpu_event_state;
+		struct kvm_sdei_vcpu_state		*vcpu_state;
+		__u64					num;
 	};
 };
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index ebfd504a1c08..3f532e1c4a95 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1387,6 +1387,9 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 
 		return kvm_arm_vcpu_finalize(vcpu, what);
 	}
+	case KVM_ARM_SDEI_COMMAND: {
+		return kvm_sdei_vcpu_ioctl(vcpu, arg);
+	}
 	default:
 		r = -EINVAL;
 	}
diff --git a/arch/arm64/kvm/sdei.c b/arch/arm64/kvm/sdei.c
index d9cf494990a9..06895ac73c24 100644
--- a/arch/arm64/kvm/sdei.c
+++ b/arch/arm64/kvm/sdei.c
@@ -1567,6 +1567,305 @@ long kvm_sdei_vm_ioctl(struct kvm *kvm, unsigned long arg)
 	return ret;
 }
 
+static long vcpu_ioctl_get_vcpu_event(struct kvm_vcpu *vcpu,
+				      struct kvm_sdei_cmd *cmd)
+{
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_vcpu_event *vcpu_event;
+	struct kvm_sdei_vcpu_event_state *state;
+	void __user *user_state = (void __user *)(cmd->vcpu_event_state);
+	unsigned int count, i;
+	long ret = 0;
+
+	if (!cmd->count)
+		return 0;
+
+	state = kcalloc(cmd->count, sizeof(*state), GFP_KERNEL_ACCOUNT);
+	if (!state)
+		return -ENOMEM;
+
+	i = 0;
+	count = cmd->count;
+	list_for_each_entry(vcpu_event, &vsdei->critical_events, link) {
+		state[i++] = vcpu_event->state;
+		if (!--count)
+			break;
+	}
+
+	if (count) {
+		list_for_each_entry(vcpu_event, &vsdei->normal_events, link) {
+			state[i++] = vcpu_event->state;
+			if (!--count)
+				break;
+		}
+	}
+
+	if (copy_to_user(user_state, state, sizeof(*state) * cmd->count))
+		ret = -EFAULT;
+
+	kfree(state);
+	return ret;
+}
+
+static long vcpu_ioctl_set_vcpu_event(struct kvm_vcpu *vcpu,
+				      struct kvm_sdei_cmd *cmd)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_exposed_event *exposed_event;
+	struct kvm_sdei_registered_event *registered_event;
+	struct kvm_sdei_vcpu_event *vcpu_event;
+	struct kvm_sdei_vcpu_event_state *state;
+	void __user *user_state = (void __user *)(cmd->vcpu_event_state);
+	unsigned int vcpu_event_count, i, j;
+	long ret = 0;
+
+	if (!cmd->count)
+		return 0;
+
+	state = kcalloc(cmd->count, sizeof(*state), GFP_KERNEL_ACCOUNT);
+	if (!state)
+		return -ENOMEM;
+
+	if (copy_from_user(state, user_state, sizeof(*state) * cmd->count)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	vcpu_event_count = vsdei->critical_event_count +
+			   vsdei->normal_event_count;
+	for (i = 0; i < cmd->count; i++) {
+		if (!kvm_sdei_is_supported(state[i].num)) {
+			ret = -EINVAL;
+			goto out;
+		}
+
+		/* Check if the event has been exposed */
+		exposed_event = find_exposed_event(kvm, state[i].num);
+		if (!exposed_event) {
+			ret = -ENOENT;
+			goto out;
+		}
+
+		/* Check if the event has been registered */
+		registered_event = find_registered_event(kvm, state[i].num);
+		if (!registered_event) {
+			ret = -ENOENT;
+			goto out;
+		}
+
+		/*
+		 * Calculate the total count of the vcpu event instances.
+		 * We needn't a new vcpu event instance if it is existing
+		 * or a duplicated event.
+		 */
+		vcpu_event = find_vcpu_event(vcpu, state[i].num);
+		if (vcpu_event)
+			continue;
+
+		for (j = 0; j < cmd->count; j++) {
+			if (j != i && state[j].num == state[i].num)
+				break;
+		}
+
+		if (j >= cmd->count || i < j)
+			vcpu_event_count++;
+	}
+
+	/*
+	 * Check if the required count of vcpu event instances exceeds
+	 * the limit.
+	 */
+	if (vcpu_event_count > KVM_SDEI_MAX_EVENTS) {
+		ret = -ERANGE;
+		goto out;
+	}
+
+	for (i = 0; i < cmd->count; i++) {
+		/* The vcpu event might have been existing */
+		vcpu_event = find_vcpu_event(vcpu, state[i].num);
+		if (vcpu_event) {
+			vcpu_event->state.event_count += state[i].event_count;
+			continue;
+		}
+
+		vcpu_event = kzalloc(sizeof(*vcpu_event), GFP_KERNEL_ACCOUNT);
+		if (!vcpu_event) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		registered_event = find_registered_event(kvm, state[i].num);
+		exposed_event = registered_event->exposed_event;
+
+		vcpu_event->state            = state[i];
+		vcpu_event->registered_event = registered_event;
+		vcpu_event->vcpu             = vcpu;
+
+		registered_event->vcpu_event_count++;
+		if (kvm_sdei_is_critical(exposed_event->state.priority)) {
+			list_add_tail(&vcpu_event->link,
+				      &vsdei->critical_events);
+			vsdei->critical_event_count++;
+		} else {
+			list_add_tail(&vcpu_event->link,
+				      &vsdei->normal_events);
+			vsdei->normal_event_count++;
+		}
+	}
+
+out:
+	kfree(state);
+	return ret;
+}
+
+static long vcpu_ioctl_set_vcpu_state(struct kvm_vcpu *vcpu,
+				      struct kvm_sdei_cmd *cmd)
+{
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_vcpu_event *critical_vcpu_event = NULL;
+	struct kvm_sdei_vcpu_event *normal_vcpu_event = NULL;
+	struct kvm_sdei_vcpu_state *state;
+	void __user *user_state = (void __user *)(cmd->vcpu_state);
+	long ret = 0;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL_ACCOUNT);
+	if (!state)
+		return -ENOMEM;
+
+	if (copy_from_user(state, user_state, sizeof(*state))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	if (kvm_sdei_is_supported(state->critical_num)) {
+		critical_vcpu_event = find_vcpu_event(vcpu,
+						      state->critical_num);
+		if (!critical_vcpu_event) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	if (kvm_sdei_is_supported(state->normal_num)) {
+		normal_vcpu_event = find_vcpu_event(vcpu, state->normal_num);
+		if (!normal_vcpu_event) {
+			ret = -EINVAL;
+			goto out;
+		}
+	}
+
+	vsdei->state          = *state;
+	vsdei->critical_event = critical_vcpu_event;
+	vsdei->normal_event   = normal_vcpu_event;
+
+	/*
+	 * To deliver the vCPU events if we don't have a valid handler
+	 * running. Otherwise, the vCPU events should be delivered when
+	 * the running handler is completed.
+	 */
+	if (!vsdei->critical_event && !vsdei->normal_event &&
+	    (vsdei->critical_event_count + vsdei->normal_event_count) > 0)
+		kvm_make_request(KVM_REQ_SDEI, vcpu);
+
+out:
+	kfree(state);
+	return ret;
+}
+
+static long vcpu_ioctl_inject_event(struct kvm_vcpu *vcpu,
+				    struct kvm_sdei_cmd *cmd)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_exposed_event *exposed_event;
+	struct kvm_sdei_registered_event *registered_event;
+	int index;
+
+	if (!kvm_sdei_is_supported(cmd->num))
+		return -EINVAL;
+
+	registered_event = find_registered_event(kvm, cmd->num);
+	if (!registered_event)
+		return -ENOENT;
+
+	exposed_event = registered_event->exposed_event;
+	index = kvm_sdei_vcpu_index(vcpu, exposed_event);
+	if (!kvm_sdei_is_registered(registered_event, index) ||
+	    !kvm_sdei_is_enabled(registered_event, index) ||
+	    kvm_sdei_is_unregister_pending(registered_event, index))
+		return -EPERM;
+
+	if (vsdei->state.masked)
+		return -EPERM;
+
+	return do_inject_event(vcpu, registered_event, false);
+}
+
+long kvm_sdei_vcpu_ioctl(struct kvm_vcpu *vcpu, unsigned long arg)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_sdei_kvm *ksdei = kvm->arch.sdei;
+	struct kvm_sdei_vcpu *vsdei = vcpu->arch.sdei;
+	struct kvm_sdei_cmd *cmd = NULL;
+	void __user *argp = (void __user *)arg;
+	long ret = 0;
+
+	if (!(ksdei && vsdei)) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL_ACCOUNT);
+	if (!cmd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (copy_from_user(cmd, argp, sizeof(*cmd))) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	spin_lock(&ksdei->lock);
+	spin_lock(&vsdei->lock);
+
+	switch (cmd->cmd) {
+	case KVM_SDEI_CMD_GET_VCPU_EVENT_COUNT:
+		cmd->count = vsdei->critical_event_count +
+			     vsdei->normal_event_count;
+		if (copy_to_user(argp, cmd, sizeof(*cmd)))
+			ret = -EFAULT;
+		break;
+	case KVM_SDEI_CMD_GET_VCPU_EVENT:
+		ret = vcpu_ioctl_get_vcpu_event(vcpu, cmd);
+		break;
+	case KVM_SDEI_CMD_SET_VCPU_EVENT:
+		ret = vcpu_ioctl_set_vcpu_event(vcpu, cmd);
+		break;
+	case KVM_SDEI_CMD_GET_VCPU_STATE:
+		if (copy_to_user(cmd->vcpu_state, &vsdei->state,
+				 sizeof(vsdei->state)))
+			ret = -EFAULT;
+		break;
+	case KVM_SDEI_CMD_SET_VCPU_STATE:
+		ret = vcpu_ioctl_set_vcpu_state(vcpu, cmd);
+		break;
+	case KVM_SDEI_CMD_INJECT_EVENT:
+		ret = vcpu_ioctl_inject_event(vcpu, cmd);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	spin_unlock(&vsdei->lock);
+	spin_unlock(&ksdei->lock);
+
+out:
+	kfree(cmd);
+	return ret;
+}
+
 void kvm_sdei_destroy_vcpu(struct kvm_vcpu *vcpu)
 {
 	struct kvm *kvm = vcpu->kvm;
-- 
2.23.0

_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm

  parent reply	other threads:[~2022-03-22  8:11 UTC|newest]

Thread overview: 98+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-03-22  8:06 [PATCH v5 00/22] Support SDEI Virtualization Gavin Shan
2022-03-22  8:06 ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 01/22] KVM: arm64: Introduce template for inline functions Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22 19:42   ` Oliver Upton
2022-03-22 19:42     ` Oliver Upton
2022-03-23 12:16     ` Gavin Shan
2022-03-23 12:16       ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 02/22] KVM: arm64: Add SDEI virtualization infrastructure Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22 22:43   ` Oliver Upton
2022-03-22 22:43     ` Oliver Upton
2022-03-23 12:40     ` Gavin Shan
2022-03-23 12:40       ` Gavin Shan
2022-03-23 17:11   ` Oliver Upton
2022-03-23 17:11     ` Oliver Upton
2022-03-24  6:54     ` Gavin Shan
2022-03-24  6:54       ` Gavin Shan
2022-03-24  9:04       ` Oliver Upton
2022-03-24  9:04         ` Oliver Upton
2022-03-25  6:07         ` Gavin Shan
2022-03-25  6:07           ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 03/22] KVM: arm64: Support SDEI_VERSION hypercall Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22 18:04   ` Oliver Upton
2022-03-22 18:04     ` Oliver Upton
2022-03-23 12:46     ` Gavin Shan
2022-03-23 12:46       ` Gavin Shan
2022-03-23 16:31       ` Oliver Upton
2022-03-23 16:31         ` Oliver Upton
2022-03-24  4:07         ` Gavin Shan
2022-03-24  4:07           ` Gavin Shan
2022-03-24  7:48           ` Oliver Upton
2022-03-24  7:48             ` Oliver Upton
2022-03-25  6:11             ` Gavin Shan
2022-03-25  6:11               ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 04/22] KVM: arm64: Support SDEI_EVENT_REGISTER hypercall Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 05/22] KVM: arm64: Support SDEI_EVENT_{ENABLE, DISABLE} hypercall Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 06/22] KVM: arm64: Support SDEI_EVENT_CONTEXT hypercall Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 07/22] KVM: arm64: Support SDEI_EVENT_UNREGISTER hypercall Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 08/22] KVM: arm64: Support SDEI_EVENT_STATUS hypercall Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 09/22] KVM: arm64: Support SDEI_EVENT_GET_INFO hypercall Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 10/22] KVM: arm64: Support SDEI_EVENT_ROUTING_SET hypercall Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22  8:06 ` [PATCH v5 11/22] KVM: arm64: Support SDEI_PE_{MASK, UNMASK} hypercall Gavin Shan
2022-03-22  8:06   ` Gavin Shan
2022-03-22  8:07 ` [PATCH v5 12/22] KVM: arm64: Support SDEI_{PRIVATE, SHARED}_RESET Gavin Shan
2022-03-22  8:07   ` Gavin Shan
2022-03-22  8:07 ` [PATCH v5 13/22] KVM: arm64: Support SDEI_FEATURES hypercall Gavin Shan
2022-03-22  8:07   ` Gavin Shan
2022-03-22  8:07 ` [PATCH v5 14/22] KVM: arm64: Support SDEI event injection, delivery and cancellation Gavin Shan
2022-03-22  8:07   ` Gavin Shan
2022-03-22  8:07 ` [PATCH v5 15/22] KVM: arm64: Support SDEI_EVENT_SIGNAL hypercall Gavin Shan
2022-03-22  8:07   ` Gavin Shan
2022-03-22 23:06   ` Oliver Upton
2022-03-22 23:06     ` Oliver Upton
2022-03-23 12:52     ` Gavin Shan
2022-03-23 12:52       ` Gavin Shan
2022-03-22  8:07 ` [PATCH v5 16/22] KVM: arm64: Support SDEI_EVENT_{COMPLETE,COMPLETE_AND_RESUME} hypercall Gavin Shan
2022-03-22  8:07   ` [PATCH v5 16/22] KVM: arm64: Support SDEI_EVENT_{COMPLETE, COMPLETE_AND_RESUME} hypercall Gavin Shan
2022-03-22  8:07 ` [PATCH v5 17/22] KVM: arm64: Support SDEI event notifier Gavin Shan
2022-03-22  8:07   ` Gavin Shan
2022-03-22  8:07 ` [PATCH v5 18/22] KVM: arm64: Support SDEI ioctl commands on VM Gavin Shan
2022-03-22  8:07   ` Gavin Shan
2022-03-23 17:28   ` Oliver Upton
2022-03-23 17:28     ` Oliver Upton
2022-03-25  6:59     ` Gavin Shan
2022-03-25  6:59       ` Gavin Shan
2022-03-25  7:35       ` Oliver Upton
2022-03-25  7:35         ` Oliver Upton
2022-03-25 10:14         ` Gavin Shan
2022-03-25 10:14           ` Gavin Shan
2022-03-22  8:07 ` Gavin Shan [this message]
2022-03-22  8:07   ` [PATCH v5 19/22] KVM: arm64: Support SDEI ioctl commands on vCPU Gavin Shan
2022-03-23 17:55   ` Oliver Upton
2022-03-23 17:55     ` Oliver Upton
2022-03-25  7:59     ` Gavin Shan
2022-03-25  7:59       ` Gavin Shan
2022-03-25  8:37       ` Oliver Upton
2022-03-25  8:37         ` Oliver Upton
2022-03-25 10:23         ` Gavin Shan
2022-03-25 10:23           ` Gavin Shan
2022-03-22  8:07 ` [PATCH v5 20/22] KVM: arm64: Export SDEI capability Gavin Shan
2022-03-22  8:07   ` Gavin Shan
2022-03-22  8:07 ` [PATCH v5 21/22] KVM: arm64: Add SDEI document Gavin Shan
2022-03-22  8:07   ` Gavin Shan
2022-03-22  8:07 ` [PATCH v5 22/22] KVM: selftests: Add SDEI test case Gavin Shan
2022-03-22  8:07   ` Gavin Shan
2022-03-22 18:13 ` [PATCH v5 00/22] Support SDEI Virtualization Oliver Upton
2022-03-22 18:13   ` Oliver Upton
2022-03-23 12:57   ` Gavin Shan
2022-03-23 12:57     ` Gavin Shan

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220322080710.51727-20-gshan@redhat.com \
    --to=gshan@redhat.com \
    --cc=Jonathan.Cameron@huawei.com \
    --cc=drjones@redhat.com \
    --cc=eauger@redhat.com \
    --cc=james.morse@arm.com \
    --cc=kvmarm@lists.cs.columbia.edu \
    --cc=linux-kernel@vger.kernel.org \
    --cc=mark.rutland@arm.com \
    --cc=maz@kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=shan.gavin@gmail.com \
    --cc=shannon.zhaosl@gmail.com \
    --cc=vkuznets@redhat.com \
    --cc=will@kernel.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.