From: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
To: <linux-arm-kernel@lists.infradead.org>,
	<iommu@lists.linux-foundation.org>,
	<kvmarm@lists.cs.columbia.edu>
Cc: jean-philippe@linaro.org, maz@kernel.org, linuxarm@openeuler.org,
	alex.williamson@redhat.com, prime.zeng@hisilicon.com,
	zhangfei.gao@linaro.org
Subject: [RFC PATCH 3/5] KVM: ARM64: Add support for pinned VMIDs
Date: Mon, 22 Feb 2021 15:53:36 +0000	[thread overview]
Message-ID: <20210222155338.26132-4-shameerali.kolothum.thodi@huawei.com> (raw)
In-Reply-To: <20210222155338.26132-1-shameerali.kolothum.thodi@huawei.com>

On an ARM64 system with an SMMUv3 implementation that fully supports
the Broadcast TLB Maintenance (BTM) feature, the CPU TLB invalidation
instructions are also received by the SMMU. This is very useful when
the SMMU shares page tables with the CPU (e.g. the guest SVA use case).
For this to work, the SMMU must use the same VMID that KVM allocates
to configure the stage 2 translations.

At present, KVM VMID allocations are recycled on rollover and may
change as a result. This creates problems if the KVM VMID has to be
shared with the SMMU. Hence, we split the KVM VMID space into two:
the first half follows the normal recycle-on-rollover policy, while
the second half of the VMID space is used to allocate pinned VMIDs.
This feature is enabled via the command-line option
"kvm-arm.pinned_vmid_enable".

Signed-off-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
---
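For illustration only (not part of the commit message or the patch), a
minimal userspace sketch of the resulting partitioning, assuming a
hypothetical 16-bit VMID space; in the kernel the actual width comes
from kvm_get_vmid_bits() at runtime:

	/*
	 * Illustrative sketch: how the VMID space is partitioned when
	 * pinned VMIDs are enabled. The lower half keeps the existing
	 * recycle-on-rollover behaviour (kvm_next_vmid is masked to one
	 * bit less); the upper half is handed out as pinned VMIDs via an
	 * IDA, mirroring kvm_arm_pinned_vmid_init() below.
	 */
	#include <stdio.h>

	int main(void)
	{
		unsigned int vmid_bits = 16;	/* hypothetical; kvm_get_vmid_bits() in the kernel */
		unsigned int recycled_mask = (1U << (vmid_bits - 1)) - 1;
		unsigned int pinned_start  = 1U << (vmid_bits - 1);
		unsigned int pinned_end    = (1U << vmid_bits) - 1;

		printf("recycled VMID mask: 0x%x\n", recycled_mask);
		printf("pinned VMID range:  0x%x - 0x%x\n", pinned_start, pinned_end);
		return 0;
	}

With a 16-bit VMID space this prints a recycled mask of 0x7fff and a
pinned range of 0x8000 - 0xffff, matching kvm_pinned_vmid_start/end in
the patch.
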
 arch/arm64/include/asm/kvm_host.h |   2 +
 arch/arm64/kvm/Kconfig            |   1 +
 arch/arm64/kvm/arm.c              | 104 +++++++++++++++++++++++++++++-
 3 files changed, 106 insertions(+), 1 deletion(-)

diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0cd9f0f75c13..db6441c6a580 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,6 +25,7 @@
 #include <asm/fpsimd.h>
 #include <asm/kvm.h>
 #include <asm/kvm_asm.h>
+#include <linux/refcount.h>
 #include <asm/thread_info.h>
 
 #define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -65,6 +66,7 @@ struct kvm_vmid {
 	/* The VMID generation used for the virt. memory system */
 	u64    vmid_gen;
 	u32    vmid;
+	refcount_t   pinned;
 };
 
 struct kvm_s2_mmu {
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index 043756db8f6e..c5c52953e842 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -40,6 +40,7 @@ menuconfig KVM
 	select HAVE_KVM_VCPU_RUN_PID_CHANGE
 	select TASKSTATS
 	select TASK_DELAY_ACCT
+	select HAVE_KVM_PINNED_VMID
 	help
 	  Support hosting virtualized guest machines.
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index c0ffb019ca8b..8955968be49f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -56,6 +56,19 @@ static atomic64_t kvm_vmid_gen = ATOMIC64_INIT(1);
 static u32 kvm_next_vmid;
 static DEFINE_SPINLOCK(kvm_vmid_lock);
 
+static bool kvm_pinned_vmid_enable;
+
+static int __init early_pinned_vmid_enable(char *buf)
+{
+	return strtobool(buf, &kvm_pinned_vmid_enable);
+}
+
+early_param("kvm-arm.pinned_vmid_enable", early_pinned_vmid_enable);
+
+static DEFINE_IDA(kvm_pinned_vmids);
+static u32 kvm_pinned_vmid_start;
+static u32 kvm_pinned_vmid_end;
+
 static bool vgic_present;
 
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
@@ -475,6 +488,10 @@ void force_vm_exit(const cpumask_t *mask)
 static bool need_new_vmid_gen(struct kvm_vmid *vmid)
 {
 	u64 current_vmid_gen = atomic64_read(&kvm_vmid_gen);
+
+	if (refcount_read(&vmid->pinned))
+		return false;
+
 	smp_rmb(); /* Orders read of kvm_vmid_gen and kvm->arch.vmid */
 	return unlikely(READ_ONCE(vmid->vmid_gen) != current_vmid_gen);
 }
@@ -485,6 +502,8 @@ static bool need_new_vmid_gen(struct kvm_vmid *vmid)
  */
 static void update_vmid(struct kvm_vmid *vmid)
 {
+	unsigned int vmid_bits;
+
 	if (!need_new_vmid_gen(vmid))
 		return;
 
@@ -521,7 +540,12 @@ static void update_vmid(struct kvm_vmid *vmid)
 
 	vmid->vmid = kvm_next_vmid;
 	kvm_next_vmid++;
-	kvm_next_vmid &= (1 << kvm_get_vmid_bits()) - 1;
+	if (kvm_pinned_vmid_enable)
+		vmid_bits = kvm_get_vmid_bits() - 1;
+	else
+		vmid_bits = kvm_get_vmid_bits();
+
+	kvm_next_vmid &= (1 << vmid_bits) - 1;
 
 	smp_wmb();
 	WRITE_ONCE(vmid->vmid_gen, atomic64_read(&kvm_vmid_gen));
@@ -569,6 +593,71 @@ static int kvm_vcpu_first_run_init(struct kvm_vcpu *vcpu)
 	return ret;
 }
 
+int kvm_arch_pinned_vmid_get(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vmid *kvm_vmid;
+	int ret;
+
+	if (!kvm_pinned_vmid_enable || !atomic_read(&kvm->online_vcpus))
+		return -EINVAL;
+
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (!vcpu)
+		return -EINVAL;
+
+	kvm_vmid = &vcpu->arch.hw_mmu->vmid;
+
+	spin_lock(&kvm_vmid_lock);
+
+	if (refcount_inc_not_zero(&kvm_vmid->pinned)) {
+		spin_unlock(&kvm_vmid_lock);
+		return kvm_vmid->vmid;
+	}
+
+	ret = ida_alloc_range(&kvm_pinned_vmids, kvm_pinned_vmid_start,
+			      kvm_pinned_vmid_end, GFP_KERNEL);
+	if (ret < 0) {
+		spin_unlock(&kvm_vmid_lock);
+		return ret;
+	}
+
+	force_vm_exit(cpu_all_mask);
+	kvm_call_hyp(__kvm_flush_vm_context);
+
+	kvm_vmid->vmid = (u32)ret;
+	refcount_set(&kvm_vmid->pinned, 1);
+	spin_unlock(&kvm_vmid_lock);
+
+	return ret;
+}
+
+int kvm_arch_pinned_vmid_put(struct kvm *kvm)
+{
+	struct kvm_vcpu *vcpu;
+	struct kvm_vmid *kvm_vmid;
+
+	if (!kvm_pinned_vmid_enable)
+		return -EINVAL;
+
+	vcpu = kvm_get_vcpu(kvm, 0);
+	if (!vcpu)
+		return -EINVAL;
+
+	kvm_vmid = &vcpu->arch.hw_mmu->vmid;
+
+	spin_lock(&kvm_vmid_lock);
+
+	if (!refcount_read(&kvm_vmid->pinned))
+		goto out;
+
+	if (refcount_dec_and_test(&kvm_vmid->pinned))
+		ida_free(&kvm_pinned_vmids, kvm_vmid->vmid);
+out:
+	spin_unlock(&kvm_vmid_lock);
+	return 0;
+}
+
 bool kvm_arch_intc_initialized(struct kvm *kvm)
 {
 	return vgic_initialized(kvm);
@@ -1680,6 +1769,16 @@ static void check_kvm_target_cpu(void *ret)
 	*(int *)ret = kvm_target_cpu();
 }
 
+static void kvm_arm_pinned_vmid_init(void)
+{
+	unsigned int vmid_bits = kvm_get_vmid_bits();
+
+	kvm_pinned_vmid_start = (1 << (vmid_bits - 1));
+	kvm_pinned_vmid_end = (1 << vmid_bits) - 1;
+
+	kvm_info("Pinned VMID[0x%x - 0x%x] enabled\n", kvm_pinned_vmid_start, kvm_pinned_vmid_end);
+}
+
 struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr)
 {
 	struct kvm_vcpu *vcpu;
@@ -1790,6 +1889,9 @@ int kvm_arch_init(void *opaque)
 	else
 		kvm_info("Hyp mode initialized successfully\n");
 
+	if (kvm_pinned_vmid_enable)
+		kvm_arm_pinned_vmid_init();
+
 	return 0;
 
 out_hyp:
-- 
2.17.1
