All of lore.kernel.org
 help / color / mirror / Atom feed
From: Vitaly Kuznetsov <vkuznets@redhat.com>
To: kvm@vger.kernel.org, Paolo Bonzini <pbonzini@redhat.com>
Cc: Sean Christopherson <sean.j.christopherson@intel.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>,
	"Dr . David Alan Gilbert" <dgilbert@redhat.com>,
	Wei Huang <whuang2@amd.com>,
	linux-kernel@vger.kernel.org
Subject: [PATCH RFC 1/2] KVM: x86: allocate vcpu->arch.cpuid_entries dynamically
Date: Tue, 15 Sep 2020 17:43:05 +0200	[thread overview]
Message-ID: <20200915154306.724953-2-vkuznets@redhat.com> (raw)
In-Reply-To: <20200915154306.724953-1-vkuznets@redhat.com>

The current limit for guest CPUID leaves (KVM_MAX_CPUID_ENTRIES, 80)
is reported to be insufficient but before we bump it let's switch to
allocating vcpu->arch.cpuid_entries dynamically. Currently,
'struct kvm_cpuid_entry2' is 40 bytes so vcpu->arch.cpuid_entries is
3200 bytes which accounts for 1/4 of the whole 'struct kvm_vcpu_arch'
but having it pre-allocated (for all vCPUs which we also pre-allocate)
gives us no benefits.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |  2 +-
 arch/x86/kvm/cpuid.c            | 55 ++++++++++++++++++++++++---------
 arch/x86/kvm/x86.c              |  1 +
 3 files changed, 42 insertions(+), 16 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5303dbc5c9bc..0c5f2ca3e838 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -636,7 +636,7 @@ struct kvm_vcpu_arch {
 	int halt_request; /* real mode on Intel only */
 
 	int cpuid_nent;
-	struct kvm_cpuid_entry2 cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+	struct kvm_cpuid_entry2 *cpuid_entries;
 
 	int maxphyaddr;
 	int max_tdp_level;
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 3fd6eec202d7..0ce943a8a39a 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -195,6 +195,7 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 {
 	int r, i;
 	struct kvm_cpuid_entry *cpuid_entries = NULL;
+	struct kvm_cpuid_entry2 *cpuid_entries2 = NULL;
 
 	r = -E2BIG;
 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
@@ -207,31 +208,42 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 			r = PTR_ERR(cpuid_entries);
 			goto out;
 		}
+		cpuid_entries2 = kvmalloc_array(cpuid->nent, sizeof(cpuid_entries2[0]),
+						GFP_KERNEL_ACCOUNT);
+		if (!cpuid_entries2) {
+			r = -ENOMEM;
+			goto out_free_cpuid;
+		}
 	}
 	for (i = 0; i < cpuid->nent; i++) {
-		vcpu->arch.cpuid_entries[i].function = cpuid_entries[i].function;
-		vcpu->arch.cpuid_entries[i].eax = cpuid_entries[i].eax;
-		vcpu->arch.cpuid_entries[i].ebx = cpuid_entries[i].ebx;
-		vcpu->arch.cpuid_entries[i].ecx = cpuid_entries[i].ecx;
-		vcpu->arch.cpuid_entries[i].edx = cpuid_entries[i].edx;
-		vcpu->arch.cpuid_entries[i].index = 0;
-		vcpu->arch.cpuid_entries[i].flags = 0;
-		vcpu->arch.cpuid_entries[i].padding[0] = 0;
-		vcpu->arch.cpuid_entries[i].padding[1] = 0;
-		vcpu->arch.cpuid_entries[i].padding[2] = 0;
+		cpuid_entries2[i].function = cpuid_entries[i].function;
+		cpuid_entries2[i].eax = cpuid_entries[i].eax;
+		cpuid_entries2[i].ebx = cpuid_entries[i].ebx;
+		cpuid_entries2[i].ecx = cpuid_entries[i].ecx;
+		cpuid_entries2[i].edx = cpuid_entries[i].edx;
+		cpuid_entries2[i].index = 0;
+		cpuid_entries2[i].flags = 0;
+		cpuid_entries2[i].padding[0] = 0;
+		cpuid_entries2[i].padding[1] = 0;
+		cpuid_entries2[i].padding[2] = 0;
 	}
+	kvfree(vcpu->arch.cpuid_entries);
+	vcpu->arch.cpuid_entries = cpuid_entries2;
 	vcpu->arch.cpuid_nent = cpuid->nent;
+
 	r = kvm_check_cpuid(vcpu);
 	if (r) {
+		kvfree(vcpu->arch.cpuid_entries);
+		vcpu->arch.cpuid_entries = NULL;
 		vcpu->arch.cpuid_nent = 0;
-		kvfree(cpuid_entries);
-		goto out;
+		goto out_free_cpuid;
 	}
 
 	cpuid_fix_nx_cap(vcpu);
 	kvm_update_cpuid_runtime(vcpu);
 	kvm_vcpu_after_set_cpuid(vcpu);
 
+out_free_cpuid:
 	kvfree(cpuid_entries);
 out:
 	return r;
@@ -241,18 +253,31 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
 			      struct kvm_cpuid2 *cpuid,
 			      struct kvm_cpuid_entry2 __user *entries)
 {
+	struct kvm_cpuid_entry2 *cpuid_entries2 = NULL;
 	int r;
 
 	r = -E2BIG;
 	if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
 		goto out;
 	r = -EFAULT;
-	if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
-			   cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
-		goto out;
+
+	if (cpuid->nent) {
+		cpuid_entries2 = vmemdup_user(entries,
+					      array_size(sizeof(cpuid_entries2[0]),
+							 cpuid->nent));
+		if (IS_ERR(cpuid_entries2)) {
+			r = PTR_ERR(cpuid_entries2);
+			goto out;
+		}
+	}
+	kvfree(vcpu->arch.cpuid_entries);
+	vcpu->arch.cpuid_entries = cpuid_entries2;
 	vcpu->arch.cpuid_nent = cpuid->nent;
+
 	r = kvm_check_cpuid(vcpu);
 	if (r) {
+		kvfree(vcpu->arch.cpuid_entries);
+		vcpu->arch.cpuid_entries = NULL;
 		vcpu->arch.cpuid_nent = 0;
 		goto out;
 	}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1994602a0851..42259a6ec1d8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9610,6 +9610,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 	kvm_mmu_destroy(vcpu);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	free_page((unsigned long)vcpu->arch.pio_data);
+	kvfree(vcpu->arch.cpuid_entries);
 	if (!lapic_in_kernel(vcpu))
 		static_key_slow_dec(&kvm_no_apic_vcpu);
 }
-- 
2.25.4


  reply	other threads:[~2020-09-15 22:49 UTC|newest]

Thread overview: 10+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2020-09-15 15:43 [PATCH RFC 0/2] KVM: x86: allow for more CPUID entries Vitaly Kuznetsov
2020-09-15 15:43 ` Vitaly Kuznetsov [this message]
2020-09-18  2:41   ` [PATCH RFC 1/2] KVM: x86: allocate vcpu->arch.cpuid_entries dynamically Sean Christopherson
2020-10-01 10:04     ` Vitaly Kuznetsov
2020-09-15 15:43 ` [PATCH RFC 2/2] KVM: x86: bump KVM_MAX_CPUID_ENTRIES Vitaly Kuznetsov
2020-09-15 16:51 ` [PATCH RFC 0/2] KVM: x86: allow for more CPUID entries Dr. David Alan Gilbert
2020-09-16  3:49   ` Wei Huang
2020-09-16  7:44     ` Vitaly Kuznetsov
2020-09-16  8:33     ` Dr. David Alan Gilbert
2020-09-16 19:22       ` Wei Huang

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20200915154306.724953-2-vkuznets@redhat.com \
    --to=vkuznets@redhat.com \
    --cc=dgilbert@redhat.com \
    --cc=jmattson@google.com \
    --cc=kvm@vger.kernel.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=pbonzini@redhat.com \
    --cc=sean.j.christopherson@intel.com \
    --cc=wanpengli@tencent.com \
    --cc=whuang2@amd.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.