From: Jing Liu <jing2.liu@linux.intel.com>
To: pbonzini@redhat.com, seanjc@google.com, kvm@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, jing2.liu@intel.com
Subject: [PATCH RFC 4/7] kvm: x86: Add new ioctls for XSAVE extension
Date: Sun, 7 Feb 2021 10:42:53 -0500 [thread overview]
Message-ID: <20210207154256.52850-5-jing2.liu@linux.intel.com> (raw)
In-Reply-To: <20210207154256.52850-1-jing2.liu@linux.intel.com>
The static xstate buffer kvm_xsave contains the extended register
states, but it is not enough for dynamic features with large state.
Introduce a new capability called KVM_CAP_X86_XSAVE_EXTENSION to
detect if hardware has XSAVE extension (XFD). Meanwhile, add two
new ioctl interfaces to get/set the whole xstate using struct
kvm_xsave_extension buffer containing both static and dynamic
xfeatures. Reuse fill_xsave and load_xsave for both cases.
Signed-off-by: Jing Liu <jing2.liu@linux.intel.com>
---
arch/x86/include/uapi/asm/kvm.h | 5 +++
arch/x86/kvm/x86.c | 70 +++++++++++++++++++++++++--------
include/uapi/linux/kvm.h | 8 ++++
3 files changed, 66 insertions(+), 17 deletions(-)
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 89e5f3d1bba8..bf785e89a728 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -362,6 +362,11 @@ struct kvm_xsave {
__u32 region[1024];
};
+/* for KVM_CAP_X86_XSAVE_EXTENSION */
+struct kvm_xsave_extension {
+ __u32 region[3072];
+};
+
#define KVM_MAX_XCRS 16
struct kvm_xcr {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 15908bc65d1c..bfbde877221e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3786,6 +3786,10 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
case KVM_CAP_XCRS:
r = boot_cpu_has(X86_FEATURE_XSAVE);
break;
+ case KVM_CAP_X86_XSAVE_EXTENSION:
+ r = boot_cpu_has(X86_FEATURE_XSAVE) &&
+ boot_cpu_has(X86_FEATURE_XFD);
+ break;
case KVM_CAP_TSC_CONTROL:
r = kvm_has_tsc_control;
break;
@@ -4395,7 +4399,7 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
#define XSTATE_COMPACTION_ENABLED (1ULL << 63)
-static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
+static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu, bool has_extension)
{
struct xregs_state *xsave;
struct fpu *guest_fpu;
@@ -4403,9 +4407,14 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
u64 valid;
guest_fpu = vcpu->arch.guest_fpu;
- xsave = &guest_fpu->state.xsave;
+ xsave = __xsave(guest_fpu);
xstate_bv = xsave->header.xfeatures;
+ if (!has_extension) {
+ /* truncate with only non-dynamic features */
+ xstate_bv = xstate_bv & ~xfeatures_mask_user_dynamic;
+ }
+
/*
* Copy legacy XSAVE area, to avoid complications with CPUID
* leaves 0 and 1 in the loop below.
@@ -4450,7 +4459,7 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
u64 valid;
guest_fpu = vcpu->arch.guest_fpu;
- xsave = &guest_fpu->state.xsave;
+ xsave = __xsave(guest_fpu);
/*
* Copy legacy XSAVE area, to avoid complications with CPUID
@@ -4488,29 +4497,31 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
}
}
-static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
- struct kvm_xsave *guest_xsave)
+static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu, u32 *region, bool has_extension)
{
if (boot_cpu_has(X86_FEATURE_XSAVE)) {
- memset(guest_xsave, 0, sizeof(struct kvm_xsave));
- fill_xsave((u8 *) guest_xsave->region, vcpu);
+ if (has_extension)
+ memset(region, 0, sizeof(struct kvm_xsave_extension));
+ else
+ memset(region, 0, sizeof(struct kvm_xsave));
+
+ fill_xsave((u8 *)region, vcpu, has_extension);
} else {
- memcpy(guest_xsave->region,
+ memcpy(region,
&vcpu->arch.guest_fpu->state.fxsave,
sizeof(struct fxregs_state));
- *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
+ *(u64 *)&region[XSAVE_HDR_OFFSET / sizeof(u32)] =
XFEATURE_MASK_FPSSE;
}
}
#define XSAVE_MXCSR_OFFSET 24
-static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
- struct kvm_xsave *guest_xsave)
+static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, u32 *region)
{
u64 xstate_bv =
- *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)];
- u32 mxcsr = *(u32 *)&guest_xsave->region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
+ *(u64 *)&region[XSAVE_HDR_OFFSET / sizeof(u32)];
+ u32 mxcsr = *(u32 *)&region[XSAVE_MXCSR_OFFSET / sizeof(u32)];
if (boot_cpu_has(X86_FEATURE_XSAVE)) {
/*
@@ -4520,13 +4531,13 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
*/
if (xstate_bv & ~supported_xcr0 || mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
- load_xsave(vcpu, (u8 *)guest_xsave->region);
+ load_xsave(vcpu, (u8 *)region);
} else {
if (xstate_bv & ~XFEATURE_MASK_FPSSE ||
mxcsr & ~mxcsr_feature_mask)
return -EINVAL;
memcpy(&vcpu->arch.guest_fpu->state.fxsave,
- guest_xsave->region, sizeof(struct fxregs_state));
+ region, sizeof(struct fxregs_state));
}
return 0;
}
@@ -4642,6 +4653,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
union {
struct kvm_lapic_state *lapic;
struct kvm_xsave *xsave;
+ struct kvm_xsave_extension *xsave_ext;
struct kvm_xcrs *xcrs;
void *buffer;
} u;
@@ -4847,7 +4859,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
if (!u.xsave)
break;
- kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave);
+ kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave->region, false);
r = -EFAULT;
if (copy_to_user(argp, u.xsave, sizeof(struct kvm_xsave)))
@@ -4855,6 +4867,20 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = 0;
break;
}
+ case KVM_GET_XSAVE_EXTENSION: {
+ u.xsave_ext = kzalloc(sizeof(struct kvm_xsave_extension), GFP_KERNEL_ACCOUNT);
+ r = -ENOMEM;
+ if (!u.xsave_ext)
+ break;
+
+ kvm_vcpu_ioctl_x86_get_xsave(vcpu, u.xsave_ext->region, true);
+
+ r = -EFAULT;
+ if (copy_to_user(argp, u.xsave_ext, sizeof(struct kvm_xsave_extension)))
+ break;
+ r = 0;
+ break;
+ }
case KVM_SET_XSAVE: {
u.xsave = memdup_user(argp, sizeof(*u.xsave));
if (IS_ERR(u.xsave)) {
@@ -4862,7 +4888,17 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
goto out_nofree;
}
- r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
+ r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave->region);
+ break;
+ }
+ case KVM_SET_XSAVE_EXTENSION: {
+ u.xsave_ext = memdup_user(argp, sizeof(*u.xsave_ext));
+ if (IS_ERR(u.xsave_ext)) {
+ r = PTR_ERR(u.xsave_ext);
+ goto out_nofree;
+ }
+
+ r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave_ext->region);
break;
}
case KVM_GET_XCRS: {
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index ca41220b40b8..42a167a29350 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -1053,6 +1053,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_X86_USER_SPACE_MSR 188
#define KVM_CAP_X86_MSR_FILTER 189
#define KVM_CAP_ENFORCE_PV_FEATURE_CPUID 190
+#define KVM_CAP_X86_XSAVE_EXTENSION 191
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1462,6 +1463,13 @@ struct kvm_s390_ucas_mapping {
/* Available with KVM_CAP_XSAVE */
#define KVM_GET_XSAVE _IOR(KVMIO, 0xa4, struct kvm_xsave)
#define KVM_SET_XSAVE _IOW(KVMIO, 0xa5, struct kvm_xsave)
+/*
+ * Available with KVM_CAP_X86_XSAVE_EXTENSION
+ * Reusing command numbers 0xa4/0xa5 is safe because the ioctl encoding
+ * includes the argument structure size, which differs from kvm_xsave.
+ */
+#define KVM_GET_XSAVE_EXTENSION _IOR(KVMIO, 0xa4, struct kvm_xsave_extension)
+#define KVM_SET_XSAVE_EXTENSION _IOW(KVMIO, 0xa5, struct kvm_xsave_extension)
+
/* Available with KVM_CAP_XCRS */
#define KVM_GET_XCRS _IOR(KVMIO, 0xa6, struct kvm_xcrs)
#define KVM_SET_XCRS _IOW(KVMIO, 0xa7, struct kvm_xcrs)
--
2.18.4
next prev parent reply other threads:[~2021-02-07 6:59 UTC|newest]
Thread overview: 39+ messages / expand[flat|nested] mbox.gz Atom feed top
2021-02-07 15:42 [PATCH RFC 0/7] Introduce support for guest AMX feature Jing Liu
2021-02-07 15:42 ` [PATCH RFC 1/7] kvm: x86: Expose XFD CPUID to guest Jing Liu
2021-05-24 21:34 ` Sean Christopherson
2021-06-07 3:27 ` Liu, Jing2
2021-02-07 15:42 ` [PATCH RFC 2/7] kvm: x86: Introduce XFD MSRs as passthrough " Jing Liu
2021-05-24 21:43 ` Sean Christopherson
2021-05-24 21:57 ` Jim Mattson
2021-06-02 3:12 ` Liu, Jing2
2021-06-23 17:50 ` Dave Hansen
2021-06-28 2:00 ` Liu, Jing2
2021-06-29 17:58 ` Dave Hansen
2021-07-06 7:33 ` Liu, Jing2
2021-02-07 15:42 ` [PATCH RFC 3/7] kvm: x86: XSAVE state and XFD MSRs context switch Jing Liu
2021-02-07 11:49 ` Borislav Petkov
2021-02-08 3:35 ` Liu, Jing2
2021-02-08 10:25 ` Paolo Bonzini
2021-02-08 17:31 ` Sean Christopherson
2021-02-08 17:45 ` Paolo Bonzini
2021-02-08 18:04 ` Sean Christopherson
2021-02-08 18:12 ` Paolo Bonzini
2021-02-08 18:55 ` Konrad Rzeszutek Wilk
2021-02-22 8:51 ` Liu, Jing2
2021-02-22 8:36 ` Liu, Jing2
2021-02-07 15:42 ` Jing Liu [this message]
2021-05-24 21:50 ` [PATCH RFC 4/7] kvm: x86: Add new ioctls for XSAVE extension Sean Christopherson
2021-05-26 6:09 ` Liu, Jing2
2021-05-26 14:43 ` Sean Christopherson
2021-06-01 10:24 ` Liu, Jing2
2021-06-07 5:23 ` Liu, Jing2
2021-05-24 22:06 ` Jim Mattson
2021-05-26 6:11 ` Liu, Jing2
2021-02-07 15:42 ` [PATCH RFC 5/7] kvm: x86: Revise CPUID.D.1.EBX for alignment rule Jing Liu
2021-05-24 21:28 ` Sean Christopherson
2021-06-03 4:45 ` Liu, Jing2
2021-02-07 15:42 ` [PATCH RFC 6/7] kvm: x86: Add AMX_TILE, AMX_INT8 and AMX_BF16 support Jing Liu
2021-02-07 15:42 ` [PATCH RFC 7/7] kvm: x86: AMX XCR0 support for guest Jing Liu
2021-05-24 21:53 ` Sean Christopherson
2021-05-26 7:54 ` Liu, Jing2
2021-05-26 14:54 ` Sean Christopherson
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20210207154256.52850-5-jing2.liu@linux.intel.com \
--to=jing2.liu@linux.intel.com \
--cc=jing2.liu@intel.com \
--cc=kvm@vger.kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=pbonzini@redhat.com \
--cc=seanjc@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).