* [PATCH 0/4] KVM: SVM: SEV-ES groundwork
@ 2020-07-29 13:22 Joerg Roedel
  2020-07-29 13:22 ` [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack Joerg Roedel
                   ` (3 more replies)
  0 siblings, 4 replies; 9+ messages in thread
From: Joerg Roedel @ 2020-07-29 13:22 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Sean Christopherson, Vitaly Kuznetsov, Wanpeng Li, Jim Mattson,
	Joerg Roedel, kvm, linux-kernel

Hi Paolo,

here are some groundwork patches for the upcoming SEV-ES support in the
Linux kernel. They are part of both the client patch-set and the KVM
hypervisor patches (under development).

Patch 1 is necessary to fix a compile warning about a stack frame
getting too large. The other three patches are currently posted as part
of the SEV-ES client patch-set.

It would be great if you could consider them for v5.9, so that the
client and the hypervisor patch-sets can be developed more independently
of each other.

Please let me know what you think.

Regards,

	Joerg

Borislav Petkov (1):
  KVM: SVM: Use __packed shorthand

Joerg Roedel (2):
  KVM: SVM: nested: Don't allocate VMCB structures on stack
  KVM: SVM: Add GHCB Accessor functions

Tom Lendacky (1):
  KVM: SVM: Add GHCB definitions

 arch/x86/include/asm/svm.h | 118 ++++++++++++++++++++++++++++++++++---
 arch/x86/kvm/svm/nested.c  |  44 +++++++++-----
 arch/x86/kvm/svm/svm.c     |   2 +
 3 files changed, 143 insertions(+), 21 deletions(-)

-- 
2.17.1



* [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack
  2020-07-29 13:22 [PATCH 0/4] KVM: SVM: SEV-ES groundwork Joerg Roedel
@ 2020-07-29 13:22 ` Joerg Roedel
  2020-07-29 15:14   ` Sean Christopherson
  2020-07-29 13:22 ` [PATCH 2/4] KVM: SVM: Add GHCB definitions Joerg Roedel
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 9+ messages in thread
From: Joerg Roedel @ 2020-07-29 13:22 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Sean Christopherson, Vitaly Kuznetsov, Wanpeng Li, Jim Mattson,
	Joerg Roedel, kvm, linux-kernel, Joerg Roedel

From: Joerg Roedel <jroedel@suse.de>

Do not allocate a vmcb_control_area and a vmcb_save_area on the stack,
as these structures will become larger with future extensions of
SVM, and thus the svm_set_nested_state() function would end up with too
large a stack frame.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
 arch/x86/kvm/svm/nested.c | 44 ++++++++++++++++++++++++++-------------
 1 file changed, 30 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 61378a3c2ce4..f3c3c4e1ca7f 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1061,8 +1061,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	struct vmcb *hsave = svm->nested.hsave;
 	struct vmcb __user *user_vmcb = (struct vmcb __user *)
 		&user_kvm_nested_state->data.svm[0];
-	struct vmcb_control_area ctl;
-	struct vmcb_save_area save;
+	struct vmcb_control_area *ctl;
+	struct vmcb_save_area *save;
+	int ret;
 	u32 cr0;
 
 	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
@@ -1096,13 +1097,22 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 		return -EINVAL;
 	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
 		return -EINVAL;
-	if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
-		return -EFAULT;
-	if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
-		return -EFAULT;
 
-	if (!nested_vmcb_check_controls(&ctl))
-		return -EINVAL;
+	ret  = -ENOMEM;
+	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
+	save = kzalloc(sizeof(*save), GFP_KERNEL);
+	if (!ctl || !save)
+		goto out_free;
+
+	ret = -EFAULT;
+	if (copy_from_user(ctl, &user_vmcb->control, sizeof(ctl)))
+		goto out_free;
+	if (copy_from_user(save, &user_vmcb->save, sizeof(save)))
+		goto out_free;
+
+	ret = -EINVAL;
+	if (!nested_vmcb_check_controls(ctl))
+		goto out_free;
 
 	/*
 	 * Processor state contains L2 state.  Check that it is
@@ -1110,15 +1120,15 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	 */
 	cr0 = kvm_read_cr0(vcpu);
         if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
-                return -EINVAL;
+                goto out_free;
 
 	/*
 	 * Validate host state saved from before VMRUN (see
 	 * nested_svm_check_permissions).
 	 * TODO: validate reserved bits for all saved state.
 	 */
-	if (!(save.cr0 & X86_CR0_PG))
-		return -EINVAL;
+	if (!(save->cr0 & X86_CR0_PG))
+		goto out_free;
 
 	/*
 	 * All checks done, we can enter guest mode.  L1 control fields
@@ -1127,15 +1137,21 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
 	 * contains saved L1 state.
 	 */
 	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
-	hsave->save = save;
+	hsave->save = *save;
 
 	svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
-	load_nested_vmcb_control(svm, &ctl);
+	load_nested_vmcb_control(svm, ctl);
 	nested_prepare_vmcb_control(svm);
 
 out_set_gif:
 	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
-	return 0;
+
+	ret = 0;
+out_free:
+	kfree(save);
+	kfree(ctl);
+
+	return ret;
 }
 
 struct kvm_x86_nested_ops svm_nested_ops = {
-- 
2.17.1



* [PATCH 2/4] KVM: SVM: Add GHCB definitions
  2020-07-29 13:22 [PATCH 0/4] KVM: SVM: SEV-ES groundwork Joerg Roedel
  2020-07-29 13:22 ` [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack Joerg Roedel
@ 2020-07-29 13:22 ` Joerg Roedel
  2020-07-29 13:22 ` [PATCH 3/4] KVM: SVM: Add GHCB Accessor functions Joerg Roedel
  2020-07-29 13:22 ` [PATCH 4/4] KVM: SVM: Use __packed shorthand Joerg Roedel
  3 siblings, 0 replies; 9+ messages in thread
From: Joerg Roedel @ 2020-07-29 13:22 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Sean Christopherson, Vitaly Kuznetsov, Wanpeng Li, Jim Mattson,
	Joerg Roedel, kvm, linux-kernel, Tom Lendacky, Joerg Roedel

From: Tom Lendacky <thomas.lendacky@amd.com>

Extend the vmcb_save_area with SEV-ES fields and add a new
'struct ghcb' which will be used for guest-hypervisor communication.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
 arch/x86/include/asm/svm.h | 45 +++++++++++++++++++++++++++++++++++++-
 arch/x86/kvm/svm/svm.c     |  2 ++
 2 files changed, 46 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 8a1f5382a4ea..9a3e0b802716 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -200,13 +200,56 @@ struct __attribute__ ((__packed__)) vmcb_save_area {
 	u64 br_to;
 	u64 last_excp_from;
 	u64 last_excp_to;
+
+	/*
+	 * The following part of the save area is valid only for
+	 * SEV-ES guests when referenced through the GHCB.
+	 */
+	u8 reserved_7[104];
+	u64 reserved_8;		/* rax already available at 0x01f8 */
+	u64 rcx;
+	u64 rdx;
+	u64 rbx;
+	u64 reserved_9;		/* rsp already available at 0x01d8 */
+	u64 rbp;
+	u64 rsi;
+	u64 rdi;
+	u64 r8;
+	u64 r9;
+	u64 r10;
+	u64 r11;
+	u64 r12;
+	u64 r13;
+	u64 r14;
+	u64 r15;
+	u8 reserved_10[16];
+	u64 sw_exit_code;
+	u64 sw_exit_info_1;
+	u64 sw_exit_info_2;
+	u64 sw_scratch;
+	u8 reserved_11[56];
+	u64 xcr0;
+	u8 valid_bitmap[16];
+	u64 x87_state_gpa;
+};
+
+struct __attribute__ ((__packed__)) ghcb {
+	struct vmcb_save_area save;
+	u8 reserved_save[2048 - sizeof(struct vmcb_save_area)];
+
+	u8 shared_buffer[2032];
+
+	u8 reserved_1[10];
+	u16 protocol_version;	/* negotiated SEV-ES/GHCB protocol version */
+	u32 ghcb_usage;
 };
 
 
 static inline void __unused_size_checks(void)
 {
-	BUILD_BUG_ON(sizeof(struct vmcb_save_area) != 0x298);
+	BUILD_BUG_ON(sizeof(struct vmcb_save_area) != 1032);
 	BUILD_BUG_ON(sizeof(struct vmcb_control_area) != 256);
+	BUILD_BUG_ON(sizeof(struct ghcb) != 4096);
 }
 
 struct __attribute__ ((__packed__)) vmcb {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 783330d0e7b8..953cf947f022 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4161,6 +4161,8 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
 
 static int __init svm_init(void)
 {
+	__unused_size_checks();
+
 	return kvm_init(&svm_init_ops, sizeof(struct vcpu_svm),
 			__alignof__(struct vcpu_svm), THIS_MODULE);
 }
-- 
2.17.1



* [PATCH 3/4] KVM: SVM: Add GHCB Accessor functions
  2020-07-29 13:22 [PATCH 0/4] KVM: SVM: SEV-ES groundwork Joerg Roedel
  2020-07-29 13:22 ` [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack Joerg Roedel
  2020-07-29 13:22 ` [PATCH 2/4] KVM: SVM: Add GHCB definitions Joerg Roedel
@ 2020-07-29 13:22 ` Joerg Roedel
  2020-07-29 15:43   ` Sean Christopherson
  2020-07-29 13:22 ` [PATCH 4/4] KVM: SVM: Use __packed shorthand Joerg Roedel
  3 siblings, 1 reply; 9+ messages in thread
From: Joerg Roedel @ 2020-07-29 13:22 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Sean Christopherson, Vitaly Kuznetsov, Wanpeng Li, Jim Mattson,
	Joerg Roedel, kvm, linux-kernel, Joerg Roedel

From: Joerg Roedel <jroedel@suse.de>

Building a correct GHCB for the hypervisor requires setting valid bits
in the GHCB. Simplify that process by providing accessor functions to
set values and to update the valid bitmap.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
 arch/x86/include/asm/svm.h | 61 ++++++++++++++++++++++++++++++++++++++
 1 file changed, 61 insertions(+)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 9a3e0b802716..0420250b008b 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -341,4 +341,65 @@ struct __attribute__ ((__packed__)) vmcb {
 
 #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
 
+/* GHCB Accessor functions */
+
+#define DEFINE_GHCB_INDICES(field)					\
+	u16 idx = offsetof(struct vmcb_save_area, field) / 8;		\
+	u16 byte_idx  = idx / 8;					\
+	u16 bit_idx   = idx % 8;					\
+	BUILD_BUG_ON(byte_idx > ARRAY_SIZE(ghcb->save.valid_bitmap));
+
+#define GHCB_SET_VALID(ghcb, field)					\
+	{								\
+		DEFINE_GHCB_INDICES(field)				\
+		(ghcb)->save.valid_bitmap[byte_idx] |= BIT(bit_idx);	\
+	}
+
+#define DEFINE_GHCB_SETTER(field)					\
+	static inline void						\
+	ghcb_set_##field(struct ghcb *ghcb, u64 value)			\
+	{								\
+		GHCB_SET_VALID(ghcb, field)				\
+		(ghcb)->save.field = value;				\
+	}
+
+#define DEFINE_GHCB_ACCESSORS(field)					\
+	static inline bool ghcb_is_valid_##field(const struct ghcb *ghcb)	\
+	{								\
+		DEFINE_GHCB_INDICES(field)				\
+		return !!((ghcb)->save.valid_bitmap[byte_idx]		\
+						& BIT(bit_idx));	\
+	}								\
+									\
+	static inline void						\
+	ghcb_set_##field(struct ghcb *ghcb, u64 value)			\
+	{								\
+		GHCB_SET_VALID(ghcb, field)				\
+		(ghcb)->save.field = value;				\
+	}
+
+DEFINE_GHCB_ACCESSORS(cpl)
+DEFINE_GHCB_ACCESSORS(rip)
+DEFINE_GHCB_ACCESSORS(rsp)
+DEFINE_GHCB_ACCESSORS(rax)
+DEFINE_GHCB_ACCESSORS(rcx)
+DEFINE_GHCB_ACCESSORS(rdx)
+DEFINE_GHCB_ACCESSORS(rbx)
+DEFINE_GHCB_ACCESSORS(rbp)
+DEFINE_GHCB_ACCESSORS(rsi)
+DEFINE_GHCB_ACCESSORS(rdi)
+DEFINE_GHCB_ACCESSORS(r8)
+DEFINE_GHCB_ACCESSORS(r9)
+DEFINE_GHCB_ACCESSORS(r10)
+DEFINE_GHCB_ACCESSORS(r11)
+DEFINE_GHCB_ACCESSORS(r12)
+DEFINE_GHCB_ACCESSORS(r13)
+DEFINE_GHCB_ACCESSORS(r14)
+DEFINE_GHCB_ACCESSORS(r15)
+DEFINE_GHCB_ACCESSORS(sw_exit_code)
+DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
+DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
+DEFINE_GHCB_ACCESSORS(sw_scratch)
+DEFINE_GHCB_ACCESSORS(xcr0)
+
 #endif
-- 
2.17.1



* [PATCH 4/4] KVM: SVM: Use __packed shorthand
  2020-07-29 13:22 [PATCH 0/4] KVM: SVM: SEV-ES groundwork Joerg Roedel
                   ` (2 preceding siblings ...)
  2020-07-29 13:22 ` [PATCH 3/4] KVM: SVM: Add GHCB Accessor functions Joerg Roedel
@ 2020-07-29 13:22 ` Joerg Roedel
  3 siblings, 0 replies; 9+ messages in thread
From: Joerg Roedel @ 2020-07-29 13:22 UTC (permalink / raw)
  To: Paolo Bonzini
  Cc: Sean Christopherson, Vitaly Kuznetsov, Wanpeng Li, Jim Mattson,
	Joerg Roedel, kvm, linux-kernel, Borislav Petkov, Joerg Roedel

From: Borislav Petkov <bp@alien8.de>

Use the __packed shorthand to make the struct definitions more readable.

No functional changes.

Signed-off-by: Joerg Roedel <jroedel@suse.de>
---
 arch/x86/include/asm/svm.h | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
index 0420250b008b..af91ced0f370 100644
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -150,14 +150,14 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 #define SVM_NESTED_CTL_NP_ENABLE	BIT(0)
 #define SVM_NESTED_CTL_SEV_ENABLE	BIT(1)
 
-struct __attribute__ ((__packed__)) vmcb_seg {
+struct vmcb_seg {
 	u16 selector;
 	u16 attrib;
 	u32 limit;
 	u64 base;
-};
+} __packed;
 
-struct __attribute__ ((__packed__)) vmcb_save_area {
+struct vmcb_save_area {
 	struct vmcb_seg es;
 	struct vmcb_seg cs;
 	struct vmcb_seg ss;
@@ -231,9 +231,9 @@ struct __attribute__ ((__packed__)) vmcb_save_area {
 	u64 xcr0;
 	u8 valid_bitmap[16];
 	u64 x87_state_gpa;
-};
+} __packed;
 
-struct __attribute__ ((__packed__)) ghcb {
+struct ghcb {
 	struct vmcb_save_area save;
 	u8 reserved_save[2048 - sizeof(struct vmcb_save_area)];
 
@@ -242,7 +242,7 @@ struct __attribute__ ((__packed__)) ghcb {
 	u8 reserved_1[10];
 	u16 protocol_version;	/* negotiated SEV-ES/GHCB protocol version */
 	u32 ghcb_usage;
-};
+} __packed;
 
 
 static inline void __unused_size_checks(void)
@@ -252,11 +252,11 @@ static inline void __unused_size_checks(void)
 	BUILD_BUG_ON(sizeof(struct ghcb) != 4096);
 }
 
-struct __attribute__ ((__packed__)) vmcb {
+struct vmcb {
 	struct vmcb_control_area control;
 	u8 reserved_control[1024 - sizeof(struct vmcb_control_area)];
 	struct vmcb_save_area save;
-};
+} __packed;
 
 #define SVM_CPUID_FUNC 0x8000000a
 
-- 
2.17.1



* Re: [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack
  2020-07-29 13:22 ` [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack Joerg Roedel
@ 2020-07-29 15:14   ` Sean Christopherson
  2020-07-30 12:38     ` Joerg Roedel
  0 siblings, 1 reply; 9+ messages in thread
From: Sean Christopherson @ 2020-07-29 15:14 UTC (permalink / raw)
  To: Joerg Roedel
  Cc: Paolo Bonzini, Vitaly Kuznetsov, Wanpeng Li, Jim Mattson, kvm,
	linux-kernel, Joerg Roedel

On Wed, Jul 29, 2020 at 03:22:31PM +0200, Joerg Roedel wrote:
> From: Joerg Roedel <jroedel@suse.de>
> 
> Do not allocate a vmcb_control_area and a vmcb_save_area on the stack,
> as these structures will become larger with future extensions of
> SVM, and thus the svm_set_nested_state() function would end up with too
> large a stack frame.

Speaking of too large, would it be overly paranoid to add:

  BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) <
	       KVM_STATE_NESTED_SVM_VMCB_SIZE)

More so for documentation than for any real concern that the SVM architecture
will do something silly, e.g. to make it obvious that patch 2 in this series
won't break backwards compatibility.
 
> Signed-off-by: Joerg Roedel <jroedel@suse.de>
> ---
>  arch/x86/kvm/svm/nested.c | 44 ++++++++++++++++++++++++++-------------
>  1 file changed, 30 insertions(+), 14 deletions(-)
> 
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 61378a3c2ce4..f3c3c4e1ca7f 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -1061,8 +1061,9 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
>  	struct vmcb *hsave = svm->nested.hsave;
>  	struct vmcb __user *user_vmcb = (struct vmcb __user *)
>  		&user_kvm_nested_state->data.svm[0];
> -	struct vmcb_control_area ctl;
> -	struct vmcb_save_area save;
> +	struct vmcb_control_area *ctl;
> +	struct vmcb_save_area *save;
> +	int ret;
>  	u32 cr0;
>  
>  	if (kvm_state->format != KVM_STATE_NESTED_FORMAT_SVM)
> @@ -1096,13 +1097,22 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
>  		return -EINVAL;
>  	if (kvm_state->size < sizeof(*kvm_state) + KVM_STATE_NESTED_SVM_VMCB_SIZE)
>  		return -EINVAL;
> -	if (copy_from_user(&ctl, &user_vmcb->control, sizeof(ctl)))
> -		return -EFAULT;
> -	if (copy_from_user(&save, &user_vmcb->save, sizeof(save)))
> -		return -EFAULT;
>  
> -	if (!nested_vmcb_check_controls(&ctl))
> -		return -EINVAL;
> +	ret  = -ENOMEM;
> +	ctl  = kzalloc(sizeof(*ctl),  GFP_KERNEL);
> +	save = kzalloc(sizeof(*save), GFP_KERNEL);
> +	if (!ctl || !save)
> +		goto out_free;
> +
> +	ret = -EFAULT;
> +	if (copy_from_user(ctl, &user_vmcb->control, sizeof(ctl)))

The sizeof() calc is wrong, this is now calculating the size of the pointer,
not the size of the struct.  It'd need to be sizeof(*ctl).

> +		goto out_free;
> +	if (copy_from_user(save, &user_vmcb->save, sizeof(save)))

Same bug here.

> +		goto out_free;
> +
> +	ret = -EINVAL;
> +	if (!nested_vmcb_check_controls(ctl))
> +		goto out_free;
>  
>  	/*
>  	 * Processor state contains L2 state.  Check that it is
> @@ -1110,15 +1120,15 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
>  	 */
>  	cr0 = kvm_read_cr0(vcpu);
>          if (((cr0 & X86_CR0_CD) == 0) && (cr0 & X86_CR0_NW))
> -                return -EINVAL;
> +                goto out_free;
>  
>  	/*
>  	 * Validate host state saved from before VMRUN (see
>  	 * nested_svm_check_permissions).
>  	 * TODO: validate reserved bits for all saved state.
>  	 */
> -	if (!(save.cr0 & X86_CR0_PG))
> -		return -EINVAL;
> +	if (!(save->cr0 & X86_CR0_PG))
> +		goto out_free;
>  
>  	/*
>  	 * All checks done, we can enter guest mode.  L1 control fields
> @@ -1127,15 +1137,21 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
>  	 * contains saved L1 state.
>  	 */
>  	copy_vmcb_control_area(&hsave->control, &svm->vmcb->control);
> -	hsave->save = save;
> +	hsave->save = *save;
>  
>  	svm->nested.vmcb = kvm_state->hdr.svm.vmcb_pa;
> -	load_nested_vmcb_control(svm, &ctl);
> +	load_nested_vmcb_control(svm, ctl);
>  	nested_prepare_vmcb_control(svm);
>  
>  out_set_gif:
>  	svm_set_gif(svm, !!(kvm_state->flags & KVM_STATE_NESTED_GIF_SET));
> -	return 0;
> +
> +	ret = 0;
> +out_free:
> +	kfree(save);
> +	kfree(ctl);
> +
> +	return ret;
>  }
>  
>  struct kvm_x86_nested_ops svm_nested_ops = {
> -- 
> 2.17.1
> 


* Re: [PATCH 3/4] KVM: SVM: Add GHCB Accessor functions
  2020-07-29 13:22 ` [PATCH 3/4] KVM: SVM: Add GHCB Accessor functions Joerg Roedel
@ 2020-07-29 15:43   ` Sean Christopherson
  2020-07-30 13:05     ` Joerg Roedel
  0 siblings, 1 reply; 9+ messages in thread
From: Sean Christopherson @ 2020-07-29 15:43 UTC (permalink / raw)
  To: Joerg Roedel
  Cc: Paolo Bonzini, Vitaly Kuznetsov, Wanpeng Li, Jim Mattson, kvm,
	linux-kernel, Joerg Roedel

On Wed, Jul 29, 2020 at 03:22:33PM +0200, Joerg Roedel wrote:
> From: Joerg Roedel <jroedel@suse.de>
> 
> Building a correct GHCB for the hypervisor requires setting valid bits
> in the GHCB. Simplify that process by providing accessor functions to
> set values and to update the valid bitmap.
> 
> Signed-off-by: Joerg Roedel <jroedel@suse.de>
> ---
>  arch/x86/include/asm/svm.h | 61 ++++++++++++++++++++++++++++++++++++++
>  1 file changed, 61 insertions(+)
> 
> diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h
> index 9a3e0b802716..0420250b008b 100644
> --- a/arch/x86/include/asm/svm.h
> +++ b/arch/x86/include/asm/svm.h
> @@ -341,4 +341,65 @@ struct __attribute__ ((__packed__)) vmcb {
>  
>  #define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
>  
> +/* GHCB Accessor functions */
> +
> +#define DEFINE_GHCB_INDICES(field)					\
> +	u16 idx = offsetof(struct vmcb_save_area, field) / 8;		\

Using sizeof(u64) instead of '8' would be helpful here.

> +	u16 byte_idx  = idx / 8;					\
> +	u16 bit_idx   = idx % 8;					\

Oof.  I love macro frameworks as much as the next person, but declaring
local variables in a macro like this is nasty.

> +	BUILD_BUG_ON(byte_idx > ARRAY_SIZE(ghcb->save.valid_bitmap));
> +
> +#define GHCB_SET_VALID(ghcb, field)					\
> +	{								\
> +		DEFINE_GHCB_INDICES(field)				\
> +		(ghcb)->save.valid_bitmap[byte_idx] |= BIT(bit_idx);	\

Rather than manually calculate the byte/bit indices just use __set_bit()
and test_bit().  That will also solve the variable declaration issue.

E.g.

#define GHCB_BITMAP_IDX(field)		\
	(offsetof(struct vmcb_save_area, field) / sizeof(u64))

#define GHCB_SET_VALID(ghcb, field)	\
	__set_bit(GHCB_BITMAP_IDX(field), (unsigned long *)&ghcb->save.valid_bitmap)

Or alternatively drop GHCB_SET_VALID() and just open code the two users.

> +	}
> +
> +#define DEFINE_GHCB_SETTER(field)					\
> +	static inline void						\
> +	ghcb_set_##field(struct ghcb *ghcb, u64 value)			\
> +	{								\
> +		GHCB_SET_VALID(ghcb, field)				\
> +		(ghcb)->save.field = value;				\


The ghcb doesn't need to be wrapped in (), it's a parameter to a function.
Same comment for the below usage.

> +	}
> +
> +#define DEFINE_GHCB_ACCESSORS(field)					\
> +	static inline bool ghcb_is_valid_##field(const struct ghcb *ghcb)	\

I'd prefer to follow the naming of the arch reg accessors, i.e.

	static inline bool ghcb_##field##_is_valid(...)

to pair with

	kvm_##lname##_read
	kvm_##lname##_write

And because ghcb_is_valid_rip() reads a bit weird, e.g. IMO is more likely
to be read as "does the RIP in the GHCB hold a valid (canonical) value",
versus ghcb_rip_is_valid() reading as "is RIP valid in the GHCB".

> +	{								\
> +		DEFINE_GHCB_INDICES(field)				\
> +		return !!((ghcb)->save.valid_bitmap[byte_idx]		\
> +						& BIT(bit_idx));	\
> +	}								\
> +									\
> +	static inline void						\
> +	ghcb_set_##field(struct ghcb *ghcb, u64 value)			\
> +	{								\
> +		GHCB_SET_VALID(ghcb, field)				\
> +		(ghcb)->save.field = value;				\

> +	}
> +
> +DEFINE_GHCB_ACCESSORS(cpl)
> +DEFINE_GHCB_ACCESSORS(rip)
> +DEFINE_GHCB_ACCESSORS(rsp)
> +DEFINE_GHCB_ACCESSORS(rax)
> +DEFINE_GHCB_ACCESSORS(rcx)
> +DEFINE_GHCB_ACCESSORS(rdx)
> +DEFINE_GHCB_ACCESSORS(rbx)
> +DEFINE_GHCB_ACCESSORS(rbp)
> +DEFINE_GHCB_ACCESSORS(rsi)
> +DEFINE_GHCB_ACCESSORS(rdi)
> +DEFINE_GHCB_ACCESSORS(r8)
> +DEFINE_GHCB_ACCESSORS(r9)
> +DEFINE_GHCB_ACCESSORS(r10)
> +DEFINE_GHCB_ACCESSORS(r11)
> +DEFINE_GHCB_ACCESSORS(r12)
> +DEFINE_GHCB_ACCESSORS(r13)
> +DEFINE_GHCB_ACCESSORS(r14)
> +DEFINE_GHCB_ACCESSORS(r15)
> +DEFINE_GHCB_ACCESSORS(sw_exit_code)
> +DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
> +DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
> +DEFINE_GHCB_ACCESSORS(sw_scratch)
> +DEFINE_GHCB_ACCESSORS(xcr0)
> +
>  #endif
> -- 
> 2.17.1
> 


* Re: [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack
  2020-07-29 15:14   ` Sean Christopherson
@ 2020-07-30 12:38     ` Joerg Roedel
  0 siblings, 0 replies; 9+ messages in thread
From: Joerg Roedel @ 2020-07-30 12:38 UTC (permalink / raw)
  To: Sean Christopherson
  Cc: Paolo Bonzini, Vitaly Kuznetsov, Wanpeng Li, Jim Mattson, kvm,
	linux-kernel, Joerg Roedel

Hi Sean,

thanks for your review!

On Wed, Jul 29, 2020 at 08:14:55AM -0700, Sean Christopherson wrote:
> On Wed, Jul 29, 2020 at 03:22:31PM +0200, Joerg Roedel wrote:
> Speaking of too large, would it be overly paranoid to add:
> 
>   BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) <
> 	       KVM_STATE_NESTED_SVM_VMCB_SIZE)
> 
> More so for documentation than for any real concern that the SVM architecture
> will do something silly, e.g. to make it obvious that patch 2 in this series
> won't break backwards compatibility.

The check should actually be '>', but then it makes sense. The control-
and save-area together are still way smaller than 4k. I will add the
check for '>' to this patch.
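
Concretely, I am thinking of something along these lines (exact
placement to be decided when I respin the patch):

	BUILD_BUG_ON(sizeof(struct vmcb_control_area) + sizeof(struct vmcb_save_area) >
		     KVM_STATE_NESTED_SVM_VMCB_SIZE);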

> > +	ret = -EFAULT;
> > +	if (copy_from_user(ctl, &user_vmcb->control, sizeof(ctl)))
> 
> The sizeof() calc is wrong, this is now calculating the size of the pointer,
> not the size of the struct.  It'd need to be sizeof(*ctl).
> 
> > +		goto out_free;
> > +	if (copy_from_user(save, &user_vmcb->save, sizeof(save)))
> 
> Same bug here.

Thanks, fixed that.
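The copies then become:

	if (copy_from_user(ctl, &user_vmcb->control, sizeof(*ctl)))
		goto out_free;
	if (copy_from_user(save, &user_vmcb->save, sizeof(*save)))
		goto out_free;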

Regards,

	Joerg


* Re: [PATCH 3/4] KVM: SVM: Add GHCB Accessor functions
  2020-07-29 15:43   ` Sean Christopherson
@ 2020-07-30 13:05     ` Joerg Roedel
  0 siblings, 0 replies; 9+ messages in thread
From: Joerg Roedel @ 2020-07-30 13:05 UTC (permalink / raw)
  To: Sean Christopherson
  Cc: Paolo Bonzini, Vitaly Kuznetsov, Wanpeng Li, Jim Mattson, kvm,
	linux-kernel, Joerg Roedel

On Wed, Jul 29, 2020 at 08:43:28AM -0700, Sean Christopherson wrote:
> Rather than manually calculate the byte/bit indices just use __set_bit()
> and test_bit().  That will also solve the variable declaration issue.
> 
> E.g.
> 
> #define GHCB_BITMAP_IDX(field)		\
> 	(offsetof(struct vmcb_save_area, field) / sizeof(u64))
> 
> #define GHCB_SET_VALID(ghcb, field)	\
> 	__set_bit(GHCB_BITMAP_IDX(field), (unsigned long *)&ghcb->save.valid_bitmap)
> 
> Or alternatively drop GHCB_SET_VALID() and just open code the two users.

Thanks for your suggestions, I updated the patch and will do some
testing before re-posting.
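
For reference, the reworked accessors currently look roughly like this
(untested sketch along the lines you suggested, the final form may
still change after testing):

#define GHCB_BITMAP_IDX(field)						\
	(offsetof(struct vmcb_save_area, field) / sizeof(u64))

#define DEFINE_GHCB_ACCESSORS(field)					\
	static inline bool ghcb_##field##_is_valid(const struct ghcb *ghcb) \
	{								\
		return test_bit(GHCB_BITMAP_IDX(field),			\
				(unsigned long *)&ghcb->save.valid_bitmap); \
	}								\
									\
	static inline void ghcb_set_##field(struct ghcb *ghcb, u64 value) \
	{								\
		__set_bit(GHCB_BITMAP_IDX(field),			\
			  (unsigned long *)&ghcb->save.valid_bitmap);	\
		ghcb->save.field = value;				\
	}

A caller then just does, e.g., ghcb_set_rax(ghcb, value), and the
consuming side checks ghcb_rax_is_valid(ghcb) before it uses the value.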

Regards,

	Joerg

> 
> > +	}
> > +
> > +#define DEFINE_GHCB_SETTER(field)					\
> > +	static inline void						\
> > +	ghcb_set_##field(struct ghcb *ghcb, u64 value)			\
> > +	{								\
> > +		GHCB_SET_VALID(ghcb, field)				\
> > +		(ghcb)->save.field = value;				\
> 
> 
> The ghcb doesn't need to be wrapped in (), it's a parameter to a function.
> Same comment for the below usage.
> 
> > +	}
> > +
> > +#define DEFINE_GHCB_ACCESSORS(field)					\
> > +	static inline bool ghcb_is_valid_##field(const struct ghcb *ghcb)	\
> 
> I'd prefer to follow the naming of the arch reg accessors, i.e.
> 
> 	static inline bool ghcb_##field##_is_valid(...)
> 
> to pair with
> 
> 	kvm_##lname##_read
> 	kvm_##lname##_write
> 
> And because ghcb_is_valid_rip() reads a bit weird, e.g. IMO is more likely
> to be read as "does the RIP in the GHCB hold a valid (canonical) value",
> versus ghcb_rip_is_valid() reading as "is RIP valid in the GHCB".
> 
> > +	{								\
> > +		DEFINE_GHCB_INDICES(field)				\
> > +		return !!((ghcb)->save.valid_bitmap[byte_idx]		\
> > +						& BIT(bit_idx));	\
> > +	}								\
> > +									\
> > +	static inline void						\
> > +	ghcb_set_##field(struct ghcb *ghcb, u64 value)			\
> > +	{								\
> > +		GHCB_SET_VALID(ghcb, field)				\
> > +		(ghcb)->save.field = value;				\
> 
> > +	}
> > +
> > +DEFINE_GHCB_ACCESSORS(cpl)
> > +DEFINE_GHCB_ACCESSORS(rip)
> > +DEFINE_GHCB_ACCESSORS(rsp)
> > +DEFINE_GHCB_ACCESSORS(rax)
> > +DEFINE_GHCB_ACCESSORS(rcx)
> > +DEFINE_GHCB_ACCESSORS(rdx)
> > +DEFINE_GHCB_ACCESSORS(rbx)
> > +DEFINE_GHCB_ACCESSORS(rbp)
> > +DEFINE_GHCB_ACCESSORS(rsi)
> > +DEFINE_GHCB_ACCESSORS(rdi)
> > +DEFINE_GHCB_ACCESSORS(r8)
> > +DEFINE_GHCB_ACCESSORS(r9)
> > +DEFINE_GHCB_ACCESSORS(r10)
> > +DEFINE_GHCB_ACCESSORS(r11)
> > +DEFINE_GHCB_ACCESSORS(r12)
> > +DEFINE_GHCB_ACCESSORS(r13)
> > +DEFINE_GHCB_ACCESSORS(r14)
> > +DEFINE_GHCB_ACCESSORS(r15)
> > +DEFINE_GHCB_ACCESSORS(sw_exit_code)
> > +DEFINE_GHCB_ACCESSORS(sw_exit_info_1)
> > +DEFINE_GHCB_ACCESSORS(sw_exit_info_2)
> > +DEFINE_GHCB_ACCESSORS(sw_scratch)
> > +DEFINE_GHCB_ACCESSORS(xcr0)
> > +
> >  #endif
> > -- 
> > 2.17.1
> > 


end of thread, other threads:[~2020-07-30 13:05 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-07-29 13:22 [PATCH 0/4] KVM: SVM: SEV-ES groundwork Joerg Roedel
2020-07-29 13:22 ` [PATCH 1/4] KVM: SVM: nested: Don't allocate VMCB structures on stack Joerg Roedel
2020-07-29 15:14   ` Sean Christopherson
2020-07-30 12:38     ` Joerg Roedel
2020-07-29 13:22 ` [PATCH 2/4] KVM: SVM: Add GHCB definitions Joerg Roedel
2020-07-29 13:22 ` [PATCH 3/4] KVM: SVM: Add GHCB Accessor functions Joerg Roedel
2020-07-29 15:43   ` Sean Christopherson
2020-07-30 13:05     ` Joerg Roedel
2020-07-29 13:22 ` [PATCH 4/4] KVM: SVM: Use __packed shorthand Joerg Roedel
