* [PATCH v3 1/4] x86/vmx: introduce vmwrite_safe()
2017-02-08 10:09 [PATCH v3 0/4] x86/vvmx: correctly emulate VMREAD and VMWRITE Sergey Dyasli
@ 2017-02-08 10:09 ` Sergey Dyasli
2017-02-08 13:33 ` Jan Beulich
2017-02-09 0:24 ` Tian, Kevin
2017-02-08 10:09 ` [PATCH v3 2/4] x86/vmx: improve vmread_safe() Sergey Dyasli
` (2 subsequent siblings)
3 siblings, 2 replies; 10+ messages in thread
From: Sergey Dyasli @ 2017-02-08 10:09 UTC (permalink / raw)
To: xen-devel
Cc: Andrew Cooper, Kevin Tian, Jan Beulich, Jun Nakajima, Sergey Dyasli
Any failure during the original __vmwrite() leads to BUG() which can be
easily exploited from a guest in nested vmx mode.
The new function returns error code depending on the outcome:
VMsucceed: 0
VMfailValid: VM Instruction Error Number
VMfailInvalid: a new VMX_INSN_FAIL_INVALID
A new macro GAS_VMX_OP is introduced in order to improve the
readability of asm. Existing ASM_FLAG_OUT macro is reused and copied
into asm_defns.h
Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
v2 --> v3:
* vmwrite_safe() now returns "enum vmx_insn_errno" (32-bit value)
* vmwrite_safe() is changed from always_inline to plain inline
xen/arch/x86/hvm/vmx/vvmx.c | 3 ++-
xen/include/asm-x86/asm_defns.h | 6 ++++++
xen/include/asm-x86/hvm/vmx/vmcs.h | 1 +
xen/include/asm-x86/hvm/vmx/vmx.h | 29 +++++++++++++++++++++++++++++
4 files changed, 38 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 9caebe5..105a3c0 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -483,7 +483,8 @@ static void vmfail_invalid(struct cpu_user_regs *regs)
static void vmfail(struct cpu_user_regs *regs, enum vmx_insn_errno errno)
{
- if ( vcpu_nestedhvm(current).nv_vvmcxaddr != INVALID_PADDR )
+ if ( vcpu_nestedhvm(current).nv_vvmcxaddr != INVALID_PADDR &&
+ errno != VMX_INSN_FAIL_INVALID )
vmfail_valid(regs, errno);
else
vmfail_invalid(regs);
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index f1c6fa1..220ae2e 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -413,4 +413,10 @@ static always_inline void stac(void)
#define REX64_PREFIX "rex64/"
#endif
+#ifdef __GCC_ASM_FLAG_OUTPUTS__
+# define ASM_FLAG_OUT(yes, no) yes
+#else
+# define ASM_FLAG_OUT(yes, no) no
+#endif
+
#endif /* __X86_ASM_DEFNS_H__ */
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index d71de04..8d43efd 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -526,6 +526,7 @@ enum vmx_insn_errno
VMX_INSN_VMPTRLD_INVALID_PHYADDR = 9,
VMX_INSN_UNSUPPORTED_VMCS_COMPONENT = 12,
VMX_INSN_VMXON_IN_VMX_ROOT = 15,
+ VMX_INSN_FAIL_INVALID = ~0,
};
void vmx_disable_intercept_for_msr(struct vcpu *v, u32 msr, int type);
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 4bf4d50..9bfe71a 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -306,6 +306,12 @@ extern uint8_t posted_intr_vector;
#define INVVPID_ALL_CONTEXT 2
#define INVVPID_SINGLE_CONTEXT_RETAINING_GLOBAL 3
+#ifdef HAVE_GAS_VMX
+# define GAS_VMX_OP(yes, no) yes
+#else
+# define GAS_VMX_OP(yes, no) no
+#endif
+
static always_inline void __vmptrld(u64 addr)
{
asm volatile (
@@ -423,6 +429,29 @@ static inline bool_t __vmread_safe(unsigned long field, unsigned long *value)
return okay;
}
+static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
+ unsigned long value)
+{
+ unsigned long ret = 0;
+ bool fail_invalid, fail_valid;
+
+ asm volatile ( GAS_VMX_OP("vmwrite %[value], %[field]\n\t",
+ VMWRITE_OPCODE MODRM_EAX_ECX)
+ ASM_FLAG_OUT(, "setc %[invalid]\n\t")
+ ASM_FLAG_OUT(, "setz %[valid]\n\t")
+ : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
+ ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid)
+ : [field] GAS_VMX_OP("r", "a") (field),
+ [value] GAS_VMX_OP("rm", "c") (value));
+
+ if ( unlikely(fail_invalid) )
+ ret = VMX_INSN_FAIL_INVALID;
+ else if ( unlikely(fail_valid) )
+ __vmread(VM_INSTRUCTION_ERROR, &ret);
+
+ return ret;
+}
+
static always_inline void __invept(unsigned long type, u64 eptp, u64 gpa)
{
struct {
--
2.9.3
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH v3 1/4] x86/vmx: introduce vmwrite_safe()
2017-02-08 10:09 ` [PATCH v3 1/4] x86/vmx: introduce vmwrite_safe() Sergey Dyasli
@ 2017-02-08 13:33 ` Jan Beulich
2017-02-09 0:24 ` Tian, Kevin
1 sibling, 0 replies; 10+ messages in thread
From: Jan Beulich @ 2017-02-08 13:33 UTC (permalink / raw)
To: Sergey Dyasli; +Cc: Andrew Cooper, Kevin Tian, Jun Nakajima, xen-devel
>>> On 08.02.17 at 11:09, <sergey.dyasli@citrix.com> wrote:
> Any fail during the original __vmwrite() leads to BUG() which can be
> easily exploited from a guest in the nested vmx mode.
>
> The new function returns error code depending on the outcome:
>
> VMsucceed: 0
> VMfailValid: VM Instruction Error Number
> VMfailInvalid: a new VMX_INSN_FAIL_INVALID
>
> A new macro GAS_VMX_OP is introduced in order to improve the
> readability of asm. Existing ASM_FLAG_OUT macro is reused and copied
> into asm_defns.h
>
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH v3 1/4] x86/vmx: introduce vmwrite_safe()
2017-02-08 10:09 ` [PATCH v3 1/4] x86/vmx: introduce vmwrite_safe() Sergey Dyasli
2017-02-08 13:33 ` Jan Beulich
@ 2017-02-09 0:24 ` Tian, Kevin
1 sibling, 0 replies; 10+ messages in thread
From: Tian, Kevin @ 2017-02-09 0:24 UTC (permalink / raw)
To: Sergey Dyasli, xen-devel; +Cc: Andrew Cooper, Jan Beulich, Nakajima, Jun
> From: Sergey Dyasli [mailto:sergey.dyasli@citrix.com]
> Sent: Wednesday, February 08, 2017 6:10 PM
>
> Any fail during the original __vmwrite() leads to BUG() which can be
> easily exploited from a guest in the nested vmx mode.
>
> The new function returns error code depending on the outcome:
>
> VMsucceed: 0
> VMfailValid: VM Instruction Error Number
> VMfailInvalid: a new VMX_INSN_FAIL_INVALID
>
> A new macro GAS_VMX_OP is introduced in order to improve the
> readability of asm. Existing ASM_FLAG_OUT macro is reused and copied
> into asm_defns.h
>
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH v3 2/4] x86/vmx: improve vmread_safe()
2017-02-08 10:09 [PATCH v3 0/4] x86/vvmx: correctly emulate VMREAD and VMWRITE Sergey Dyasli
2017-02-08 10:09 ` [PATCH v3 1/4] x86/vmx: introduce vmwrite_safe() Sergey Dyasli
@ 2017-02-08 10:09 ` Sergey Dyasli
2017-02-08 13:35 ` Jan Beulich
2017-02-09 0:25 ` Tian, Kevin
2017-02-08 10:09 ` [PATCH v3 3/4] x86/vvmx: correctly emulate VMWRITE Sergey Dyasli
2017-02-08 10:09 ` [PATCH v3 4/4] x86/vvmx: correctly emulate VMREAD Sergey Dyasli
3 siblings, 2 replies; 10+ messages in thread
From: Sergey Dyasli @ 2017-02-08 10:09 UTC (permalink / raw)
To: xen-devel
Cc: Andrew Cooper, Kevin Tian, Jan Beulich, Jun Nakajima, Sergey Dyasli
The original function doesn't distinguish between Valid and Invalid
VMfails. Improved function returns error code depending on the outcome:
VMsucceed: 0
VMfailValid: VM Instruction Error Number
VMfailInvalid: VMX_INSN_FAIL_INVALID (~0)
Existing users of __vmread_safe() are updated, and the double-underscore
prefix is removed from the function's name because such prefixes are
reserved for the compiler.
Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
v2 --> v3:
* vmread_safe() now returns "enum vmx_insn_errno" (32-bit value)
xen/arch/x86/hvm/vmx/vmcs.c | 2 +-
xen/arch/x86/hvm/vmx/vvmx.c | 2 +-
xen/include/asm-x86/hvm/vmx/vmx.h | 41 +++++++++++++++++----------------------
3 files changed, 20 insertions(+), 25 deletions(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 59ef199..24f7570 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -1711,7 +1711,7 @@ static inline unsigned long vmr(unsigned long field)
{
unsigned long val;
- return __vmread_safe(field, &val) ? val : 0;
+ return vmread_safe(field, &val) ? 0 : val;
}
#define vmr16(fld) ({ \
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 105a3c0..31ac360 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -932,7 +932,7 @@ static inline void shadow_to_vvmcs(const struct vcpu *v, unsigned int field)
{
unsigned long value;
- if ( __vmread_safe(field, &value) )
+ if ( vmread_safe(field, &value) == 0 )
set_vvmcs(v, field, value);
}
diff --git a/xen/include/asm-x86/hvm/vmx/vmx.h b/xen/include/asm-x86/hvm/vmx/vmx.h
index 9bfe71a..3e809d4 100644
--- a/xen/include/asm-x86/hvm/vmx/vmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vmx.h
@@ -401,32 +401,27 @@ static always_inline void __vmwrite(unsigned long field, unsigned long value)
);
}
-static inline bool_t __vmread_safe(unsigned long field, unsigned long *value)
+static inline enum vmx_insn_errno vmread_safe(unsigned long field,
+ unsigned long *value)
{
- bool_t okay;
+ unsigned long ret = 0;
+ bool fail_invalid, fail_valid;
- asm volatile (
-#ifdef HAVE_GAS_VMX
- "vmread %2, %1\n\t"
-#else
- VMREAD_OPCODE MODRM_EAX_ECX
-#endif
- /* CF==1 or ZF==1 --> rc = 0 */
-#ifdef __GCC_ASM_FLAG_OUTPUTS__
- : "=@ccnbe" (okay),
-#else
- "setnbe %0"
- : "=qm" (okay),
-#endif
-#ifdef HAVE_GAS_VMX
- "=rm" (*value)
- : "r" (field));
-#else
- "=c" (*value)
- : "a" (field));
-#endif
+ asm volatile ( GAS_VMX_OP("vmread %[field], %[value]\n\t",
+ VMREAD_OPCODE MODRM_EAX_ECX)
+ ASM_FLAG_OUT(, "setc %[invalid]\n\t")
+ ASM_FLAG_OUT(, "setz %[valid]\n\t")
+ : ASM_FLAG_OUT("=@ccc", [invalid] "=rm") (fail_invalid),
+ ASM_FLAG_OUT("=@ccz", [valid] "=rm") (fail_valid),
+ [value] GAS_VMX_OP("=rm", "=c") (*value)
+ : [field] GAS_VMX_OP("r", "a") (field));
+
+ if ( unlikely(fail_invalid) )
+ ret = VMX_INSN_FAIL_INVALID;
+ else if ( unlikely(fail_valid) )
+ __vmread(VM_INSTRUCTION_ERROR, &ret);
- return okay;
+ return ret;
}
static inline enum vmx_insn_errno vmwrite_safe(unsigned long field,
--
2.9.3
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH v3 2/4] x86/vmx: improve vmread_safe()
2017-02-08 10:09 ` [PATCH v3 2/4] x86/vmx: improve vmread_safe() Sergey Dyasli
@ 2017-02-08 13:35 ` Jan Beulich
2017-02-09 0:25 ` Tian, Kevin
1 sibling, 0 replies; 10+ messages in thread
From: Jan Beulich @ 2017-02-08 13:35 UTC (permalink / raw)
To: Sergey Dyasli; +Cc: Andrew Cooper, Kevin Tian, Jun Nakajima, xen-devel
>>> On 08.02.17 at 11:09, <sergey.dyasli@citrix.com> wrote:
> The original function doesn't distinguish between Valid and Invalid
> VMfails. Improved function returns error code depending on the outcome:
>
> VMsucceed: 0
> VMfailValid: VM Instruction Error Number
> VMfailInvalid: VMX_INSN_FAIL_INVALID (~0)
>
> Existing users of __vmread_safe() are updated and double underscore
> prefix is removed from the function's name because such prefixes are
> reserved to a compiler.
>
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 10+ messages in thread
* Re: [PATCH v3 2/4] x86/vmx: improve vmread_safe()
2017-02-08 10:09 ` [PATCH v3 2/4] x86/vmx: improve vmread_safe() Sergey Dyasli
2017-02-08 13:35 ` Jan Beulich
@ 2017-02-09 0:25 ` Tian, Kevin
1 sibling, 0 replies; 10+ messages in thread
From: Tian, Kevin @ 2017-02-09 0:25 UTC (permalink / raw)
To: Sergey Dyasli, xen-devel; +Cc: Andrew Cooper, Jan Beulich, Nakajima, Jun
> From: Sergey Dyasli [mailto:sergey.dyasli@citrix.com]
> Sent: Wednesday, February 08, 2017 6:10 PM
>
> The original function doesn't distinguish between Valid and Invalid
> VMfails. Improved function returns error code depending on the outcome:
>
> VMsucceed: 0
> VMfailValid: VM Instruction Error Number
> VMfailInvalid: VMX_INSN_FAIL_INVALID (~0)
>
> Existing users of __vmread_safe() are updated and double underscore
> prefix is removed from the function's name because such prefixes are
> reserved to a compiler.
>
> Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
Acked-by: Kevin Tian <kevin.tian@intel.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH v3 3/4] x86/vvmx: correctly emulate VMWRITE
2017-02-08 10:09 [PATCH v3 0/4] x86/vvmx: correctly emulate VMREAD and VMWRITE Sergey Dyasli
2017-02-08 10:09 ` [PATCH v3 1/4] x86/vmx: introduce vmwrite_safe() Sergey Dyasli
2017-02-08 10:09 ` [PATCH v3 2/4] x86/vmx: improve vmread_safe() Sergey Dyasli
@ 2017-02-08 10:09 ` Sergey Dyasli
2017-02-08 13:42 ` Jan Beulich
2017-02-08 10:09 ` [PATCH v3 4/4] x86/vvmx: correctly emulate VMREAD Sergey Dyasli
3 siblings, 1 reply; 10+ messages in thread
From: Sergey Dyasli @ 2017-02-08 10:09 UTC (permalink / raw)
To: xen-devel
Cc: Andrew Cooper, Kevin Tian, Jan Beulich, Jun Nakajima, Sergey Dyasli
There is an issue with the original __vmwrite() in nested vmx mode:
emulation of a guest's VMWRITE with invalid arguments leads to BUG().
Fix this by using vmwrite_safe() and reporting any kind of VMfail back
to the guest.
New safe versions of the set_vvmcs() macro and related functions are
introduced because of the new function signatures and the large number
of existing users.
Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
v2 --> v3:
* set_vvmcs_safe() and related functions are added to match
get_vvmcs_safe()
* virtual_vmcs_vmwrite_safe() is added in order to preserve the old
behaviour for existing callers
xen/arch/x86/hvm/vmx/vmcs.c | 12 ++++++++++++
xen/arch/x86/hvm/vmx/vvmx.c | 20 +++++++++++++++++++-
xen/include/asm-x86/hvm/vmx/vmcs.h | 2 ++
xen/include/asm-x86/hvm/vmx/vvmx.h | 8 ++++++++
4 files changed, 41 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 24f7570..1a429a3 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -950,6 +950,18 @@ void virtual_vmcs_vmwrite(const struct vcpu *v, u32 vmcs_encoding, u64 val)
virtual_vmcs_exit(v);
}
+enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v,
+ u32 vmcs_encoding, u64 val)
+{
+ enum vmx_insn_errno ret;
+
+ virtual_vmcs_enter(v);
+ ret = vmwrite_safe(vmcs_encoding, val);
+ virtual_vmcs_exit(v);
+
+ return ret;
+}
+
/*
* This function is only called in a vCPU's initialization phase,
* so we can update the posted-interrupt descriptor in non-atomic way.
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 31ac360..402d5dc 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -305,6 +305,19 @@ void set_vvmcs_real(const struct vcpu *v, u32 encoding, u64 val)
virtual_vmcs_vmwrite(v, encoding, val);
}
+enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 val)
+{
+ set_vvmcs_virtual(vvmcs, encoding, val);
+
+ return 0;
+}
+
+enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *v, u32 encoding,
+ u64 val)
+{
+ return virtual_vmcs_vmwrite_safe(v, encoding, val);
+}
+
static unsigned long reg_read(struct cpu_user_regs *regs,
enum vmx_regs_enc index)
{
@@ -1740,13 +1753,18 @@ int nvmx_handle_vmwrite(struct cpu_user_regs *regs)
unsigned long operand;
u64 vmcs_encoding;
bool_t okay = 1;
+ enum vmx_insn_errno err;
if ( decode_vmx_inst(regs, &decode, &operand, 0)
!= X86EMUL_OKAY )
return X86EMUL_EXCEPTION;
vmcs_encoding = reg_read(regs, decode.reg2);
- set_vvmcs(v, vmcs_encoding, operand);
+ if ( (err = set_vvmcs_safe(v, vmcs_encoding, operand)) )
+ {
+ vmfail(regs, err);
+ return X86EMUL_OKAY;
+ }
switch ( vmcs_encoding & ~VMCS_HIGH(0) )
{
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 8d43efd..1e0fce5 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -542,6 +542,8 @@ void virtual_vmcs_enter(const struct vcpu *);
void virtual_vmcs_exit(const struct vcpu *);
u64 virtual_vmcs_vmread(const struct vcpu *, u32 encoding);
void virtual_vmcs_vmwrite(const struct vcpu *, u32 encoding, u64 val);
+enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v,
+ u32 vmcs_encoding, u64 val);
static inline int vmx_add_guest_msr(u32 msr)
{
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index 242e524..e49a000 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -183,6 +183,9 @@ u64 get_vvmcs_virtual(void *vvmcs, u32 encoding);
u64 get_vvmcs_real(const struct vcpu *, u32 encoding);
void set_vvmcs_virtual(void *vvmcs, u32 encoding, u64 val);
void set_vvmcs_real(const struct vcpu *, u32 encoding, u64 val);
+enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 val);
+enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding,
+ u64 val);
#define get_vvmcs(vcpu, encoding) \
(cpu_has_vmx_vmcs_shadowing ? \
@@ -194,6 +197,11 @@ void set_vvmcs_real(const struct vcpu *, u32 encoding, u64 val);
set_vvmcs_real(vcpu, encoding, val) : \
set_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
+#define set_vvmcs_safe(vcpu, encoding, val) \
+ (cpu_has_vmx_vmcs_shadowing ? \
+ set_vvmcs_real_safe(vcpu, encoding, val) : \
+ set_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
+
uint64_t get_shadow_eptp(struct vcpu *v);
void nvmx_destroy_vmcs(struct vcpu *v);
--
2.9.3
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 10+ messages in thread
* Re: [PATCH v3 3/4] x86/vvmx: correctly emulate VMWRITE
2017-02-08 10:09 ` [PATCH v3 3/4] x86/vvmx: correctly emulate VMWRITE Sergey Dyasli
@ 2017-02-08 13:42 ` Jan Beulich
0 siblings, 0 replies; 10+ messages in thread
From: Jan Beulich @ 2017-02-08 13:42 UTC (permalink / raw)
To: Sergey Dyasli; +Cc: Andrew Cooper, Kevin Tian, Jun Nakajima, xen-devel
>>> On 08.02.17 at 11:09, <sergey.dyasli@citrix.com> wrote:
> --- a/xen/arch/x86/hvm/vmx/vvmx.c
> +++ b/xen/arch/x86/hvm/vmx/vvmx.c
> @@ -305,6 +305,19 @@ void set_vvmcs_real(const struct vcpu *v, u32 encoding, u64 val)
> virtual_vmcs_vmwrite(v, encoding, val);
> }
>
> +enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 val)
> +{
> + set_vvmcs_virtual(vvmcs, encoding, val);
> +
> + return 0;
I think the growing number of literal zeros requires the introduction
of a "no error" enumerator value, probably even to be used by the
two earlier patches (which, if you did so, wouldn't invalidate my R-b).
Furthermore, didn't earlier discussion result in it being desirable to
at least add a comment here clarifying that the unconditional
returning of success isn't really the intended behavior (i.e. more
work is needed here)?
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 10+ messages in thread
* [PATCH v3 4/4] x86/vvmx: correctly emulate VMREAD
2017-02-08 10:09 [PATCH v3 0/4] x86/vvmx: correctly emulate VMREAD and VMWRITE Sergey Dyasli
` (2 preceding siblings ...)
2017-02-08 10:09 ` [PATCH v3 3/4] x86/vvmx: correctly emulate VMWRITE Sergey Dyasli
@ 2017-02-08 10:09 ` Sergey Dyasli
3 siblings, 0 replies; 10+ messages in thread
From: Sergey Dyasli @ 2017-02-08 10:09 UTC (permalink / raw)
To: xen-devel
Cc: Andrew Cooper, Kevin Tian, Jan Beulich, Jun Nakajima, Sergey Dyasli
There is an issue with the original __vmread() in nested vmx mode:
emulation of a guest's VMREAD with invalid arguments leads to BUG().
Fix this by using vmread_safe() and reporting any kind of VMfail back
to the guest.
New safe versions of the get_vvmcs() macro and related functions are
introduced because of the new function signatures and the large number
of existing users.
Signed-off-by: Sergey Dyasli <sergey.dyasli@citrix.com>
---
v2 --> v3:
* virtual_vmcs_vmread_safe() is added in order to preserve the old
behaviour for existing callers
xen/arch/x86/hvm/vmx/vmcs.c | 12 ++++++++++++
xen/arch/x86/hvm/vmx/vvmx.c | 19 ++++++++++++++++++-
xen/include/asm-x86/hvm/vmx/vmcs.h | 2 ++
xen/include/asm-x86/hvm/vmx/vvmx.h | 8 ++++++++
4 files changed, 40 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 1a429a3..2fd2d06 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -943,6 +943,18 @@ u64 virtual_vmcs_vmread(const struct vcpu *v, u32 vmcs_encoding)
return res;
}
+enum vmx_insn_errno virtual_vmcs_vmread_safe(const struct vcpu *v,
+ u32 vmcs_encoding, u64 *val)
+{
+ enum vmx_insn_errno ret;
+
+ virtual_vmcs_enter(v);
+ ret = vmread_safe(vmcs_encoding, val);
+ virtual_vmcs_exit(v);
+
+ return ret;
+}
+
void virtual_vmcs_vmwrite(const struct vcpu *v, u32 vmcs_encoding, u64 val)
{
virtual_vmcs_enter(v);
diff --git a/xen/arch/x86/hvm/vmx/vvmx.c b/xen/arch/x86/hvm/vmx/vvmx.c
index 402d5dc..a82ac50 100644
--- a/xen/arch/x86/hvm/vmx/vvmx.c
+++ b/xen/arch/x86/hvm/vmx/vvmx.c
@@ -264,6 +264,19 @@ u64 get_vvmcs_real(const struct vcpu *v, u32 encoding)
return virtual_vmcs_vmread(v, encoding);
}
+enum vmx_insn_errno get_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 *val)
+{
+ *val = get_vvmcs_virtual(vvmcs, encoding);
+
+ return 0;
+}
+
+enum vmx_insn_errno get_vvmcs_real_safe(const struct vcpu *v, u32 encoding,
+ u64 *val)
+{
+ return virtual_vmcs_vmread_safe(v, encoding, val);
+}
+
void set_vvmcs_virtual(void *vvmcs, u32 vmcs_encoding, u64 val)
{
union vmcs_encoding enc;
@@ -1727,7 +1740,11 @@ int nvmx_handle_vmread(struct cpu_user_regs *regs)
if ( rc != X86EMUL_OKAY )
return rc;
- value = get_vvmcs(v, reg_read(regs, decode.reg2));
+ if ( (rc = get_vvmcs_safe(v, reg_read(regs, decode.reg2), &value)) )
+ {
+ vmfail(regs, rc);
+ return X86EMUL_OKAY;
+ }
switch ( decode.type ) {
case VMX_INST_MEMREG_TYPE_MEMORY:
diff --git a/xen/include/asm-x86/hvm/vmx/vmcs.h b/xen/include/asm-x86/hvm/vmx/vmcs.h
index 1e0fce5..ec4db43 100644
--- a/xen/include/asm-x86/hvm/vmx/vmcs.h
+++ b/xen/include/asm-x86/hvm/vmx/vmcs.h
@@ -541,6 +541,8 @@ int vmx_check_msr_bitmap(unsigned long *msr_bitmap, u32 msr, int access_type);
void virtual_vmcs_enter(const struct vcpu *);
void virtual_vmcs_exit(const struct vcpu *);
u64 virtual_vmcs_vmread(const struct vcpu *, u32 encoding);
+enum vmx_insn_errno virtual_vmcs_vmread_safe(const struct vcpu *v,
+ u32 vmcs_encoding, u64 *val);
void virtual_vmcs_vmwrite(const struct vcpu *, u32 encoding, u64 val);
enum vmx_insn_errno virtual_vmcs_vmwrite_safe(const struct vcpu *v,
u32 vmcs_encoding, u64 val);
diff --git a/xen/include/asm-x86/hvm/vmx/vvmx.h b/xen/include/asm-x86/hvm/vmx/vvmx.h
index e49a000..ca2fb25 100644
--- a/xen/include/asm-x86/hvm/vmx/vvmx.h
+++ b/xen/include/asm-x86/hvm/vmx/vvmx.h
@@ -183,6 +183,9 @@ u64 get_vvmcs_virtual(void *vvmcs, u32 encoding);
u64 get_vvmcs_real(const struct vcpu *, u32 encoding);
void set_vvmcs_virtual(void *vvmcs, u32 encoding, u64 val);
void set_vvmcs_real(const struct vcpu *, u32 encoding, u64 val);
+enum vmx_insn_errno get_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 *val);
+enum vmx_insn_errno get_vvmcs_real_safe(const struct vcpu *, u32 encoding,
+ u64 *val);
enum vmx_insn_errno set_vvmcs_virtual_safe(void *vvmcs, u32 encoding, u64 val);
enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding,
u64 val);
@@ -197,6 +200,11 @@ enum vmx_insn_errno set_vvmcs_real_safe(const struct vcpu *, u32 encoding,
set_vvmcs_real(vcpu, encoding, val) : \
set_vvmcs_virtual(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
+#define get_vvmcs_safe(vcpu, encoding, val) \
+ (cpu_has_vmx_vmcs_shadowing ? \
+ get_vvmcs_real_safe(vcpu, encoding, val) : \
+ get_vvmcs_virtual_safe(vcpu_nestedhvm(vcpu).nv_vvmcx, encoding, val))
+
#define set_vvmcs_safe(vcpu, encoding, val) \
(cpu_has_vmx_vmcs_shadowing ? \
set_vvmcs_real_safe(vcpu, encoding, val) : \
--
2.9.3
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 10+ messages in thread