* [PATCH 0/5 v3] KVM: nSVM: Check addresses of MSR bitmap and IO bitmap tables on vmrun of nested guests
@ 2021-02-03 0:40 Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 1/5 v3] KVM: SVM: Move IOPM_ALLOC_ORDER and MSRPM_ALLOC_ORDER #defines to svm.h Krish Sadhukhan
` (4 more replies)
0 siblings, 5 replies; 7+ messages in thread
From: Krish Sadhukhan @ 2021-02-03 0:40 UTC (permalink / raw)
To: kvm; +Cc: pbonzini, jmattson, seanjc
v2 -> v3:
1. Moved the #defines for IOPM_ALLOC_ORDER and MSRPM_ALLOC_ORDER so
that they can be used by nested_vmcb_check_controls()
2. Fixed the wrong check in nested_vmcb_check_controls() in patch# 2
(which was patch# 1 in v2).
3. Added a clean-up patch for nested_svm_vmrun().
[PATCH 1/5 v3] KVM: SVM: Move IOPM_ALLOC_ORDER and MSRPM_ALLOC_ORDER
[PATCH 2/5 v3] nSVM: Check addresses of MSR and IO bitmap
[PATCH 3/5 v3] KVM: nSVM: Cleanup in nested_svm_vmrun()
[PATCH 4/5 v3] Test: nSVM: Test MSR and IO bitmap address
[PATCH 5/5 v3] Test: SVM: Use ALIGN macro when aligning 'io_bitmap_area'
arch/x86/kvm/svm/nested.c | 63 +++++++++++++++++++++++++++--------------------
arch/x86/kvm/svm/svm.c | 3 ---
arch/x86/kvm/svm/svm.h | 3 +++
3 files changed, 39 insertions(+), 30 deletions(-)
Krish Sadhukhan (3):
KVM: SVM: Move IOPM_ALLOC_ORDER and MSRPM_ALLOC_ORDER #defines to svm.h
nSVM: Check addresses of MSR and IO bitmap
KVM: nSVM: Cleanup in nested_svm_vmrun()
x86/svm.c | 2 +-
x86/svm_tests.c | 38 ++++++++++++++++++++++++++++++++++++++
2 files changed, 39 insertions(+), 1 deletion(-)
Krish Sadhukhan (2):
nSVM: Test MSR and IO bitmap address
SVM: Use ALIGN macro when aligning 'io_bitmap_area'
^ permalink raw reply [flat|nested] 7+ messages in thread
* [PATCH 1/5 v3] KVM: SVM: Move IOPM_ALLOC_ORDER and MSRPM_ALLOC_ORDER #defines to svm.h
2021-02-03 0:40 [PATCH 0/5 v3] KVM: nSVM: Check addresses of MSR bitmap and IO bitmap tables on vmrun of nested guests Krish Sadhukhan
@ 2021-02-03 0:40 ` Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 2/5 v3] nSVM: Check addresses of MSR and IO bitmap Krish Sadhukhan
` (3 subsequent siblings)
4 siblings, 0 replies; 7+ messages in thread
From: Krish Sadhukhan @ 2021-02-03 0:40 UTC (permalink / raw)
To: kvm; +Cc: pbonzini, jmattson, seanjc
These #defines will be used by nested.c in the next patch. So move these to
svm.h.
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
arch/x86/kvm/svm/svm.c | 3 ---
arch/x86/kvm/svm/svm.h | 3 +++
2 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index f923e14e87df..1641cb8ac5dd 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -55,9 +55,6 @@ static const struct x86_cpu_id svm_cpu_id[] = {
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
#endif
-#define IOPM_ALLOC_ORDER 2
-#define MSRPM_ALLOC_ORDER 1
-
#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 0fe874ae5498..f529a259a03e 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -42,6 +42,9 @@ static const struct svm_host_save_msrs {
};
#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
+#define IOPM_ALLOC_ORDER 2
+#define MSRPM_ALLOC_ORDER 1
+
#define MAX_DIRECT_ACCESS_MSRS 18
#define MSRPM_OFFSETS 16
extern u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;
--
2.27.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 2/5 v3] nSVM: Check addresses of MSR and IO bitmap
2021-02-03 0:40 [PATCH 0/5 v3] KVM: nSVM: Check addresses of MSR bitmap and IO bitmap tables on vmrun of nested guests Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 1/5 v3] KVM: SVM: Move IOPM_ALLOC_ORDER and MSRPM_ALLOC_ORDER #defines to svm.h Krish Sadhukhan
@ 2021-02-03 0:40 ` Krish Sadhukhan
2021-02-03 8:23 ` Paolo Bonzini
2021-02-03 0:40 ` [PATCH 3/5 v3] KVM: nSVM: Cleanup in nested_svm_vmrun() Krish Sadhukhan
` (2 subsequent siblings)
4 siblings, 1 reply; 7+ messages in thread
From: Krish Sadhukhan @ 2021-02-03 0:40 UTC (permalink / raw)
To: kvm; +Cc: pbonzini, jmattson, seanjc
According to section "Canonicalization and Consistency Checks" in APM vol 2,
the following guest state is illegal:
"The MSR or IOIO intercept tables extend to a physical address that
is greater than or equal to the maximum supported physical address."
Also check that these addresses are aligned on page boundary.
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
arch/x86/kvm/svm/nested.c | 20 ++++++++++++++------
1 file changed, 14 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 7a605ad8254d..caf285e643db 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -214,7 +214,8 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
return true;
}
-static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
+static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
+ struct vmcb_control_area *control)
{
if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
return false;
@@ -226,10 +227,17 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
!npt_enabled)
return false;
+ if (!page_address_valid(vcpu, control->msrpm_base_pa +
+ MSRPM_ALLOC_ORDER * PAGE_SIZE))
+ return false;
+ if (!page_address_valid(vcpu, control->iopm_base_pa +
+ IOPM_ALLOC_ORDER * PAGE_SIZE))
+ return false;
+
return true;
}
-static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
+static bool nested_vmcb_checks(struct kvm_vcpu *vcpu, struct vmcb *vmcb12)
{
bool vmcb12_lma;
@@ -258,10 +266,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
(vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
return false;
}
- if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
+ if (!kvm_is_valid_cr4(vcpu, vmcb12->save.cr4))
return false;
- return nested_vmcb_check_controls(&vmcb12->control);
+ return nested_vmcb_check_controls(vcpu, &vmcb12->control);
}
static void load_nested_vmcb_control(struct vcpu_svm *svm,
@@ -488,7 +496,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
if (WARN_ON_ONCE(!svm->nested.initialized))
return -EINVAL;
- if (!nested_vmcb_checks(svm, vmcb12)) {
+ if (!nested_vmcb_checks(&svm->vcpu, vmcb12)) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_code_hi = 0;
vmcb12->control.exit_info_1 = 0;
@@ -1176,7 +1184,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
goto out_free;
ret = -EINVAL;
- if (!nested_vmcb_check_controls(ctl))
+ if (!nested_vmcb_check_controls(vcpu, ctl))
goto out_free;
/*
--
2.27.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 3/5 v3] KVM: nSVM: Cleanup in nested_svm_vmrun()
2021-02-03 0:40 [PATCH 0/5 v3] KVM: nSVM: Check addresses of MSR bitmap and IO bitmap tables on vmrun of nested guests Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 1/5 v3] KVM: SVM: Move IOPM_ALLOC_ORDER and MSRPM_ALLOC_ORDER #defines to svm.h Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 2/5 v3] nSVM: Check addresses of MSR and IO bitmap Krish Sadhukhan
@ 2021-02-03 0:40 ` Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 4/5 v3] Test: nSVM: Test MSR and IO bitmap address Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 5/5 v3] Test: SVM: Use ALIGN macro when aligning 'io_bitmap_area' Krish Sadhukhan
4 siblings, 0 replies; 7+ messages in thread
From: Krish Sadhukhan @ 2021-02-03 0:40 UTC (permalink / raw)
To: kvm; +Cc: pbonzini, jmattson, seanjc
Use local variables to dereference svm->vcpu and svm->vmcb as they make the
code tidier.
Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
arch/x86/kvm/svm/nested.c | 45 ++++++++++++++++++++-------------------
1 file changed, 23 insertions(+), 22 deletions(-)
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index caf285e643db..e9228fdac9b7 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -470,33 +470,34 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
{
int ret;
struct vmcb *vmcb12;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
struct vmcb *hsave = svm->nested.hsave;
struct vmcb *vmcb = svm->vmcb;
struct kvm_host_map map;
u64 vmcb12_gpa;
- if (is_smm(&svm->vcpu)) {
- kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+ if (is_smm(vcpu)) {
+ kvm_queue_exception(vcpu, UD_VECTOR);
return 1;
}
- vmcb12_gpa = svm->vmcb->save.rax;
- ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb12_gpa), &map);
+ vmcb12_gpa = vmcb->save.rax;
+ ret = kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map);
if (ret == -EINVAL) {
- kvm_inject_gp(&svm->vcpu, 0);
+ kvm_inject_gp(vcpu, 0);
return 1;
} else if (ret) {
- return kvm_skip_emulated_instruction(&svm->vcpu);
+ return kvm_skip_emulated_instruction(vcpu);
}
- ret = kvm_skip_emulated_instruction(&svm->vcpu);
+ ret = kvm_skip_emulated_instruction(vcpu);
vmcb12 = map.hva;
if (WARN_ON_ONCE(!svm->nested.initialized))
return -EINVAL;
- if (!nested_vmcb_checks(&svm->vcpu, vmcb12)) {
+ if (!nested_vmcb_checks(vcpu, vmcb12)) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_code_hi = 0;
vmcb12->control.exit_info_1 = 0;
@@ -504,7 +505,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
goto out;
}
- trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb12_gpa,
+ trace_kvm_nested_vmrun(vmcb->save.rip, vmcb12_gpa,
vmcb12->save.rip,
vmcb12->control.int_ctl,
vmcb12->control.event_inj,
@@ -518,8 +519,8 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
vmcb12->control.intercepts[INTERCEPT_WORD5]);
/* Clear internal status */
- kvm_clear_exception_queue(&svm->vcpu);
- kvm_clear_interrupt_queue(&svm->vcpu);
+ kvm_clear_exception_queue(vcpu);
+ kvm_clear_interrupt_queue(vcpu);
/*
* Save the old vmcb, so we don't need to pick what we save, but can
@@ -531,17 +532,17 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
hsave->save.ds = vmcb->save.ds;
hsave->save.gdtr = vmcb->save.gdtr;
hsave->save.idtr = vmcb->save.idtr;
- hsave->save.efer = svm->vcpu.arch.efer;
- hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
- hsave->save.cr4 = svm->vcpu.arch.cr4;
- hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
- hsave->save.rip = kvm_rip_read(&svm->vcpu);
+ hsave->save.efer = vcpu->arch.efer;
+ hsave->save.cr0 = kvm_read_cr0(vcpu);
+ hsave->save.cr4 = vcpu->arch.cr4;
+ hsave->save.rflags = kvm_get_rflags(vcpu);
+ hsave->save.rip = kvm_rip_read(vcpu);
hsave->save.rsp = vmcb->save.rsp;
hsave->save.rax = vmcb->save.rax;
if (npt_enabled)
hsave->save.cr3 = vmcb->save.cr3;
else
- hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
+ hsave->save.cr3 = kvm_read_cr3(vcpu);
copy_vmcb_control_area(&hsave->control, &vmcb->control);
@@ -556,15 +557,15 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
out_exit_err:
svm->nested.nested_run_pending = 0;
- svm->vmcb->control.exit_code = SVM_EXIT_ERR;
- svm->vmcb->control.exit_code_hi = 0;
- svm->vmcb->control.exit_info_1 = 0;
- svm->vmcb->control.exit_info_2 = 0;
+ vmcb->control.exit_code = SVM_EXIT_ERR;
+ vmcb->control.exit_code_hi = 0;
+ vmcb->control.exit_info_1 = 0;
+ vmcb->control.exit_info_2 = 0;
nested_svm_vmexit(svm);
out:
- kvm_vcpu_unmap(&svm->vcpu, &map, true);
+ kvm_vcpu_unmap(vcpu, &map, true);
return ret;
}
--
2.27.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 4/5 v3] Test: nSVM: Test MSR and IO bitmap address
2021-02-03 0:40 [PATCH 0/5 v3] KVM: nSVM: Check addresses of MSR bitmap and IO bitmap tables on vmrun of nested guests Krish Sadhukhan
` (2 preceding siblings ...)
2021-02-03 0:40 ` [PATCH 3/5 v3] KVM: nSVM: Cleanup in nested_svm_vmrun() Krish Sadhukhan
@ 2021-02-03 0:40 ` Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 5/5 v3] Test: SVM: Use ALIGN macro when aligning 'io_bitmap_area' Krish Sadhukhan
4 siblings, 0 replies; 7+ messages in thread
From: Krish Sadhukhan @ 2021-02-03 0:40 UTC (permalink / raw)
To: kvm; +Cc: pbonzini, jmattson, seanjc
According to section "Canonicalization and Consistency Checks" in APM vol 2,
the following guest state is illegal:
"The MSR or IOIO intercept tables extend to a physical address that
is greater than or equal to the maximum supported physical address."
Also test that these addresses are aligned on page boundary.
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
x86/svm_tests.c | 38 ++++++++++++++++++++++++++++++++++++++
1 file changed, 38 insertions(+)
diff --git a/x86/svm_tests.c b/x86/svm_tests.c
index dc86efd..929a3e1 100644
--- a/x86/svm_tests.c
+++ b/x86/svm_tests.c
@@ -2304,6 +2304,43 @@ static void test_dr(void)
vmcb->save.dr7 = dr_saved;
}
+extern u8 msr_bitmap_area[];
+extern u8 io_bitmap_area[];
+
+#define TEST_BITMAP_ADDR(prot_type, bitmap_addr, msg) { \
+ vmcb->control.intercept = 1ULL << prot_type; \
+ addr_unalign = virt_to_phys(bitmap_addr); \
+ if (prot_type == INTERCEPT_MSR_PROT) \
+ vmcb->control.msrpm_base_pa = addr_unalign; \
+ else \
+ vmcb->control.iopm_base_pa = addr_unalign; \
+ report(svm_vmrun() == SVM_EXIT_ERR, "Test %s address: %lx", msg,\
+ addr_unalign); \
+ vmcb->control.msrpm_base_pa = addr_spill_beyond_ram; \
+ report(svm_vmrun() == SVM_EXIT_ERR, "Test %s address: %lx", msg,\
+ addr_spill_beyond_ram); \
+} \
+
+/*
+ * If the MSR or IOIO intercept table extends to a physical address that
+ * is greater than or equal to the maximum supported physical address, the
+ * guest state is illegal.
+ *
+ * [ APM vol 2]
+ */
+static void test_msrpm_iopm_bitmap_addrs(void)
+{
+ u64 addr_unalign;
+ u64 addr_spill_beyond_ram =
+ (u64)(((u64)1 << cpuid_maxphyaddr()) - 4096);
+
+ /* MSR bitmap address */
+ TEST_BITMAP_ADDR(INTERCEPT_MSR_PROT, msr_bitmap_area, "MSRPM");
+
+ /* IO bitmap address */
+ TEST_BITMAP_ADDR(INTERCEPT_IOIO_PROT, io_bitmap_area, "IOPM");
+}
+
static void svm_guest_state_test(void)
{
test_set_guest(basic_guest_main);
@@ -2313,6 +2350,7 @@ static void svm_guest_state_test(void)
test_cr3();
test_cr4();
test_dr();
+ test_msrpm_iopm_bitmap_addrs();
}
struct svm_test svm_tests[] = {
--
2.27.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* [PATCH 5/5 v3] Test: SVM: Use ALIGN macro when aligning 'io_bitmap_area'
2021-02-03 0:40 [PATCH 0/5 v3] KVM: nSVM: Check addresses of MSR bitmap and IO bitmap tables on vmrun of nested guests Krish Sadhukhan
` (3 preceding siblings ...)
2021-02-03 0:40 ` [PATCH 4/5 v3] Test: nSVM: Test MSR and IO bitmap address Krish Sadhukhan
@ 2021-02-03 0:40 ` Krish Sadhukhan
4 siblings, 0 replies; 7+ messages in thread
From: Krish Sadhukhan @ 2021-02-03 0:40 UTC (permalink / raw)
To: kvm; +Cc: pbonzini, jmattson, seanjc
Since the macro is available and we already use it for MSR bitmap table, use
it for aligning IO bitmap table also.
Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
---
x86/svm.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/x86/svm.c b/x86/svm.c
index a1808c7..846cf2a 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -298,7 +298,7 @@ static void setup_svm(void)
wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_NX);
- io_bitmap = (void *) (((ulong)io_bitmap_area + 4095) & ~4095);
+ io_bitmap = (void *) ALIGN((ulong)io_bitmap_area, PAGE_SIZE);
msr_bitmap = (void *) ALIGN((ulong)msr_bitmap_area, PAGE_SIZE);
--
2.27.0
^ permalink raw reply related [flat|nested] 7+ messages in thread
* Re: [PATCH 2/5 v3] nSVM: Check addresses of MSR and IO bitmap
2021-02-03 0:40 ` [PATCH 2/5 v3] nSVM: Check addresses of MSR and IO bitmap Krish Sadhukhan
@ 2021-02-03 8:23 ` Paolo Bonzini
0 siblings, 0 replies; 7+ messages in thread
From: Paolo Bonzini @ 2021-02-03 8:23 UTC (permalink / raw)
To: Krish Sadhukhan, kvm; +Cc: jmattson, seanjc
On 03/02/21 01:40, Krish Sadhukhan wrote:
> According to section "Canonicalization and Consistency Checks" in APM vol 2,
> the following guest state is illegal:
>
> "The MSR or IOIO intercept tables extend to a physical address that
> is greater than or equal to the maximum supported physical address."
>
> Also check that these addresses are aligned on page boundary.
>
> Signed-off-by: Krish Sadhukhan <krish.sadhukhan@oracle.com>
> ---
> arch/x86/kvm/svm/nested.c | 20 ++++++++++++++------
> 1 file changed, 14 insertions(+), 6 deletions(-)
>
> diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
> index 7a605ad8254d..caf285e643db 100644
> --- a/arch/x86/kvm/svm/nested.c
> +++ b/arch/x86/kvm/svm/nested.c
> @@ -214,7 +214,8 @@ static bool svm_get_nested_state_pages(struct kvm_vcpu *vcpu)
> return true;
> }
>
> -static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
> +static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu,
> + struct vmcb_control_area *control)
> {
> if ((vmcb_is_intercept(control, INTERCEPT_VMRUN)) == 0)
> return false;
> @@ -226,10 +227,17 @@ static bool nested_vmcb_check_controls(struct vmcb_control_area *control)
> !npt_enabled)
> return false;
>
> + if (!page_address_valid(vcpu, control->msrpm_base_pa +
> + MSRPM_ALLOC_ORDER * PAGE_SIZE))
> + return false;
> + if (!page_address_valid(vcpu, control->iopm_base_pa +
> + IOPM_ALLOC_ORDER * PAGE_SIZE))
There are four problems:
1) The value does not have to be page-aligned
2) you also have an off-by-one here, the value to be checked is the last
byte of the previous page
3) ORDER is a shift count not a number of pages
4) there could be an overflow
1-3 can be fixed by something like this:
if (!page_address_valid(vcpu,
PAGE_ALIGN(control->xyz_pa) +
((PAGE_SIZE << XYZ_ALLOC_ORDER) - 1)));
but it's even better to extract everything to a new function and not use
page_address_valid at all.
static inline nested_check_pa(struct kvm_vcpu *vcpu, uint64_t pa,
unsigned int order)
{
uint64_t last_pa = PAGE_ALIGN(pa) + (PAGE_SIZE << order) - 1;
return last_pa > pa && !(last_pa >> cpuid_maxphyaddr(vcpu));
}
Paolo
> + return false;
> +
> return true;
> }
>
> -static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
> +static bool nested_vmcb_checks(struct kvm_vcpu *vcpu, struct vmcb *vmcb12)
> {
> bool vmcb12_lma;
>
> @@ -258,10 +266,10 @@ static bool nested_vmcb_checks(struct vcpu_svm *svm, struct vmcb *vmcb12)
> (vmcb12->save.cr3 & MSR_CR3_LONG_MBZ_MASK))
> return false;
> }
> - if (!kvm_is_valid_cr4(&svm->vcpu, vmcb12->save.cr4))
> + if (!kvm_is_valid_cr4(vcpu, vmcb12->save.cr4))
> return false;
>
> - return nested_vmcb_check_controls(&vmcb12->control);
> + return nested_vmcb_check_controls(vcpu, &vmcb12->control);
> }
>
> static void load_nested_vmcb_control(struct vcpu_svm *svm,
> @@ -488,7 +496,7 @@ int nested_svm_vmrun(struct vcpu_svm *svm)
> if (WARN_ON_ONCE(!svm->nested.initialized))
> return -EINVAL;
>
> - if (!nested_vmcb_checks(svm, vmcb12)) {
> + if (!nested_vmcb_checks(&svm->vcpu, vmcb12)) {
> vmcb12->control.exit_code = SVM_EXIT_ERR;
> vmcb12->control.exit_code_hi = 0;
> vmcb12->control.exit_info_1 = 0;
> @@ -1176,7 +1184,7 @@ static int svm_set_nested_state(struct kvm_vcpu *vcpu,
> goto out_free;
>
> ret = -EINVAL;
> - if (!nested_vmcb_check_controls(ctl))
> + if (!nested_vmcb_check_controls(vcpu, ctl))
> goto out_free;
>
> /*
>
^ permalink raw reply [flat|nested] 7+ messages in thread
end of thread, other threads:[~2021-02-03 8:24 UTC | newest]
Thread overview: 7+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2021-02-03 0:40 [PATCH 0/5 v3] KVM: nSVM: Check addresses of MSR bitmap and IO bitmap tables on vmrun of nested guests Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 1/5 v3] KVM: SVM: Move IOPM_ALLOC_ORDER and MSRPM_ALLOC_ORDER #defines to svm.h Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 2/5 v3] nSVM: Check addresses of MSR and IO bitmap Krish Sadhukhan
2021-02-03 8:23 ` Paolo Bonzini
2021-02-03 0:40 ` [PATCH 3/5 v3] KVM: nSVM: Cleanup in nested_svm_vmrun() Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 4/5 v3] Test: nSVM: Test MSR and IO bitmap address Krish Sadhukhan
2021-02-03 0:40 ` [PATCH 5/5 v3] Test: SVM: Use ALIGN macro when aligning 'io_bitmap_area' Krish Sadhukhan
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.