All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH kvm-unit-tests] svm: Verify a pending interrupt queued before entering the guest is not lost
@ 2019-11-05 15:12 Cathy Avery
  2019-11-05 16:45 ` Paolo Bonzini
  0 siblings, 1 reply; 2+ messages in thread
From: Cathy Avery @ 2019-11-05 15:12 UTC (permalink / raw)
  To: kvm, pbonzini

This patch is based on Liran Alon's <liran.alon@oracle.com>
commit fd056f5b89ac for vmx.

The test configures the VMCB to intercept external interrupts and then
queues an interrupt by disabling interrupts and issuing a self-IPI.
At this point, we enter the guest and expect the CPU to immediately exit
the guest on the external interrupt. To complete the test, we then re-enable
interrupts, verify the interrupt is dispatched, and re-enter the guest to
verify it completes execution.

Signed-off-by: Cathy Avery <cavery@redhat.com>
---
 x86/svm.c | 93 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 93 insertions(+)

diff --git a/x86/svm.c b/x86/svm.c
index 4ddfaa4..6d2ab98 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -7,6 +7,8 @@
 #include "smp.h"
 #include "types.h"
 #include "alloc_page.h"
+#include "isr.h"
+#include "apic.h"
 
 #define SVM_EXIT_MAX_DR_INTERCEPT 0x3f
 
@@ -774,6 +776,13 @@ static int get_test_stage(struct test *test)
     return test->scratch;
 }
 
+static void set_test_stage(struct test *test, int s)
+{
+    barrier();
+    test->scratch = s;
+    barrier();
+}
+
 static void inc_test_stage(struct test *test)
 {
     barrier();
@@ -1292,6 +1301,88 @@ static bool lat_svm_insn_check(struct test *test)
             latclgi_min, clgi_sum / LATENCY_RUNS);
     return true;
 }
+
+bool pending_event_ipi_fired;
+bool pending_event_guest_run;
+
+static void pending_event_ipi_isr(isr_regs_t *regs)
+{
+    pending_event_ipi_fired = true;
+    eoi();
+}
+
+static void pending_event_prepare(struct test *test)
+{
+    int ipi_vector = 0xf1;
+
+    default_prepare(test);
+
+    pending_event_ipi_fired = false;
+
+    handle_irq(ipi_vector, pending_event_ipi_isr);
+
+    pending_event_guest_run = false;
+
+    test->vmcb->control.intercept |= (1ULL << INTERCEPT_INTR);
+    test->vmcb->control.int_ctl |= V_INTR_MASKING_MASK;
+
+    apic_icr_write(APIC_DEST_SELF | APIC_DEST_PHYSICAL |
+                  APIC_DM_FIXED | ipi_vector, 0);
+
+    set_test_stage(test, 0);
+}
+
+static void pending_event_test(struct test *test)
+{
+    pending_event_guest_run = true;
+}
+
+static bool pending_event_finished(struct test *test)
+{
+    switch (get_test_stage(test)) {
+    case 0:
+        if (test->vmcb->control.exit_code != SVM_EXIT_INTR) {
+            report("VMEXIT not due to pending interrupt. Exit reason 0x%x",
+            false, test->vmcb->control.exit_code);
+            return true;
+        }
+
+        test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
+        test->vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
+
+        if (pending_event_guest_run) {
+            report("Guest ran before host received IPI\n", false);
+            return true;
+        }
+
+        irq_enable();
+        asm volatile ("nop");
+        irq_disable();
+
+        if (!pending_event_ipi_fired) {
+            report("Pending interrupt not dispatched after IRQ enabled\n", false);
+            return true;
+        }
+        break;
+
+    case 1:
+        if (!pending_event_guest_run) {
+            report("Guest did not resume when no interrupt\n", false);
+            return true;
+        }
+        break;
+    }
+
+    inc_test_stage(test);
+
+    return get_test_stage(test) == 2;
+}
+
+static bool pending_event_check(struct test *test)
+{
+    return get_test_stage(test) == 2;
+}
+
 static struct test tests[] = {
     { "null", default_supported, default_prepare, null_test,
       default_finished, null_check },
@@ -1342,6 +1433,8 @@ static struct test tests[] = {
       latency_finished, latency_check },
     { "latency_svm_insn", default_supported, lat_svm_insn_prepare, null_test,
       lat_svm_insn_finished, lat_svm_insn_check },
+    { "pending_event", default_supported, pending_event_prepare,
+      pending_event_test, pending_event_finished, pending_event_check },
 };
 
 int main(int ac, char **av)
-- 
2.20.1


^ permalink raw reply related	[flat|nested] 2+ messages in thread

* Re: [PATCH kvm-unit-tests] svm: Verify a pending interrupt queued before entering the guest is not lost
  2019-11-05 15:12 [PATCH kvm-unit-tests] svm: Verify a pending interrupt queued before entering the guest is not lost Cathy Avery
@ 2019-11-05 16:45 ` Paolo Bonzini
  0 siblings, 0 replies; 2+ messages in thread
From: Paolo Bonzini @ 2019-11-05 16:45 UTC (permalink / raw)
  To: Cathy Avery, kvm

On 05/11/19 16:12, Cathy Avery wrote:
> +static void pending_event_prepare(struct test *test)
> +{
> +    int ipi_vector = 0xf1;
> +
> +    default_prepare(test);
> +

I think this test should call VMRUN with EFLAGS.IF=1.  The GIF/IF
interaction is as follows:

- in the host, IF is simply ignored with GIF=0.  VMRUN atomically sets
GIF to 1. Therefore, interrupts are wholly masked between CLGI and VMRUN
and between VMRUN and STGI, but not during VMRUN.  Currently, interrupts
are masked during all of VMRUN actually, because default_prepare clears
the interrupt flag.

- in the guest, if VINTR_MASKING=0 (in kvm-unit-tests: bit 24 of
int_ctl), IF governs whether host interrupts are delivered even while
the guest is running.

- if VINTR_MASKING=1, however, the pre-VMRUN value of IF (that's stored
in HF_HIF_MASK, let's call it HIF from now on) governs whether host
interrupts are delivered.

Actually, I think HIF=1 is a good default for most tests, so I would
start with something like

diff --git a/x86/svm.c b/x86/svm.c
index 4ddfaa4..7db3798 100644
--- a/x86/svm.c
+++ b/x86/svm.c
@@ -254,6 +255,7 @@ static void test_run(struct test *test,
     u64 vmcb_phys = virt_to_phys(vmcb);
     u64 guest_stack[10000];

+    irq_disable();
     test->vmcb = vmcb;
     test->prepare(test);
     vmcb->save.rip = (ulong)test_thunk;
@@ -269,7 +271,9 @@ static void test_run(struct test *test,
             "mov regs, %%r15\n\t"       // rax
             "mov %%r15, 0x1f8(%0)\n\t"
             LOAD_GPR_C
+            "sti \n\t"		// only used if V_INTR_MASKING=1
             "vmrun \n\t"
+            "cli \n\t"
             SAVE_GPR_C
             "mov 0x170(%0), %%r15\n\t"  // rflags
             "mov %%r15, regs+0x80\n\t"
@@ -284,6 +288,7 @@ static void test_run(struct test *test, struct vmcb
*vmcb)
 	tsc_end = rdtsc();
         ++test->exits;
     } while (!test->finished(test));
+    irq_enable();

     report("%s", test->succeeded(test), test->name);
 }
@@ -301,7 +306,6 @@ static bool default_supported(void)
 static void default_prepare(struct test *test)
 {
     vmcb_ident(test->vmcb);
-    cli();
 }

 static bool default_finished(struct test *test)

and see if it breaks something.  Then, it's probably useful to modify
your prepare callback to set IF=1 in the regs.rflags, since otherwise
the interrupts shouldn't be processed anyway.

> +        test->vmcb->control.intercept &= ~(1ULL << INTERCEPT_INTR);
> +        test->vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
> +

I think these adjustments are not needed.  However, you could add two
other tests:

- one which runs with V_INTR_MASKING=1 and HIF=0.  In that case, the
VMMCALL should be reached without a prior SVM_EXIT_INTR vmexit.

- one which runs with V_INTR_MASKING=0 and EFLAGS.IF=1.  In that case,
the VMMCALL should be reached without a prior SVM_EXIT_INTR vmexit, and
the interrupt should be delivered while in guest mode, before
pending_event_guest_run is set.

Thanks,

Paolo

^ permalink raw reply related	[flat|nested] 2+ messages in thread

end of thread, other threads:[~2019-11-05 16:45 UTC | newest]

Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2019-11-05 15:12 [PATCH kvm-unit-tests] svm: Verify a pending interrupt queued before entering the guest is not lost Cathy Avery
2019-11-05 16:45 ` Paolo Bonzini

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.