From: James Houghton <jthoughton@google.com>
To: Anish Moorthy <amoorthy@google.com>
Cc: pbonzini@redhat.com, maz@kernel.org, oliver.upton@linux.dev,
	seanjc@google.com, bgardon@google.com, dmatlack@google.com,
	ricarkol@google.com, axelrasmussen@google.com, peterx@redhat.com,
	kvm@vger.kernel.org, kvmarm@lists.linux.dev
Subject: Re: [PATCH v3 22/22] KVM: selftests: Handle memory fault exits in demand_paging_test
Date: Thu, 27 Apr 2023 08:48:08 -0700
Message-ID: <CADrL8HUQWFtY+2XgT0f6fBhWrdTCvQ1n-99k9oV_5UiFnamv9Q@mail.gmail.com>
In-Reply-To: <20230412213510.1220557-23-amoorthy@google.com>

On Wed, Apr 12, 2023 at 2:35 PM Anish Moorthy <amoorthy@google.com> wrote:
>
> Demonstrate a (very basic) scheme for supporting memory fault exits.
>
> From the vCPU threads:
> 1. Simply issue UFFDIO_COPY/CONTINUEs in response to memory fault exits,
>    with the purpose of establishing the absent mappings. Do so with
>    wake_waiters=false to avoid serializing on the userfaultfd wait queue
>    locks.
>
> 2. When the UFFDIO_COPY/CONTINUE in (1) fails with EEXIST,
>    assume that the mapping was already established but is currently
>    absent [A] and attempt to populate it using MADV_POPULATE_WRITE.
>
> Issue UFFDIO_COPY/CONTINUEs from the reader threads as well, but with
> wake_waiters=true to ensure that any threads sleeping on the uffd are
> eventually woken up.
>
> A real VMM would track whether it had already COPY/CONTINUEd pages (eg,
> via a bitmap) to avoid calls destined to EEXIST. However, even the
> naive approach is enough to demonstrate the performance advantages of
> KVM_EXIT_MEMORY_FAULT.
>
> [A] In reality it is much likelier that the vCPU thread simply lost a
>     race to establish the mapping for the page.
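
As an aside on the "track via a bitmap" point above: a minimal sketch of
that kind of dedupe might look like the below. This is purely
illustrative, not something this test needs, and "populated" /
"test_and_set_populated()" are made-up names:

	/* Needs <stdatomic.h>. One flag per demand-paged page, zeroed at setup. */
	static _Atomic uint8_t *populated;

	/* Returns true if some thread already populated this page. */
	static bool test_and_set_populated(uint64_t gpa)
	{
		uint64_t idx = (gpa - memstress_args.gpa) / demand_paging_size;

		return atomic_exchange(&populated[idx], 1);
	}

A VMM would check this before issuing UFFDIO_COPY/CONTINUE and either
skip the call or go straight to the fallback when it returns true,
avoiding most of the calls destined to EEXIST.
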
>
> Signed-off-by: Anish Moorthy <amoorthy@google.com>
> Acked-by: James Houghton <jthoughton@google.com>
> ---
>  .../selftests/kvm/demand_paging_test.c        | 209 +++++++++++++-----
>  1 file changed, 155 insertions(+), 54 deletions(-)
>
> diff --git a/tools/testing/selftests/kvm/demand_paging_test.c b/tools/testing/selftests/kvm/demand_paging_test.c
> index e84dde345edbc..668bd63d944e7 100644
> --- a/tools/testing/selftests/kvm/demand_paging_test.c
> +++ b/tools/testing/selftests/kvm/demand_paging_test.c
> @@ -15,6 +15,7 @@
>  #include <time.h>
>  #include <pthread.h>
>  #include <linux/userfaultfd.h>
> +#include <sys/mman.h>
>  #include <sys/syscall.h>
>
>  #include "kvm_util.h"
> @@ -31,6 +32,57 @@ static uint64_t guest_percpu_mem_size = DEFAULT_PER_VCPU_MEM_SIZE;
>  static size_t demand_paging_size;
>  static char *guest_data_prototype;
>
> +static int num_uffds;
> +static size_t uffd_region_size;
> +static struct uffd_desc **uffd_descs;
> +/*
> + * Delay when demand paging is performed through userfaultfd or directly by
> + * vcpu_worker in the case of a KVM_EXIT_MEMORY_FAULT.
> + */
> +static useconds_t uffd_delay;
> +static int uffd_mode;
> +
> +
> +static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t hva,
> +                                                                       bool is_vcpu);
> +
> +static void madv_write_or_err(uint64_t gpa)
> +{
> +       int r;
> +       void *hva = addr_gpa2hva(memstress_args.vm, gpa);
> +
> +       r = madvise(hva, demand_paging_size, MADV_POPULATE_WRITE);
> +       TEST_ASSERT(r == 0,
> +                               "MADV_POPULATE_WRITE on hva 0x%lx (gpa 0x%lx) fail, errno %i\n",
> +                               (uintptr_t) hva, gpa, errno);
> +}
> +
> +static void ready_page(uint64_t gpa)
> +{
> +       int r, uffd;
> +
> +       /*
> +        * This test only registers memslot 1 w/ userfaultfd. Any accesses outside
> +        * the registered ranges should fault in the physical pages through
> +        * MADV_POPULATE_WRITE.
> +        */
> +       if ((gpa < memstress_args.gpa)
> +               || (gpa >= memstress_args.gpa + memstress_args.size)) {
> +               madv_write_or_err(gpa);
> +       } else {
> +               if (uffd_delay)
> +                       usleep(uffd_delay);
> +
> +               uffd = uffd_descs[(gpa - memstress_args.gpa) / uffd_region_size]->uffd;
> +
> +               r = handle_uffd_page_request(uffd_mode, uffd,
> +                                       (uint64_t) addr_gpa2hva(memstress_args.vm, gpa), true);
> +
> +               if (r == EEXIST)
> +                       madv_write_or_err(gpa);
> +       }
> +}
> +
>  static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
>  {
>         struct kvm_vcpu *vcpu = vcpu_args->vcpu;
> @@ -42,25 +94,36 @@ static void vcpu_worker(struct memstress_vcpu_args *vcpu_args)
>
>         clock_gettime(CLOCK_MONOTONIC, &start);
>
> -       /* Let the guest access its memory */
> -       ret = _vcpu_run(vcpu);
> -       TEST_ASSERT(ret == 0, "vcpu_run failed: %d\n", ret);
> -       if (get_ucall(vcpu, NULL) != UCALL_SYNC) {
> -               TEST_ASSERT(false,
> -                           "Invalid guest sync status: exit_reason=%s\n",
> -                           exit_reason_str(run->exit_reason));
> -       }
> +       while (true) {
> +               /* Let the guest access its memory */
> +               ret = _vcpu_run(vcpu);
> +               TEST_ASSERT(ret == 0
> +                                       || (errno == EFAULT
> +                                               && run->exit_reason == KVM_EXIT_MEMORY_FAULT),
> +                                       "vcpu_run failed: %d\n", ret);
> +               if (ret != 0 && get_ucall(vcpu, NULL) != UCALL_SYNC) {
> +
> +                       if (run->exit_reason == KVM_EXIT_MEMORY_FAULT) {
> +                               ready_page(run->memory_fault.gpa);
> +                               continue;
> +                       }
> +
> +                       TEST_ASSERT(false,
> +                                               "Invalid guest sync status: exit_reason=%s\n",
> +                                               exit_reason_str(run->exit_reason));
> +               }
>
> -       ts_diff = timespec_elapsed(start);
> -       PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_idx,
> -                      ts_diff.tv_sec, ts_diff.tv_nsec);
> +               ts_diff = timespec_elapsed(start);
> +               PER_VCPU_DEBUG("vCPU %d execution time: %ld.%.9lds\n", vcpu_idx,
> +                                          ts_diff.tv_sec, ts_diff.tv_nsec);
> +               break;
> +       }
>  }
>
> -static int handle_uffd_page_request(int uffd_mode, int uffd,
> -               struct uffd_msg *msg)
> +static int handle_uffd_page_request(int uffd_mode, int uffd, uint64_t hva,
> +                                                                       bool is_vcpu)
>  {
>         pid_t tid = syscall(__NR_gettid);
> -       uint64_t addr = msg->arg.pagefault.address;
>         struct timespec start;
>         struct timespec ts_diff;
>         int r;
> @@ -71,56 +134,78 @@ static int handle_uffd_page_request(int uffd_mode, int uffd,
>                 struct uffdio_copy copy;
>
>                 copy.src = (uint64_t)guest_data_prototype;
> -               copy.dst = addr;
> +               copy.dst = hva;
>                 copy.len = demand_paging_size;
> -               copy.mode = 0;
> +               copy.mode = UFFDIO_COPY_MODE_DONTWAKE;
>
> -               r = ioctl(uffd, UFFDIO_COPY, &copy);
>                 /*
> -                * With multiple vCPU threads fault on a single page and there are
> -                * multiple readers for the UFFD, at least one of the UFFDIO_COPYs
> -                * will fail with EEXIST: handle that case without signaling an
> -                * error.
> +                * With multiple vCPU threads and at least one of multiple reader threads
> +                * or vCPU memory faults, multiple vCPUs accessing an absent page will
> +                * almost certainly cause some thread doing the UFFDIO_COPY here to get
> +                * EEXIST: make sure to allow that case.
>                  */
> -               if (r == -1 && errno != EEXIST) {
> -                       pr_info("Failed UFFDIO_COPY in 0x%lx from thread %d, errno = %d\n",
> -                                       addr, tid, errno);
> -                       return r;
> -               }
> +               r = ioctl(uffd, UFFDIO_COPY, &copy);
> +               TEST_ASSERT(r == 0 || errno == EEXIST,
> +                       "Thread 0x%x failed UFFDIO_COPY on hva 0x%lx, errno = %d",
> +                       gettid(), hva, errno);
>         } else if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR) {
> +               /* The comments in the UFFDIO_COPY branch also apply here. */
>                 struct uffdio_continue cont = {0};
>
> -               cont.range.start = addr;
> +               cont.range.start = hva;
>                 cont.range.len = demand_paging_size;
> +               cont.mode = UFFDIO_CONTINUE_MODE_DONTWAKE;
>
>                 r = ioctl(uffd, UFFDIO_CONTINUE, &cont);
> -               /* See the note about EEXISTs in the UFFDIO_COPY branch. */
> -               if (r == -1 && errno != EEXIST) {
> -                       pr_info("Failed UFFDIO_CONTINUE in 0x%lx, thread %d, errno = %d\n",
> -                                       addr, tid, errno);
> -                       return r;
> -               }
> +               TEST_ASSERT(r == 0 || errno == EEXIST,
> +                       "Thread 0x%x failed UFFDIO_CONTINUE on hva 0x%lx, errno = %d",
> +                       gettid(), hva, errno);
>         } else {
>                 TEST_FAIL("Invalid uffd mode %d", uffd_mode);
>         }
>
> +       /*
> +        * If the above UFFDIO_COPY/CONTINUE fails with EEXIST, it will do so without
> +        * waking threads waiting on the UFFD: make sure that happens here.
> +        */

This comment reads a little strangely, given that we always pass
MODE_DONTWAKE to UFFDIO_COPY/CONTINUE.

You *could* update the comment to reflect what the test is really
doing, but I think you actually want the test to do what the comment
suggests. That is, the code should:
1. DONTWAKE if is_vcpu
2. UFFDIO_WAKE if !is_vcpu && the UFFDIO_COPY/CONTINUE failed (which
can only mean EEXIST, since we would have already crashed on anything
else).

Something roughly like the sketch below. This way we save a syscall
with almost no added complexity, and the existing userfaultfd tests
remain basically untouched (i.e., they no longer always need an
explicit UFFDIO_WAKE).
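
Untested, and just to illustrate using the names from your patch (the
same idea applies to the UFFDIO_CONTINUE branch):

	copy.mode = is_vcpu ? UFFDIO_COPY_MODE_DONTWAKE : 0;

	r = ioctl(uffd, UFFDIO_COPY, &copy);
	TEST_ASSERT(r == 0 || errno == EEXIST,
		"Thread 0x%x failed UFFDIO_COPY on hva 0x%lx, errno = %d",
		gettid(), hva, errno);
	...
	/*
	 * A successful COPY/CONTINUE from a reader thread already woke any
	 * waiters, so the explicit UFFDIO_WAKE is only needed when we hit
	 * the EEXIST case and nothing was woken.
	 */
	if (!is_vcpu && r != 0) {
		struct uffdio_range range = {
			.start = hva,
			.len = demand_paging_size,
		};

		r = ioctl(uffd, UFFDIO_WAKE, &range);
		TEST_ASSERT(r == 0,
			"Thread 0x%x failed UFFDIO_WAKE on hva 0x%lx, errno = %d",
			gettid(), hva, errno);
	}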

Thanks!

> +       if (!is_vcpu) {
> +               struct uffdio_range range = {
> +                       .start = hva,
> +                       .len = demand_paging_size
> +               };
> +               r = ioctl(uffd, UFFDIO_WAKE, &range);
> +               TEST_ASSERT(
> +                       r == 0,
> +                       "Thread 0x%x failed UFFDIO_WAKE on hva 0x%lx, errno = %d",
> +                       gettid(), hva, errno);
> +       }
> +
>         ts_diff = timespec_elapsed(start);
>
>         PER_PAGE_DEBUG("UFFD page-in %d \t%ld ns\n", tid,
>                        timespec_to_ns(ts_diff));
>         PER_PAGE_DEBUG("Paged in %ld bytes at 0x%lx from thread %d\n",
> -                      demand_paging_size, addr, tid);
> +                      demand_paging_size, hva, tid);
>
>         return 0;
>  }
>
> +static int handle_uffd_page_request_from_uffd(int uffd_mode, int uffd,
> +                               struct uffd_msg *msg)
> +{
> +       TEST_ASSERT(msg->event == UFFD_EVENT_PAGEFAULT,
> +               "Received uffd message with event %d != UFFD_EVENT_PAGEFAULT",
> +               msg->event);
> +       return handle_uffd_page_request(uffd_mode, uffd,
> +                                       msg->arg.pagefault.address, false);
> +}
> +
>  struct test_params {
> -       int uffd_mode;
>         bool single_uffd;
> -       useconds_t uffd_delay;
>         int readers_per_uffd;
>         enum vm_mem_backing_src_type src_type;
>         bool partition_vcpu_memory_access;
> +       bool memfault_exits;
>  };
>
>  static void prefault_mem(void *alias, uint64_t len)
> @@ -137,15 +222,26 @@ static void prefault_mem(void *alias, uint64_t len)
>  static void run_test(enum vm_guest_mode mode, void *arg)
>  {
>         struct test_params *p = arg;
> -       struct uffd_desc **uffd_descs = NULL;
>         struct timespec start;
>         struct timespec ts_diff;
>         struct kvm_vm *vm;
> -       int i, num_uffds = 0;
> -       uint64_t uffd_region_size;
> +       int i;
> +       uint32_t slot_flags = 0;
> +       bool uffd_memfault_exits = uffd_mode && p->memfault_exits;
> +
> +       if (uffd_memfault_exits) {
> +               TEST_ASSERT(kvm_has_cap(KVM_CAP_ABSENT_MAPPING_FAULT) > 0,
> +                                       "KVM does not have KVM_CAP_ABSENT_MAPPING_FAULT");
> +               slot_flags = KVM_MEM_ABSENT_MAPPING_FAULT;
> +       }
>
>         vm = memstress_create_vm(mode, nr_vcpus, guest_percpu_mem_size,
> -                               1, 0, p->src_type, p->partition_vcpu_memory_access);
> +                               1, slot_flags, p->src_type, p->partition_vcpu_memory_access);
> +
> +       if (uffd_memfault_exits) {
> +               vm_enable_cap(vm,
> +                                         KVM_CAP_MEMORY_FAULT_INFO, KVM_MEMORY_FAULT_INFO_ENABLE);
> +       }
>
>         demand_paging_size = get_backing_src_pagesz(p->src_type);
>
> @@ -154,12 +250,12 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>                     "Failed to allocate buffer for guest data pattern");
>         memset(guest_data_prototype, 0xAB, demand_paging_size);
>
> -       if (p->uffd_mode) {
> +       if (uffd_mode) {
>                 num_uffds = p->single_uffd ? 1 : nr_vcpus;
>                 uffd_region_size = nr_vcpus * guest_percpu_mem_size / num_uffds;
>
>                 uffd_descs = malloc(num_uffds * sizeof(struct uffd_desc *));
> -               TEST_ASSERT(uffd_descs, "Memory allocation failed");
> +               TEST_ASSERT(uffd_descs, "Failed to allocate memory of uffd descriptors");
>
>                 for (i = 0; i < num_uffds; i++) {
>                         struct memstress_vcpu_args *vcpu_args;
> @@ -179,10 +275,10 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>                          * requests.
>                          */
>                         uffd_descs[i] = uffd_setup_demand_paging(
> -                               p->uffd_mode, p->uffd_delay, vcpu_hva,
> +                               uffd_mode, uffd_delay, vcpu_hva,
>                                 uffd_region_size,
>                                 p->readers_per_uffd,
> -                               &handle_uffd_page_request);
> +                               &handle_uffd_page_request_from_uffd);
>                 }
>         }
>
> @@ -196,7 +292,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>         ts_diff = timespec_elapsed(start);
>         pr_info("All vCPU threads joined\n");
>
> -       if (p->uffd_mode) {
> +       if (uffd_mode) {
>                 /* Tell the user fault fd handler threads to quit */
>                 for (i = 0; i < num_uffds; i++)
>                         uffd_stop_demand_paging(uffd_descs[i]);
> @@ -211,7 +307,7 @@ static void run_test(enum vm_guest_mode mode, void *arg)
>         memstress_destroy_vm(vm);
>
>         free(guest_data_prototype);
> -       if (p->uffd_mode)
> +       if (uffd_mode)
>                 free(uffd_descs);
>  }
>
> @@ -220,7 +316,7 @@ static void help(char *name)
>         puts("");
>         printf("usage: %s [-h] [-m vm_mode] [-u uffd_mode] [-a]\n"
>                    "          [-d uffd_delay_usec] [-r readers_per_uffd] [-b memory]\n"
> -                  "          [-s type] [-v vcpus] [-o]\n", name);
> +                  "          [-w] [-s type] [-v vcpus] [-o]\n", name);
>         guest_modes_help();
>         printf(" -u: use userfaultfd to handle vCPU page faults. Mode is a\n"
>                "     UFFD registration mode: 'MISSING' or 'MINOR'.\n");
> @@ -231,6 +327,7 @@ static void help(char *name)
>                "     FD handler to simulate demand paging\n"
>                "     overheads. Ignored without -u.\n");
>         printf(" -r: Set the number of reader threads per uffd.\n");
> +       printf(" -w: Enable kvm cap for memory fault exits.\n");
>         printf(" -b: specify the size of the memory region which should be\n"
>                "     demand paged by each vCPU. e.g. 10M or 3G.\n"
>                "     Default: 1G\n");
> @@ -250,29 +347,30 @@ int main(int argc, char *argv[])
>                 .partition_vcpu_memory_access = true,
>                 .readers_per_uffd = 1,
>                 .single_uffd = false,
> +               .memfault_exits = false,
>         };
>         int opt;
>
>         guest_modes_append_default();
>
> -       while ((opt = getopt(argc, argv, "ahom:u:d:b:s:v:r:")) != -1) {
> +       while ((opt = getopt(argc, argv, "ahowm:u:d:b:s:v:r:")) != -1) {
>                 switch (opt) {
>                 case 'm':
>                         guest_modes_cmdline(optarg);
>                         break;
>                 case 'u':
>                         if (!strcmp("MISSING", optarg))
> -                               p.uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
> +                               uffd_mode = UFFDIO_REGISTER_MODE_MISSING;
>                         else if (!strcmp("MINOR", optarg))
> -                               p.uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
> -                       TEST_ASSERT(p.uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
> +                               uffd_mode = UFFDIO_REGISTER_MODE_MINOR;
> +                       TEST_ASSERT(uffd_mode, "UFFD mode must be 'MISSING' or 'MINOR'.");
>                         break;
>                 case 'a':
>                         p.single_uffd = true;
>                         break;
>                 case 'd':
> -                       p.uffd_delay = strtoul(optarg, NULL, 0);
> -                       TEST_ASSERT(p.uffd_delay >= 0, "A negative UFFD delay is not supported.");
> +                       uffd_delay = strtoul(optarg, NULL, 0);
> +                       TEST_ASSERT(uffd_delay >= 0, "A negative UFFD delay is not supported.");
>                         break;
>                 case 'b':
>                         guest_percpu_mem_size = parse_size(optarg);
> @@ -295,6 +393,9 @@ int main(int argc, char *argv[])
>                                                 "Invalid number of readers per uffd %d: must be >=1",
>                                                 p.readers_per_uffd);
>                         break;
> +               case 'w':
> +                       p.memfault_exits = true;
> +                       break;
>                 case 'h':
>                 default:
>                         help(argv[0]);
> @@ -302,7 +403,7 @@ int main(int argc, char *argv[])
>                 }
>         }
>
> -       if (p.uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
> +       if (uffd_mode == UFFDIO_REGISTER_MODE_MINOR &&
>             !backing_src_is_shared(p.src_type)) {
>                 TEST_FAIL("userfaultfd MINOR mode requires shared memory; pick a different -s");
>         }
> --
> 2.40.0.577.gac1e443424-goog
>
