From: Ben Gardon <bgardon@google.com>
To: Sean Christopherson <sean.j.christopherson@intel.com>
Cc: Marc Zyngier <maz@kernel.org>,
	Paolo Bonzini <pbonzini@redhat.com>,
	Arnd Bergmann <arnd@arndb.de>, James Morse <james.morse@arm.com>,
	Julien Thierry <julien.thierry.kdev@gmail.com>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Vitaly Kuznetsov <vkuznets@redhat.com>,
	Wanpeng Li <wanpengli@tencent.com>,
	Jim Mattson <jmattson@google.com>, Joerg Roedel <joro@8bytes.org>,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, linux-mips@vger.kernel.org,
	kvm@vger.kernel.org, linux-arch@vger.kernel.org,
	linux-kernel@vger.kernel.org, Peter Feiner <pfeiner@google.com>,
	Peter Shier <pshier@google.com>,
	Junaid Shahid <junaids@google.com>,
	Christoffer Dall <christoffer.dall@arm.com>
Subject: Re: [PATCH v2 14/21] KVM: Move x86's version of struct kvm_mmu_memory_cache to common code
Date: Wed, 24 Jun 2020 11:08:08 -0700
Message-ID: <CANgfPd_K8PhM26T3GB7BFoDNTLCi+OcYp6DGhXuJcxKMwvZrFg@mail.gmail.com>
In-Reply-To: <20200622200822.4426-15-sean.j.christopherson@intel.com>

On Mon, Jun 22, 2020 at 1:09 PM Sean Christopherson
<sean.j.christopherson@intel.com> wrote:
>
> Move x86's 'struct kvm_mmu_memory_cache' to common code in anticipation
> of moving the entire x86 implementation code to common KVM and reusing
> it for arm64 and MIPS.  Add a new architecture specific asm/kvm_types.h
> to control the existence and parameters of the struct.  The new header
> is needed to avoid a chicken-and-egg problem with asm/kvm_host.h as all
> architectures define instances of the struct in their vCPU structs.
>
> Add an asm-generic version of kvm_types.h to avoid having empty files on
> PPC and s390 in the long term, and for arm64 and mips in the short term.
>
> Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Reviewed-by: Ben Gardon <bgardon@google.com>
> Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
> ---
>  arch/arm64/include/asm/Kbuild    |  1 +
>  arch/mips/include/asm/Kbuild     |  1 +
>  arch/powerpc/include/asm/Kbuild  |  1 +
>  arch/s390/include/asm/Kbuild     |  1 +
>  arch/x86/include/asm/kvm_host.h  | 13 -------------
>  arch/x86/include/asm/kvm_types.h |  7 +++++++
>  include/asm-generic/kvm_types.h  |  5 +++++
>  include/linux/kvm_types.h        | 19 +++++++++++++++++++
>  8 files changed, 35 insertions(+), 13 deletions(-)
>  create mode 100644 arch/x86/include/asm/kvm_types.h
>  create mode 100644 include/asm-generic/kvm_types.h
>
> diff --git a/arch/arm64/include/asm/Kbuild b/arch/arm64/include/asm/Kbuild
> index ff9cbb631212..35a68155cd0e 100644
> --- a/arch/arm64/include/asm/Kbuild
> +++ b/arch/arm64/include/asm/Kbuild
> @@ -1,5 +1,6 @@
>  # SPDX-License-Identifier: GPL-2.0
>  generic-y += early_ioremap.h
> +generic-y += kvm_types.h
>  generic-y += local64.h
>  generic-y += mcs_spinlock.h
>  generic-y += qrwlock.h
> diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
> index 8643d313890e..397e6d24d2ab 100644
> --- a/arch/mips/include/asm/Kbuild
> +++ b/arch/mips/include/asm/Kbuild
> @@ -5,6 +5,7 @@ generated-y += syscall_table_64_n32.h
>  generated-y += syscall_table_64_n64.h
>  generated-y += syscall_table_64_o32.h
>  generic-y += export.h
> +generic-y += kvm_types.h
>  generic-y += local64.h
>  generic-y += mcs_spinlock.h
>  generic-y += parport.h
> diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
> index dadbcf3a0b1e..2d444d09b553 100644
> --- a/arch/powerpc/include/asm/Kbuild
> +++ b/arch/powerpc/include/asm/Kbuild
> @@ -4,6 +4,7 @@ generated-y += syscall_table_64.h
>  generated-y += syscall_table_c32.h
>  generated-y += syscall_table_spu.h
>  generic-y += export.h
> +generic-y += kvm_types.h
>  generic-y += local64.h
>  generic-y += mcs_spinlock.h
>  generic-y += vtime.h
> diff --git a/arch/s390/include/asm/Kbuild b/arch/s390/include/asm/Kbuild
> index 83f6e85de7bc..319efa0e6d02 100644
> --- a/arch/s390/include/asm/Kbuild
> +++ b/arch/s390/include/asm/Kbuild
> @@ -6,5 +6,6 @@ generated-y += unistd_nr.h
>
>  generic-y += asm-offsets.h
>  generic-y += export.h
> +generic-y += kvm_types.h
>  generic-y += local64.h
>  generic-y += mcs_spinlock.h
> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
> index 67b84aa2984e..70832aa762e5 100644
> --- a/arch/x86/include/asm/kvm_host.h
> +++ b/arch/x86/include/asm/kvm_host.h
> @@ -193,8 +193,6 @@ struct x86_exception;
>  enum x86_intercept;
>  enum x86_intercept_stage;
>
> -#define KVM_NR_MEM_OBJS 40
> -
>  #define KVM_NR_DB_REGS 4
>
>  #define DR6_BD         (1 << 13)
> @@ -245,17 +243,6 @@ enum x86_intercept_stage;
>
>  struct kvm_kernel_irq_routing_entry;
>
> -/*
> - * We don't want allocation failures within the mmu code, so we preallocate
> - * enough memory for a single page fault in a cache.
> - */
> -struct kvm_mmu_memory_cache {
> -       int nobjs;
> -       gfp_t gfp_zero;
> -       struct kmem_cache *kmem_cache;
> -       void *objects[KVM_NR_MEM_OBJS];
> -};
> -
>  /*
>   * the pages used as guest page table on soft mmu are tracked by
>   * kvm_memory_slot.arch.gfn_track which is 16 bits, so the role bits used
> diff --git a/arch/x86/include/asm/kvm_types.h b/arch/x86/include/asm/kvm_types.h
> new file mode 100644
> index 000000000000..08f1b57d3b62
> --- /dev/null
> +++ b/arch/x86/include/asm/kvm_types.h
> @@ -0,0 +1,7 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_X86_KVM_TYPES_H
> +#define _ASM_X86_KVM_TYPES_H
> +
> +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40
> +
> +#endif /* _ASM_X86_KVM_TYPES_H */
> diff --git a/include/asm-generic/kvm_types.h b/include/asm-generic/kvm_types.h
> new file mode 100644
> index 000000000000..2a82daf110f1
> --- /dev/null
> +++ b/include/asm-generic/kvm_types.h
> @@ -0,0 +1,5 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +#ifndef _ASM_GENERIC_KVM_TYPES_H
> +#define _ASM_GENERIC_KVM_TYPES_H
> +
> +#endif
> diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
> index 68e84cf42a3f..a7580f69dda0 100644
> --- a/include/linux/kvm_types.h
> +++ b/include/linux/kvm_types.h
> @@ -20,6 +20,8 @@ enum kvm_mr_change;
>
>  #include <linux/types.h>
>
> +#include <asm/kvm_types.h>
> +
>  /*
>   * Address types:
>   *
> @@ -58,4 +60,21 @@ struct gfn_to_pfn_cache {
>         bool dirty;
>  };
>
> +#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
> +/*
> + * Memory caches are used to preallocate memory ahead of various MMU flows,
> + * e.g. page fault handlers.  Gracefully handling allocation failures deep in
> + * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
> + * holding MMU locks.  Note, these caches act more like prefetch buffers than
> + * classical caches, i.e. objects are not returned to the cache on being freed.
> + */
> +struct kvm_mmu_memory_cache {
> +       int nobjs;
> +       gfp_t gfp_zero;
> +       struct kmem_cache *kmem_cache;
> +       void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];
> +};
> +#endif
> +
> +
>  #endif /* __KVM_TYPES_H__ */
> --
> 2.26.0
>
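
For context, the struct being moved here is consumed through a simple
"top up, then allocate" pattern.  Below is a minimal sketch of how an
architecture opts in and how the cache is typically used; the
kvm_mmu_topup_memory_cache()/kvm_mmu_memory_cache_alloc() names follow the
helpers made common later in this series, while the arch/foo paths, the
struct layout shown, and the fault-handler shape are hypothetical
illustrations rather than code from any real architecture.

/* arch/foo/include/asm/kvm_types.h -- opting in means sizing the per-cache
 * object array.  Architectures that do not define this (PPC, s390) pick up
 * the empty asm-generic header via "generic-y += kvm_types.h", which makes
 * the build generate a wrapper header containing only:
 *
 *	#include <asm-generic/kvm_types.h>
 *
 * so <asm/kvm_types.h> always exists and the struct is simply compiled out.
 */
#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 40

/* arch/foo/include/asm/kvm_host.h -- embed one cache per vCPU. */
struct kvm_vcpu_arch {
	struct kvm_mmu_memory_cache mmu_page_cache;
	/* ... */
};

/* Hypothetical fault-handler shape: fill the cache in a sleepable context,
 * then consume it under mmu_lock, where allocation failures, reclaim and
 * I/O must be avoided.
 */
static int foo_handle_mmu_fault(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	struct kvm_mmu_memory_cache *mc = &vcpu->arch.mmu_page_cache;
	void *new_table;
	int r;

	/* May sleep and allocate; done before taking mmu_lock.  The min of
	 * 4 is illustrative: enough objects for one fault's worth of tables.
	 */
	r = kvm_mmu_topup_memory_cache(mc, 4);
	if (r)
		return r;

	spin_lock(&vcpu->kvm->mmu_lock);
	/* Cannot fail: the objects were preallocated above. */
	new_table = kvm_mmu_memory_cache_alloc(mc);
	/* ... install new_table in the page tables covering gpa ... */
	spin_unlock(&vcpu->kvm->mmu_lock);

	return 0;
}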

Thread overview: 67+ messages
2020-06-22 20:08 [PATCH v2 00/21] KVM: Cleanup and unify kvm_mmu_memory_cache usage Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 01/21] KVM: x86/mmu: Track the associated kmem_cache in the MMU caches Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 02/21] KVM: x86/mmu: Consolidate "page" variant of memory cache helpers Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 03/21] KVM: x86/mmu: Use consistent "mc" name for kvm_mmu_memory_cache locals Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 04/21] KVM: x86/mmu: Remove superfluous gotos from mmu_topup_memory_caches() Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 05/21] KVM: x86/mmu: Try to avoid crashing KVM if a MMU memory cache is empty Sean Christopherson
2020-06-24 18:03   ` Ben Gardon
2020-06-22 20:08 ` [PATCH v2 06/21] KVM: x86/mmu: Move fast_page_fault() call above mmu_topup_memory_caches() Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 07/21] KVM: x86/mmu: Topup memory caches after walking GVA->GPA Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 08/21] KVM: x86/mmu: Clean up the gorilla math in mmu_topup_memory_caches() Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 09/21] KVM: x86/mmu: Separate the memory caches for shadow pages and gfn arrays Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 10/21] KVM: x86/mmu: Make __GFP_ZERO a property of the memory cache Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 11/21] KVM: x86/mmu: Zero allocate shadow pages (outside of mmu_lock) Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 12/21] KVM: x86/mmu: Skip filling the gfn cache for guaranteed direct MMU topups Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 13/21] KVM: x86/mmu: Prepend "kvm_" to memory cache helpers that will be global Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 14/21] KVM: Move x86's version of struct kvm_mmu_memory_cache to common code Sean Christopherson
2020-06-24 18:08   ` Ben Gardon [this message]
2020-06-22 20:08 ` [PATCH v2 15/21] KVM: Move x86's MMU memory cache helpers to common KVM code Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 16/21] KVM: arm64: Drop @max param from mmu_topup_memory_cache() Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 17/21] KVM: arm64: Use common code's approach for __GFP_ZERO with memory caches Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 18/21] KVM: arm64: Use common KVM implementation of MMU " Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 19/21] KVM: MIPS: Drop @max param from mmu_topup_memory_cache() Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 20/21] KVM: MIPS: Account pages used for GPA page tables Sean Christopherson
2020-06-22 20:08 ` [PATCH v2 21/21] KVM: MIPS: Use common KVM implementation of MMU memory caches Sean Christopherson
2020-06-23 17:26 ` [PATCH v2 00/21] KVM: Cleanup and unify kvm_mmu_memory_cache usage Sean Christopherson
