From: Will Deacon <will@kernel.org>
To: Quentin Perret <qperret@google.com>
Cc: catalin.marinas@arm.com, maz@kernel.org, james.morse@arm.com,
	julien.thierry.kdev@gmail.com, suzuki.poulose@arm.com,
	android-kvm@google.com, linux-kernel@vger.kernel.org,
	kernel-team@android.com, kvmarm@lists.cs.columbia.edu,
	linux-arm-kernel@lists.infradead.org, tabba@google.com,
	mark.rutland@arm.com, dbrazdil@google.com, mate.toth-pal@arm.com,
	seanjc@google.com, robh+dt@kernel.org
Subject: Re: [PATCH v3 29/32] KVM: arm64: Wrap the host with a stage 2
Date: Fri, 5 Mar 2021 19:29:06 +0000	[thread overview]
Message-ID: <20210305192905.GE23633@willie-the-truck> (raw)
In-Reply-To: <20210302150002.3685113-30-qperret@google.com>

On Tue, Mar 02, 2021 at 02:59:59PM +0000, Quentin Perret wrote:
> When KVM runs in protected nVHE mode, make use of a stage 2 page-table
> to give the hypervisor some control over the host memory accesses. The
> host stage 2 is created lazily using large block mappings if possible,
> and will default to page mappings in absence of a better solution.
> 
> From this point on, memory accesses from the host to protected memory
> regions (e.g. marked PROT_NONE) are fatal and lead to hyp_panic().
> 
> Signed-off-by: Quentin Perret <qperret@google.com>
> ---
>  arch/arm64/include/asm/kvm_asm.h              |   1 +
>  arch/arm64/include/asm/kvm_cpufeature.h       |   2 +
>  arch/arm64/kernel/image-vars.h                |   3 +
>  arch/arm64/kvm/arm.c                          |  10 +
>  arch/arm64/kvm/hyp/include/nvhe/mem_protect.h |  34 +++
>  arch/arm64/kvm/hyp/nvhe/Makefile              |   2 +-
>  arch/arm64/kvm/hyp/nvhe/hyp-init.S            |   1 +
>  arch/arm64/kvm/hyp/nvhe/hyp-main.c            |  11 +
>  arch/arm64/kvm/hyp/nvhe/mem_protect.c         | 213 ++++++++++++++++++
>  arch/arm64/kvm/hyp/nvhe/setup.c               |   5 +
>  arch/arm64/kvm/hyp/nvhe/switch.c              |   7 +-
>  arch/arm64/kvm/hyp/nvhe/tlb.c                 |   4 +-
>  12 files changed, 286 insertions(+), 7 deletions(-)
>  create mode 100644 arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
>  create mode 100644 arch/arm64/kvm/hyp/nvhe/mem_protect.c

[...]

> diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> new file mode 100644
> index 000000000000..d293cb328cc4
> --- /dev/null
> +++ b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
> @@ -0,0 +1,34 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (C) 2020 Google LLC
> + * Author: Quentin Perret <qperret@google.com>
> + */
> +
> +#ifndef __KVM_NVHE_MEM_PROTECT__
> +#define __KVM_NVHE_MEM_PROTECT__
> +#include <linux/kvm_host.h>
> +#include <asm/kvm_hyp.h>
> +#include <asm/kvm_pgtable.h>
> +#include <asm/virt.h>
> +#include <nvhe/spinlock.h>
> +
> +struct host_kvm {
> +	struct kvm_arch arch;
> +	struct kvm_pgtable pgt;
> +	struct kvm_pgtable_mm_ops mm_ops;
> +	hyp_spinlock_t lock;
> +};
> +extern struct host_kvm host_kvm;
> +
> +int __pkvm_prot_finalize(void);
> +int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool);
> +void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
> +
> +static __always_inline void __load_host_stage2(void)
> +{
> +	if (static_branch_likely(&kvm_protected_mode_initialized))
> +		__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
> +	else
> +		write_sysreg(0, vttbr_el2);

I realise you've just moved the code, but why is this needed? All we've
done is point the stage-2 ttb at 0x0 (i.e. we haven't disabled anything).

> +#endif /* __KVM_NVHE_MEM_PROTECT__ */
> diff --git a/arch/arm64/kvm/hyp/nvhe/Makefile b/arch/arm64/kvm/hyp/nvhe/Makefile
> index e204ea77ab27..ce49795324a7 100644
> --- a/arch/arm64/kvm/hyp/nvhe/Makefile
> +++ b/arch/arm64/kvm/hyp/nvhe/Makefile
> @@ -14,7 +14,7 @@ lib-objs := $(addprefix ../../../lib/, $(lib-objs))
>  
>  obj-y := timer-sr.o sysreg-sr.o debug-sr.o switch.o tlb.o hyp-init.o host.o \
>  	 hyp-main.o hyp-smp.o psci-relay.o early_alloc.o stub.o page_alloc.o \
> -	 cache.o cpufeature.o setup.o mm.o
> +	 cache.o cpufeature.o setup.o mm.o mem_protect.o
>  obj-y += ../vgic-v3-sr.o ../aarch32.o ../vgic-v2-cpuif-proxy.o ../entry.o \
>  	 ../fpsimd.o ../hyp-entry.o ../exception.o ../pgtable.o
>  obj-y += $(lib-objs)
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-init.S b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> index f312672d895e..6fa01b04954f 100644
> --- a/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-init.S
> @@ -119,6 +119,7 @@ alternative_else_nop_endif
>  
>  	/* Invalidate the stale TLBs from Bootloader */
>  	tlbi	alle2
> +	tlbi	vmalls12e1
>  	dsb	sy
>  
>  	/*
> diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> index ae6503c9be15..f47028d3fd0a 100644
> --- a/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> +++ b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
> @@ -13,6 +13,7 @@
>  #include <asm/kvm_hyp.h>
>  #include <asm/kvm_mmu.h>
>  
> +#include <nvhe/mem_protect.h>
>  #include <nvhe/mm.h>
>  #include <nvhe/trap_handler.h>
>  
> @@ -151,6 +152,10 @@ static void handle___pkvm_create_private_mapping(struct kvm_cpu_context *host_ct
>  	cpu_reg(host_ctxt, 1) = __pkvm_create_private_mapping(phys, size, prot);
>  }
>  
> +static void handle___pkvm_prot_finalize(struct kvm_cpu_context *host_ctxt)
> +{
> +	cpu_reg(host_ctxt, 1) = __pkvm_prot_finalize();
> +}
>  typedef void (*hcall_t)(struct kvm_cpu_context *);
>  
>  #define HANDLE_FUNC(x)	[__KVM_HOST_SMCCC_FUNC_##x] = (hcall_t)handle_##x
> @@ -174,6 +179,7 @@ static const hcall_t host_hcall[] = {
>  	HANDLE_FUNC(__pkvm_cpu_set_vector),
>  	HANDLE_FUNC(__pkvm_create_mappings),
>  	HANDLE_FUNC(__pkvm_create_private_mapping),
> +	HANDLE_FUNC(__pkvm_prot_finalize),
>  };
>  
>  static void handle_host_hcall(struct kvm_cpu_context *host_ctxt)
> @@ -226,6 +232,11 @@ void handle_trap(struct kvm_cpu_context *host_ctxt)
>  	case ESR_ELx_EC_SMC64:
>  		handle_host_smc(host_ctxt);
>  		break;
> +	case ESR_ELx_EC_IABT_LOW:
> +		fallthrough;
> +	case ESR_ELx_EC_DABT_LOW:
> +		handle_host_mem_abort(host_ctxt);
> +		break;
>  	default:
>  		hyp_panic();
>  	}
> diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> new file mode 100644
> index 000000000000..2252ad1a8945
> --- /dev/null
> +++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
> @@ -0,0 +1,213 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +/*
> + * Copyright (C) 2020 Google LLC
> + * Author: Quentin Perret <qperret@google.com>
> + */
> +
> +#include <linux/kvm_host.h>
> +#include <asm/kvm_cpufeature.h>
> +#include <asm/kvm_emulate.h>
> +#include <asm/kvm_hyp.h>
> +#include <asm/kvm_mmu.h>
> +#include <asm/kvm_pgtable.h>
> +#include <asm/stage2_pgtable.h>
> +
> +#include <hyp/switch.h>
> +
> +#include <nvhe/gfp.h>
> +#include <nvhe/memory.h>
> +#include <nvhe/mem_protect.h>
> +#include <nvhe/mm.h>
> +
> +extern unsigned long hyp_nr_cpus;
> +struct host_kvm host_kvm;
> +
> +struct hyp_pool host_s2_mem;
> +struct hyp_pool host_s2_dev;
> +
> +static void *host_s2_zalloc_pages_exact(size_t size)
> +{
> +	return hyp_alloc_pages(&host_s2_mem, get_order(size));
> +}
> +
> +static void *host_s2_zalloc_page(void *pool)
> +{
> +	return hyp_alloc_pages(pool, 0);
> +}
> +
> +static int prepare_s2_pools(void *mem_pgt_pool, void *dev_pgt_pool)
> +{
> +	unsigned long nr_pages, pfn;
> +	int ret;
> +
> +	pfn = hyp_virt_to_pfn(mem_pgt_pool);
> +	nr_pages = host_s2_mem_pgtable_pages();
> +	ret = hyp_pool_init(&host_s2_mem, pfn, nr_pages, 0);
> +	if (ret)
> +		return ret;
> +
> +	pfn = hyp_virt_to_pfn(dev_pgt_pool);
> +	nr_pages = host_s2_dev_pgtable_pages();
> +	ret = hyp_pool_init(&host_s2_dev, pfn, nr_pages, 0);
> +	if (ret)
> +		return ret;
> +
> +	host_kvm.mm_ops.zalloc_pages_exact = host_s2_zalloc_pages_exact;
> +	host_kvm.mm_ops.zalloc_page = host_s2_zalloc_page;
> +	host_kvm.mm_ops.phys_to_virt = hyp_phys_to_virt;
> +	host_kvm.mm_ops.virt_to_phys = hyp_virt_to_phys;
> +	host_kvm.mm_ops.page_count = hyp_page_count;
> +	host_kvm.mm_ops.get_page = hyp_get_page;
> +	host_kvm.mm_ops.put_page = hyp_put_page;

Same comment I had earlier on about struct initialisation -- I think you
can use the same trick here for the host_kvm.mm_ops.
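
i.e. something along these lines, with a compound-literal assignment
(completely untested):

	host_kvm.mm_ops = (struct kvm_pgtable_mm_ops) {
		.zalloc_pages_exact	= host_s2_zalloc_pages_exact,
		.zalloc_page		= host_s2_zalloc_page,
		.phys_to_virt		= hyp_phys_to_virt,
		.virt_to_phys		= hyp_virt_to_phys,
		.page_count		= hyp_page_count,
		.get_page		= hyp_get_page,
		.put_page		= hyp_put_page,
	};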

> +
> +	return 0;
> +}
> +
> +static void prepare_host_vtcr(void)
> +{
> +	u32 parange, phys_shift;
> +	u64 mmfr0, mmfr1;
> +
> +	mmfr0 = arm64_ftr_reg_id_aa64mmfr0_el1.sys_val;
> +	mmfr1 = arm64_ftr_reg_id_aa64mmfr1_el1.sys_val;
> +
> +	/* The host stage 2 is id-mapped, so use parange for T0SZ */
> +	parange = kvm_get_parange(mmfr0);
> +	phys_shift = id_aa64mmfr0_parange_to_phys_shift(parange);
> +
> +	host_kvm.arch.vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift);
> +}
> +
> +int kvm_host_prepare_stage2(void *mem_pgt_pool, void *dev_pgt_pool)
> +{
> +	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
> +	int ret;
> +
> +	prepare_host_vtcr();
> +	hyp_spin_lock_init(&host_kvm.lock);
> +
> +	ret = prepare_s2_pools(mem_pgt_pool, dev_pgt_pool);
> +	if (ret)
> +		return ret;
> +
> +	ret = kvm_pgtable_stage2_init(&host_kvm.pgt, &host_kvm.arch,
> +				      &host_kvm.mm_ops);
> +	if (ret)
> +		return ret;
> +
> +	mmu->pgd_phys = __hyp_pa(host_kvm.pgt.pgd);
> +	mmu->arch = &host_kvm.arch;
> +	mmu->pgt = &host_kvm.pgt;
> +	mmu->vmid.vmid_gen = 0;
> +	mmu->vmid.vmid = 0;
> +
> +	return 0;
> +}
> +
> +int __pkvm_prot_finalize(void)
> +{
> +	struct kvm_s2_mmu *mmu = &host_kvm.arch.mmu;
> +	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
> +
> +	params->vttbr = kvm_get_vttbr(mmu);
> +	params->vtcr = host_kvm.arch.vtcr;
> +	params->hcr_el2 |= HCR_VM;
> +	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
> +		params->hcr_el2 |= HCR_FWB;
> +	kvm_flush_dcache_to_poc(params, sizeof(*params));
> +
> +	write_sysreg(params->hcr_el2, hcr_el2);
> +	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);

AFAICT, there's no ISB here. Do we need one before the TLB invalidation?
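
If so, maybe something like this, just to show where I mean (untested):

	write_sysreg(params->hcr_el2, hcr_el2);
	__load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
	isb();	/* Ensure the stage-2 regime is in place before the TLBI */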

> +	__tlbi(vmalls12e1is);
> +	dsb(ish);

Given that the TLB is invalidated on the boot path, please can you add
a comment here about the stale entries which you need to invalidate?

Also, does this need to be inner-shareable? I thought this function ran on
each CPU.
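
If it really is run once per CPU, then the local (non-shareable) forms
might be enough, e.g. (untested):

	__tlbi(vmalls12e1);
	dsb(nsh);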

> +	isb();
> +
> +	return 0;
> +}
> +
> +static void host_stage2_unmap_dev_all(void)
> +{
> +	struct kvm_pgtable *pgt = &host_kvm.pgt;
> +	struct memblock_region *reg;
> +	u64 addr = 0;
> +	int i;
> +
> +	/* Unmap all non-memory regions to recycle the pages */
> +	for (i = 0; i < hyp_memblock_nr; i++, addr = reg->base + reg->size) {
> +		reg = &hyp_memory[i];
> +		kvm_pgtable_stage2_unmap(pgt, addr, reg->base - addr);
> +	}
> +	kvm_pgtable_stage2_unmap(pgt, addr, ULONG_MAX);

Is this just going to return -ERANGE?
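
If the intention is just to tear down everything above the last memblock,
clamping the size to the input address range might be clearer, e.g.
(untested, and assuming pgt->ia_bits reflects the host IPA size here):

	kvm_pgtable_stage2_unmap(pgt, addr, BIT(pgt->ia_bits) - addr);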

> +static int host_stage2_idmap(u64 addr)
> +{
> +	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R | KVM_PGTABLE_PROT_W;
> +	struct kvm_mem_range range;
> +	bool is_memory = find_mem_range(addr, &range);
> +	struct hyp_pool *pool = is_memory ? &host_s2_mem : &host_s2_dev;
> +	int ret;
> +
> +	if (is_memory)
> +		prot |= KVM_PGTABLE_PROT_X;
> +
> +	hyp_spin_lock(&host_kvm.lock);
> +	ret = kvm_pgtable_stage2_idmap_greedy(&host_kvm.pgt, addr, prot,
> +					      &range, pool);
> +	if (is_memory || ret != -ENOMEM)
> +		goto unlock;
> +	host_stage2_unmap_dev_all();
> +	ret = kvm_pgtable_stage2_idmap_greedy(&host_kvm.pgt, addr, prot,
> +					      &range, pool);

I find this _really_ hard to reason about, as range is passed by reference
and we don't reset it after the first call returns -ENOMEM for an MMIO
mapping. Maybe some commentary on why it's still valid here?
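
Failing that, recomputing the range before the retry would at least make it
obviously correct, e.g. (untested, and assumes find_mem_range() is cheap
enough to call a second time):

	host_stage2_unmap_dev_all();
	/* The first attempt may have clobbered the range; recompute it. */
	find_mem_range(addr, &range);
	ret = kvm_pgtable_stage2_idmap_greedy(&host_kvm.pgt, addr, prot,
					      &range, pool);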

Other than that, this is looking really good to me.

Will

