From: Steven Price <steven.price@arm.com>
To: kvm@vger.kernel.org, kvmarm@lists.linux.dev
Cc: Steven Price <steven.price@arm.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Marc Zyngier <maz@kernel.org>, Will Deacon <will@kernel.org>,
	James Morse <james.morse@arm.com>,
	Oliver Upton <oliver.upton@linux.dev>,
	Suzuki K Poulose <suzuki.poulose@arm.com>,
	Zenghui Yu <yuzenghui@huawei.com>,
	linux-arm-kernel@lists.infradead.org,
	linux-kernel@vger.kernel.org, Joey Gouly <joey.gouly@arm.com>,
	Alexandru Elisei <alexandru.elisei@arm.com>,
	Christoffer Dall <christoffer.dall@arm.com>,
	Fuad Tabba <tabba@google.com>,
	linux-coco@lists.linux.dev,
	Ganapatrao Kulkarni <gankulkarni@os.amperecomputing.com>
Subject: [PATCH v2 21/43] arm64: RME: Runtime faulting of memory
Date: Fri, 12 Apr 2024 09:42:47 +0100
Message-ID: <20240412084309.1733783-22-steven.price@arm.com>
In-Reply-To: <20240412084309.1733783-1-steven.price@arm.com>

At runtime, if the realm guest accesses memory which hasn't yet been
mapped, then KVM needs to either populate the region or fault the guest.

For memory in the lower (protected) region of IPA, a fresh page is
provided to the RMM, which will zero the contents. For memory in the
upper (shared) region of IPA, the memory from the memslot is mapped
into the realm VM as non-secure.
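
Roughly, the fault path selects between these two cases using the top
bit of the IPA (the "stolen" bit). The following is an illustrative
sketch only, not part of the patch: the helpers are the ones added
below, while the wrapper function itself is hypothetical.

/*
 * Illustrative sketch (not part of this patch): the top IPA bit,
 * stolen from the guest physical address space, selects which path
 * a realm stage-2 fault takes.
 */
static int example_realm_fault(struct kvm *kvm, phys_addr_t fault_ipa,
			       kvm_pfn_t pfn,
			       struct kvm_mmu_memory_cache *memcache)
{
	/* BIT(ia_bits - 1) for a realm, 0 for a normal VM */
	gpa_t shared_bit = kvm_gpa_stolen_bits(kvm);

	if (fault_ipa & shared_bit)
		/* Upper (shared) half: map the memslot page non-secure */
		return realm_map_non_secure(&kvm->arch.realm, fault_ipa,
					    pfn_to_page(pfn), PAGE_SIZE,
					    memcache);

	/* Lower (protected) half: delegate a fresh page, zeroed by the RMM */
	return realm_map_protected(&kvm->arch.realm, fault_ipa,
				   pfn_to_page(pfn), PAGE_SIZE,
				   memcache);
}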

Signed-off-by: Steven Price <steven.price@arm.com>
---
 arch/arm64/include/asm/kvm_emulate.h |  10 ++
 arch/arm64/include/asm/kvm_rme.h     |  10 ++
 arch/arm64/kvm/mmu.c                 | 119 +++++++++++++++-
 arch/arm64/kvm/rme.c                 | 199 ++++++++++++++++++++++++---
 4 files changed, 316 insertions(+), 22 deletions(-)

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 2209a7c6267f..d40d998d9be2 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -629,6 +629,16 @@ static inline bool kvm_realm_is_created(struct kvm *kvm)
 	return kvm_is_realm(kvm) && kvm_realm_state(kvm) != REALM_STATE_NONE;
 }
 
+static inline gpa_t kvm_gpa_stolen_bits(struct kvm *kvm)
+{
+	if (kvm_is_realm(kvm)) {
+		struct realm *realm = &kvm->arch.realm;
+
+		return BIT(realm->ia_bits - 1);
+	}
+	return 0;
+}
+
 static inline bool vcpu_is_rec(struct kvm_vcpu *vcpu)
 {
 	if (static_branch_unlikely(&kvm_rme_is_available))
diff --git a/arch/arm64/include/asm/kvm_rme.h b/arch/arm64/include/asm/kvm_rme.h
index 749f2eb97bd4..48c7766fadeb 100644
--- a/arch/arm64/include/asm/kvm_rme.h
+++ b/arch/arm64/include/asm/kvm_rme.h
@@ -103,6 +103,16 @@ void kvm_realm_unmap_range(struct kvm *kvm,
 			   unsigned long ipa,
 			   u64 size,
 			   bool unmap_private);
+int realm_map_protected(struct realm *realm,
+			unsigned long base_ipa,
+			struct page *dst_page,
+			unsigned long map_size,
+			struct kvm_mmu_memory_cache *memcache);
+int realm_map_non_secure(struct realm *realm,
+			 unsigned long ipa,
+			 struct page *page,
+			 unsigned long map_size,
+			 struct kvm_mmu_memory_cache *memcache);
 int realm_set_ipa_state(struct kvm_vcpu *vcpu,
 			unsigned long addr, unsigned long end,
 			unsigned long ripas);
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 8a7b5449697f..50a49e4e2020 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -325,8 +325,13 @@ static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64
 
 	lockdep_assert_held_write(&kvm->mmu_lock);
 	WARN_ON(size & ~PAGE_MASK);
-	WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap,
-				   may_block));
+
+	if (kvm_is_realm(kvm))
+		kvm_realm_unmap_range(kvm, start, size, !only_shared);
+	else
+		WARN_ON(stage2_apply_range(mmu, start, end,
+					   kvm_pgtable_stage2_unmap,
+					   may_block));
 }
 
 static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
@@ -340,7 +345,11 @@ static void stage2_flush_memslot(struct kvm *kvm,
 	phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT;
 	phys_addr_t end = addr + PAGE_SIZE * memslot->npages;
 
-	stage2_apply_range_resched(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_flush);
+	if (kvm_is_realm(kvm))
+		kvm_realm_unmap_range(kvm, addr, end - addr, false);
+	else
+		stage2_apply_range_resched(&kvm->arch.mmu, addr, end,
+					   kvm_pgtable_stage2_flush);
 }
 
 /**
@@ -997,6 +1006,10 @@ void stage2_unmap_vm(struct kvm *kvm)
 	struct kvm_memory_slot *memslot;
 	int idx, bkt;
 
+	/* For realms this is handled by the RMM so nothing to do here */
+	if (kvm_is_realm(kvm))
+		return;
+
 	idx = srcu_read_lock(&kvm->srcu);
 	mmap_read_lock(current->mm);
 	write_lock(&kvm->mmu_lock);
@@ -1020,6 +1033,7 @@ void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu)
 	if (kvm_is_realm(kvm) &&
 	    (kvm_realm_state(kvm) != REALM_STATE_DEAD &&
 	     kvm_realm_state(kvm) != REALM_STATE_NONE)) {
+		unmap_stage2_range(mmu, 0, (~0ULL) & PAGE_MASK);
 		write_unlock(&kvm->mmu_lock);
 		kvm_realm_destroy_rtts(kvm, pgt->ia_bits);
 		return;
@@ -1383,6 +1397,69 @@ static bool kvm_vma_mte_allowed(struct vm_area_struct *vma)
 	return vma->vm_flags & VM_MTE_ALLOWED;
 }
 
+static int realm_map_ipa(struct kvm *kvm, phys_addr_t ipa,
+			 kvm_pfn_t pfn, unsigned long map_size,
+			 enum kvm_pgtable_prot prot,
+			 struct kvm_mmu_memory_cache *memcache)
+{
+	struct realm *realm = &kvm->arch.realm;
+	struct page *page = pfn_to_page(pfn);
+
+	if (WARN_ON(!(prot & KVM_PGTABLE_PROT_W)))
+		return -EFAULT;
+
+	if (!realm_is_addr_protected(realm, ipa))
+		return realm_map_non_secure(realm, ipa, page, map_size,
+					    memcache);
+
+	return realm_map_protected(realm, ipa, page, map_size, memcache);
+}
+
+static int private_memslot_fault(struct kvm_vcpu *vcpu,
+				 phys_addr_t fault_ipa,
+				 struct kvm_memory_slot *memslot)
+{
+	struct kvm *kvm = vcpu->kvm;
+	gpa_t gpa_stolen_mask = kvm_gpa_stolen_bits(kvm);
+	gfn_t gfn = (fault_ipa & ~gpa_stolen_mask) >> PAGE_SHIFT;
+	bool is_priv_gfn = !((fault_ipa & gpa_stolen_mask) == gpa_stolen_mask);
+	bool priv_exists = kvm_mem_is_private(kvm, gfn);
+	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
+	int order;
+	kvm_pfn_t pfn;
+	int ret;
+
+	if (priv_exists != is_priv_gfn) {
+		kvm_prepare_memory_fault_exit(vcpu,
+					      fault_ipa & ~gpa_stolen_mask,
+					      PAGE_SIZE,
+					      kvm_is_write_fault(vcpu),
+					      false, is_priv_gfn);
+
+		return 0;
+	}
+
+	if (!is_priv_gfn) {
+		/* Not a private mapping, handling normally */
+		return -EAGAIN;
+	}
+
+	if (kvm_gmem_get_pfn(kvm, memslot, gfn, &pfn, &order))
+		return 1; /* Retry */
+
+	ret = kvm_mmu_topup_memory_cache(memcache,
+					 kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
+	if (ret)
+		return ret;
+
+	/* FIXME: Should be able to use bigger than PAGE_SIZE mappings */
+	ret = realm_map_ipa(kvm, fault_ipa, pfn, PAGE_SIZE, KVM_PGTABLE_PROT_W,
+			     memcache);
+	if (!ret)
+		return 1; /* Handled */
+	return ret;
+}
+
 static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 			  struct kvm_memory_slot *memslot, unsigned long hva,
 			  bool fault_is_perm)
@@ -1402,10 +1479,19 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	long vma_pagesize, fault_granule;
 	enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R;
 	struct kvm_pgtable *pgt;
+	gpa_t gpa_stolen_mask = kvm_gpa_stolen_bits(vcpu->kvm);
 
 	if (fault_is_perm)
 		fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu);
 	write_fault = kvm_is_write_fault(vcpu);
+
+	/*
+	 * Realms cannot map protected pages read-only
+	 * FIXME: It should be possible to map unprotected pages read-only
+	 */
+	if (vcpu_is_rec(vcpu))
+		write_fault = true;
+
 	exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu);
 	VM_BUG_ON(write_fault && exec_fault);
 
@@ -1478,7 +1564,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
 		fault_ipa &= ~(vma_pagesize - 1);
 
-	gfn = fault_ipa >> PAGE_SHIFT;
+	gfn = (fault_ipa & ~gpa_stolen_mask) >> PAGE_SHIFT;
 	mte_allowed = kvm_vma_mte_allowed(vma);
 
 	vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED;
@@ -1538,7 +1624,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 * If we are not forced to use page mapping, check if we are
 	 * backed by a THP and thus use block mapping if possible.
 	 */
-	if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) {
+	/* FIXME: We shouldn't need to disable this for realms */
+	if (vma_pagesize == PAGE_SIZE && !(force_pte || device || kvm_is_realm(kvm))) {
 		if (fault_is_perm && fault_granule > PAGE_SIZE)
 			vma_pagesize = fault_granule;
 		else
@@ -1584,6 +1671,9 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 	 */
 	if (fault_is_perm && vma_pagesize == fault_granule)
 		ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot);
+	else if (kvm_is_realm(kvm))
+		ret = realm_map_ipa(kvm, fault_ipa, pfn, vma_pagesize,
+				    prot, memcache);
 	else
 		ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize,
 					     __pfn_to_phys(pfn), prot,
@@ -1638,6 +1728,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 	struct kvm_memory_slot *memslot;
 	unsigned long hva;
 	bool is_iabt, write_fault, writable;
+	gpa_t gpa_stolen_mask = kvm_gpa_stolen_bits(vcpu->kvm);
 	gfn_t gfn;
 	int ret, idx;
 
@@ -1693,8 +1784,15 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 
-	gfn = fault_ipa >> PAGE_SHIFT;
+	gfn = (fault_ipa & ~gpa_stolen_mask) >> PAGE_SHIFT;
 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
+
+	if (kvm_slot_can_be_private(memslot)) {
+		ret = private_memslot_fault(vcpu, fault_ipa, memslot);
+		if (ret != -EAGAIN)
+			goto out;
+	}
+
 	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
 	write_fault = kvm_is_write_fault(vcpu);
 	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
@@ -1738,6 +1836,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu)
 		 * of the page size.
 		 */
 		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
+		fault_ipa &= ~gpa_stolen_mask;
 		ret = io_mem_abort(vcpu, fault_ipa);
 		goto out_unlock;
 	}
@@ -1819,6 +1918,10 @@ bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
+	/* We don't support aging for Realms */
+	if (kvm_is_realm(kvm))
+		return true;
+
 	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
 						   range->start << PAGE_SHIFT,
 						   size, true);
@@ -1831,6 +1934,10 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
 	if (!kvm->arch.mmu.pgt)
 		return false;
 
+	/* We don't support aging for Realms */
+	if (kvm_is_realm(kvm))
+		return true;
+
 	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
 						   range->start << PAGE_SHIFT,
 						   size, false);
diff --git a/arch/arm64/kvm/rme.c b/arch/arm64/kvm/rme.c
index 4aab507f896e..72f6f5f542c4 100644
--- a/arch/arm64/kvm/rme.c
+++ b/arch/arm64/kvm/rme.c
@@ -606,6 +606,170 @@ static int fold_rtt(struct realm *realm, unsigned long addr, int level)
 	return 0;
 }
 
+int realm_map_protected(struct realm *realm,
+			unsigned long base_ipa,
+			struct page *dst_page,
+			unsigned long map_size,
+			struct kvm_mmu_memory_cache *memcache)
+{
+	phys_addr_t dst_phys = page_to_phys(dst_page);
+	phys_addr_t rd = virt_to_phys(realm->rd);
+	unsigned long phys = dst_phys;
+	unsigned long ipa = base_ipa;
+	unsigned long size;
+	int map_level;
+	int ret = 0;
+
+	if (WARN_ON(!IS_ALIGNED(ipa, map_size)))
+		return -EINVAL;
+
+	switch (map_size) {
+	case PAGE_SIZE:
+		map_level = 3;
+		break;
+	case RME_L2_BLOCK_SIZE:
+		map_level = 2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	if (map_level < RME_RTT_MAX_LEVEL) {
+		/*
+		 * A temporary RTT is needed during the map, precreate it,
+		 * however if there is an error (e.g. missing parent tables)
+		 * this will be handled below.
+		 */
+		realm_create_rtt_levels(realm, ipa, map_level,
+					RME_RTT_MAX_LEVEL, memcache);
+	}
+
+	for (size = 0; size < map_size; size += PAGE_SIZE) {
+		if (rmi_granule_delegate(phys)) {
+			struct rtt_entry rtt;
+
+			/*
+			 * It's possible we raced with another VCPU on the same
+			 * fault. If the entry exists and matches then exit
+			 * early and assume the other VCPU will handle the
+			 * mapping.
+			 */
+			if (rmi_rtt_read_entry(rd, ipa, RME_RTT_MAX_LEVEL, &rtt))
+				goto err;
+
+			// FIXME: For a block mapping this could race at level
+			// 2 or 3...
+			if (WARN_ON((rtt.walk_level != RME_RTT_MAX_LEVEL ||
+				     rtt.state != RMI_ASSIGNED ||
+				     rtt.desc != phys))) {
+				goto err;
+			}
+
+			return 0;
+		}
+
+		ret = rmi_data_create_unknown(rd, phys, ipa);
+
+		if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+			/* Create missing RTTs and retry */
+			int level = RMI_RETURN_INDEX(ret);
+
+			ret = realm_create_rtt_levels(realm, ipa, level,
+						      RME_RTT_MAX_LEVEL,
+						      memcache);
+			WARN_ON(ret);
+			if (ret)
+				goto err_undelegate;
+
+			ret = rmi_data_create_unknown(rd, phys, ipa);
+		}
+		WARN_ON(ret);
+
+		if (ret)
+			goto err_undelegate;
+
+		phys += PAGE_SIZE;
+		ipa += PAGE_SIZE;
+	}
+
+	if (map_size == RME_L2_BLOCK_SIZE)
+		ret = fold_rtt(realm, base_ipa, map_level);
+	if (WARN_ON(ret))
+		goto err;
+
+	return 0;
+
+err_undelegate:
+	if (WARN_ON(rmi_granule_undelegate(phys))) {
+		/* Page can't be returned to NS world so is lost */
+		get_page(phys_to_page(phys));
+	}
+err:
+	while (size > 0) {
+		unsigned long data, top;
+
+		phys -= PAGE_SIZE;
+		size -= PAGE_SIZE;
+		ipa -= PAGE_SIZE;
+
+		WARN_ON(rmi_data_destroy(rd, ipa, &data, &top));
+
+		if (WARN_ON(rmi_granule_undelegate(phys))) {
+			/* Page can't be returned to NS world so is lost */
+			get_page(phys_to_page(phys));
+		}
+	}
+	return -ENXIO;
+}
+
+int realm_map_non_secure(struct realm *realm,
+			 unsigned long ipa,
+			 struct page *page,
+			 unsigned long map_size,
+			 struct kvm_mmu_memory_cache *memcache)
+{
+	phys_addr_t rd = virt_to_phys(realm->rd);
+	int map_level;
+	int ret = 0;
+	unsigned long desc = page_to_phys(page) |
+			     PTE_S2_MEMATTR(MT_S2_FWB_NORMAL) |
+			     /* FIXME: Read+Write permissions for now */
+			     (3 << 6) |
+			     PTE_SHARED;
+
+	if (WARN_ON(!IS_ALIGNED(ipa, map_size)))
+		return -EINVAL;
+
+	switch (map_size) {
+	case PAGE_SIZE:
+		map_level = 3;
+		break;
+	case RME_L2_BLOCK_SIZE:
+		map_level = 2;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	ret = rmi_rtt_map_unprotected(rd, ipa, map_level, desc);
+
+	if (RMI_RETURN_STATUS(ret) == RMI_ERROR_RTT) {
+		/* Create missing RTTs and retry */
+		int level = RMI_RETURN_INDEX(ret);
+
+		ret = realm_create_rtt_levels(realm, ipa, level, map_level,
+					      memcache);
+		if (WARN_ON(ret))
+			return -ENXIO;
+
+		ret = rmi_rtt_map_unprotected(rd, ipa, map_level, desc);
+	}
+	if (WARN_ON(ret))
+		return -ENXIO;
+
+	return 0;
+}
+
 static int populate_par_region(struct kvm *kvm,
 			       phys_addr_t ipa_base,
 			       phys_addr_t ipa_end,
@@ -617,7 +781,6 @@ static int populate_par_region(struct kvm *kvm,
 	int idx;
 	phys_addr_t ipa;
 	int ret = 0;
-	struct page *tmp_page;
 	unsigned long data_flags = 0;
 
 	base_gfn = gpa_to_gfn(ipa_base);
@@ -639,9 +802,8 @@ static int populate_par_region(struct kvm *kvm,
 		goto out;
 	}
 
-	tmp_page = alloc_page(GFP_KERNEL);
-	if (!tmp_page) {
-		ret = -ENOMEM;
+	if (!kvm_slot_can_be_private(memslot)) {
+		ret = -EINVAL;
 		goto out;
 	}
 
@@ -714,31 +876,36 @@ static int populate_par_region(struct kvm *kvm,
 		for (offset = 0; offset < map_size && !ret;
 		     offset += PAGE_SIZE, page++) {
 			phys_addr_t page_ipa = ipa + offset;
+			kvm_pfn_t priv_pfn;
+			int order;
 
-			ret = realm_create_protected_data_page(realm, page_ipa,
-							       page, tmp_page,
-							       data_flags);
+			ret = kvm_gmem_get_pfn(kvm, memslot,
+					       page_ipa >> PAGE_SHIFT,
+					       &priv_pfn, &order);
+			if (ret)
+				break;
+
+			ret = realm_create_protected_data_page(
+					realm, page_ipa,
+					pfn_to_page(priv_pfn),
+					page, data_flags);
 		}
+
+		kvm_release_pfn_clean(pfn);
+
 		if (ret)
-			goto err_release_pfn;
+			break;
 
 		if (level == 2) {
 			ret = fold_rtt(realm, ipa, level);
 			if (ret)
-				goto err_release_pfn;
+				break;
 		}
 
 		ipa += map_size;
-		kvm_release_pfn_dirty(pfn);
-err_release_pfn:
-		if (ret) {
-			kvm_release_pfn_clean(pfn);
-			break;
-		}
 	}
 
 	mmap_read_unlock(current->mm);
-	__free_page(tmp_page);
 
 out:
 	srcu_read_unlock(&kvm->srcu, idx);
-- 
2.34.1

