From: Will Deacon <will@kernel.org>
To: kvmarm@lists.linux.dev
Cc: "Will Deacon" <will@kernel.org>,
"Sean Christopherson" <seanjc@google.com>,
"Vincent Donnefort" <vdonnefort@google.com>,
"Alexandru Elisei" <alexandru.elisei@arm.com>,
"Catalin Marinas" <catalin.marinas@arm.com>,
"Philippe Mathieu-Daudé" <philmd@linaro.org>,
"James Morse" <james.morse@arm.com>,
"Chao Peng" <chao.p.peng@linux.intel.com>,
"Quentin Perret" <qperret@google.com>,
"Suzuki K Poulose" <suzuki.poulose@arm.com>,
"Mark Rutland" <mark.rutland@arm.com>,
"Fuad Tabba" <tabba@google.com>,
"Oliver Upton" <oliver.upton@linux.dev>,
"Marc Zyngier" <maz@kernel.org>,
kernel-team@android.com, kvm@vger.kernel.org,
linux-arm-kernel@lists.infradead.org
Subject: [PATCH v6 25/26] KVM: arm64: Clean out the odd handling of completer_addr
Date: Thu, 10 Nov 2022 19:02:58 +0000 [thread overview]
Message-ID: <20221110190259.26861-26-will@kernel.org> (raw)
In-Reply-To: <20221110190259.26861-1-will@kernel.org>
From: Oliver Upton <oliver.upton@linux.dev>
The layout of struct pkvm_mem_transition is a bit weird; the destination
address for the transition is actually stashed in the initiator address
context. Even weirder, that address is thrown inside a union and
returned from helpers by use of an out pointer.
Rip out the whole mess and move the destination address into the
destination context sub-struct. No functional change intended.
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
[will: Extended to include host/hyp donation paths]
Signed-off-by: Will Deacon <will@kernel.org>
---
arch/arm64/kvm/hyp/nvhe/mem_protect.c | 130 ++++++++++----------------
1 file changed, 48 insertions(+), 82 deletions(-)
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 94cd48f7850e..dfeddaf2a462 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -534,20 +534,12 @@ struct pkvm_mem_transition {
enum pkvm_component_id id;
/* Address in the initiator's address space */
u64 addr;
-
- union {
- struct {
- /* Address in the completer's address space */
- u64 completer_addr;
- } host;
- struct {
- u64 completer_addr;
- } hyp;
- };
} initiator;
struct {
enum pkvm_component_id id;
+ /* Address in the completer's address space */
+ u64 addr;
} completer;
};
@@ -619,53 +611,43 @@ static int __host_set_page_state_range(u64 addr, u64 size,
return host_stage2_idmap_locked(addr, size, prot);
}
-static int host_request_owned_transition(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int host_request_owned_transition(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
- *completer_addr = tx->initiator.host.completer_addr;
return __host_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
-static int host_request_unshare(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int host_request_unshare(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
- *completer_addr = tx->initiator.host.completer_addr;
return __host_check_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
-static int host_initiate_share(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int host_initiate_share(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
- *completer_addr = tx->initiator.host.completer_addr;
return __host_set_page_state_range(addr, size, PKVM_PAGE_SHARED_OWNED);
}
-static int host_initiate_unshare(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int host_initiate_unshare(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
- *completer_addr = tx->initiator.host.completer_addr;
return __host_set_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
-static int host_initiate_donation(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int host_initiate_donation(const struct pkvm_mem_transition *tx)
{
u8 owner_id = tx->completer.id;
u64 size = tx->nr_pages * PAGE_SIZE;
- *completer_addr = tx->initiator.host.completer_addr;
return host_stage2_set_owner_locked(tx->initiator.addr, size, owner_id);
}
@@ -686,17 +668,17 @@ static int __host_ack_transition(u64 addr, const struct pkvm_mem_transition *tx,
return __host_check_page_state_range(addr, size, state);
}
-static int host_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
+static int host_ack_donation(const struct pkvm_mem_transition *tx)
{
- return __host_ack_transition(addr, tx, PKVM_NOPAGE);
+ return __host_ack_transition(tx->completer.addr, tx, PKVM_NOPAGE);
}
-static int host_complete_donation(u64 addr, const struct pkvm_mem_transition *tx)
+static int host_complete_donation(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u8 host_id = tx->completer.id;
- return host_stage2_set_owner_locked(addr, size, host_id);
+ return host_stage2_set_owner_locked(tx->completer.addr, size, host_id);
}
static enum pkvm_page_state hyp_get_page_state(kvm_pte_t pte)
@@ -719,23 +701,19 @@ static int __hyp_check_page_state_range(u64 addr, u64 size,
return check_page_state_range(&pkvm_pgtable, addr, size, &d);
}
-static int hyp_request_donation(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int hyp_request_donation(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
u64 addr = tx->initiator.addr;
- *completer_addr = tx->initiator.hyp.completer_addr;
return __hyp_check_page_state_range(addr, size, PKVM_PAGE_OWNED);
}
-static int hyp_initiate_donation(u64 *completer_addr,
- const struct pkvm_mem_transition *tx)
+static int hyp_initiate_donation(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
int ret;
- *completer_addr = tx->initiator.hyp.completer_addr;
ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->initiator.addr, size);
return (ret != size) ? -EFAULT : 0;
}
@@ -746,7 +724,7 @@ static bool __hyp_ack_skip_pgtable_check(const struct pkvm_mem_transition *tx)
tx->initiator.id != PKVM_ID_HOST);
}
-static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
+static int hyp_ack_share(const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
u64 size = tx->nr_pages * PAGE_SIZE;
@@ -757,12 +735,12 @@ static int hyp_ack_share(u64 addr, const struct pkvm_mem_transition *tx,
if (__hyp_ack_skip_pgtable_check(tx))
return 0;
- return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
+ return __hyp_check_page_state_range(tx->completer.addr, size, PKVM_NOPAGE);
}
-static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
+static int hyp_ack_unshare(const struct pkvm_mem_transition *tx)
{
- u64 size = tx->nr_pages * PAGE_SIZE;
+ u64 size = tx->nr_pages * PAGE_SIZE, addr = tx->completer.addr;
if (tx->initiator.id == PKVM_ID_HOST && hyp_page_count((void *)addr))
return -EBUSY;
@@ -774,38 +752,40 @@ static int hyp_ack_unshare(u64 addr, const struct pkvm_mem_transition *tx)
PKVM_PAGE_SHARED_BORROWED);
}
-static int hyp_ack_donation(u64 addr, const struct pkvm_mem_transition *tx)
+static int hyp_ack_donation(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
if (__hyp_ack_skip_pgtable_check(tx))
return 0;
- return __hyp_check_page_state_range(addr, size, PKVM_NOPAGE);
+ return __hyp_check_page_state_range(tx->completer.addr, size,
+ PKVM_NOPAGE);
}
-static int hyp_complete_share(u64 addr, const struct pkvm_mem_transition *tx,
+static int hyp_complete_share(const struct pkvm_mem_transition *tx,
enum kvm_pgtable_prot perms)
{
- void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
+ void *start = (void *)tx->completer.addr;
+ void *end = start + (tx->nr_pages * PAGE_SIZE);
enum kvm_pgtable_prot prot;
prot = pkvm_mkstate(perms, PKVM_PAGE_SHARED_BORROWED);
return pkvm_create_mappings_locked(start, end, prot);
}
-static int hyp_complete_unshare(u64 addr, const struct pkvm_mem_transition *tx)
+static int hyp_complete_unshare(const struct pkvm_mem_transition *tx)
{
u64 size = tx->nr_pages * PAGE_SIZE;
- int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, addr, size);
+ int ret = kvm_pgtable_hyp_unmap(&pkvm_pgtable, tx->completer.addr, size);
return (ret != size) ? -EFAULT : 0;
}
-static int hyp_complete_donation(u64 addr,
- const struct pkvm_mem_transition *tx)
+static int hyp_complete_donation(const struct pkvm_mem_transition *tx)
{
- void *start = (void *)addr, *end = start + (tx->nr_pages * PAGE_SIZE);
+ void *start = (void *)tx->completer.addr;
+ void *end = start + (tx->nr_pages * PAGE_SIZE);
enum kvm_pgtable_prot prot = pkvm_mkstate(PAGE_HYP, PKVM_PAGE_OWNED);
return pkvm_create_mappings_locked(start, end, prot);
@@ -814,12 +794,11 @@ static int hyp_complete_donation(u64 addr,
static int check_share(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_request_owned_transition(&completer_addr, tx);
+ ret = host_request_owned_transition(tx);
break;
default:
ret = -EINVAL;
@@ -830,7 +809,7 @@ static int check_share(struct pkvm_mem_share *share)
switch (tx->completer.id) {
case PKVM_ID_HYP:
- ret = hyp_ack_share(completer_addr, tx, share->completer_prot);
+ ret = hyp_ack_share(tx, share->completer_prot);
break;
default:
ret = -EINVAL;
@@ -842,12 +821,11 @@ static int check_share(struct pkvm_mem_share *share)
static int __do_share(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_initiate_share(&completer_addr, tx);
+ ret = host_initiate_share(tx);
break;
default:
ret = -EINVAL;
@@ -858,7 +836,7 @@ static int __do_share(struct pkvm_mem_share *share)
switch (tx->completer.id) {
case PKVM_ID_HYP:
- ret = hyp_complete_share(completer_addr, tx, share->completer_prot);
+ ret = hyp_complete_share(tx, share->completer_prot);
break;
default:
ret = -EINVAL;
@@ -890,12 +868,11 @@ static int do_share(struct pkvm_mem_share *share)
static int check_unshare(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_request_unshare(&completer_addr, tx);
+ ret = host_request_unshare(tx);
break;
default:
ret = -EINVAL;
@@ -906,7 +883,7 @@ static int check_unshare(struct pkvm_mem_share *share)
switch (tx->completer.id) {
case PKVM_ID_HYP:
- ret = hyp_ack_unshare(completer_addr, tx);
+ ret = hyp_ack_unshare(tx);
break;
default:
ret = -EINVAL;
@@ -918,12 +895,11 @@ static int check_unshare(struct pkvm_mem_share *share)
static int __do_unshare(struct pkvm_mem_share *share)
{
const struct pkvm_mem_transition *tx = &share->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_initiate_unshare(&completer_addr, tx);
+ ret = host_initiate_unshare(tx);
break;
default:
ret = -EINVAL;
@@ -934,7 +910,7 @@ static int __do_unshare(struct pkvm_mem_share *share)
switch (tx->completer.id) {
case PKVM_ID_HYP:
- ret = hyp_complete_unshare(completer_addr, tx);
+ ret = hyp_complete_unshare(tx);
break;
default:
ret = -EINVAL;
@@ -966,15 +942,14 @@ static int do_unshare(struct pkvm_mem_share *share)
static int check_donation(struct pkvm_mem_donation *donation)
{
const struct pkvm_mem_transition *tx = &donation->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_request_owned_transition(&completer_addr, tx);
+ ret = host_request_owned_transition(tx);
break;
case PKVM_ID_HYP:
- ret = hyp_request_donation(&completer_addr, tx);
+ ret = hyp_request_donation(tx);
break;
default:
ret = -EINVAL;
@@ -985,10 +960,10 @@ static int check_donation(struct pkvm_mem_donation *donation)
switch (tx->completer.id) {
case PKVM_ID_HOST:
- ret = host_ack_donation(completer_addr, tx);
+ ret = host_ack_donation(tx);
break;
case PKVM_ID_HYP:
- ret = hyp_ack_donation(completer_addr, tx);
+ ret = hyp_ack_donation(tx);
break;
default:
ret = -EINVAL;
@@ -1000,15 +975,14 @@ static int check_donation(struct pkvm_mem_donation *donation)
static int __do_donate(struct pkvm_mem_donation *donation)
{
const struct pkvm_mem_transition *tx = &donation->tx;
- u64 completer_addr;
int ret;
switch (tx->initiator.id) {
case PKVM_ID_HOST:
- ret = host_initiate_donation(&completer_addr, tx);
+ ret = host_initiate_donation(tx);
break;
case PKVM_ID_HYP:
- ret = hyp_initiate_donation(&completer_addr, tx);
+ ret = hyp_initiate_donation(tx);
break;
default:
ret = -EINVAL;
@@ -1019,10 +993,10 @@ static int __do_donate(struct pkvm_mem_donation *donation)
switch (tx->completer.id) {
case PKVM_ID_HOST:
- ret = host_complete_donation(completer_addr, tx);
+ ret = host_complete_donation(tx);
break;
case PKVM_ID_HYP:
- ret = hyp_complete_donation(completer_addr, tx);
+ ret = hyp_complete_donation(tx);
break;
default:
ret = -EINVAL;
@@ -1062,12 +1036,10 @@ int __pkvm_host_share_hyp(u64 pfn)
.initiator = {
.id = PKVM_ID_HOST,
.addr = host_addr,
- .host = {
- .completer_addr = hyp_addr,
- },
},
.completer = {
.id = PKVM_ID_HYP,
+ .addr = hyp_addr,
},
},
.completer_prot = PAGE_HYP,
@@ -1095,12 +1067,10 @@ int __pkvm_host_unshare_hyp(u64 pfn)
.initiator = {
.id = PKVM_ID_HOST,
.addr = host_addr,
- .host = {
- .completer_addr = hyp_addr,
- },
},
.completer = {
.id = PKVM_ID_HYP,
+ .addr = hyp_addr,
},
},
.completer_prot = PAGE_HYP,
@@ -1128,12 +1098,10 @@ int __pkvm_host_donate_hyp(u64 pfn, u64 nr_pages)
.initiator = {
.id = PKVM_ID_HOST,
.addr = host_addr,
- .host = {
- .completer_addr = hyp_addr,
- },
},
.completer = {
.id = PKVM_ID_HYP,
+ .addr = hyp_addr,
},
},
};
@@ -1160,12 +1128,10 @@ int __pkvm_hyp_donate_host(u64 pfn, u64 nr_pages)
.initiator = {
.id = PKVM_ID_HYP,
.addr = hyp_addr,
- .hyp = {
- .completer_addr = host_addr,
- },
},
.completer = {
.id = PKVM_ID_HOST,
+ .addr = host_addr,
},
},
};
--
2.38.1.431.g37b22c650d-goog
next prev parent reply other threads:[~2022-11-10 19:05 UTC|newest]
Thread overview: 35+ messages / expand[flat|nested] mbox.gz Atom feed top
2022-11-10 19:02 [PATCH v6 00/26] KVM: arm64: Introduce pKVM hyp VM and vCPU state at EL2 Will Deacon
2022-11-10 19:02 ` [PATCH v6 01/26] KVM: arm64: Move hyp refcount manipulation helpers to common header file Will Deacon
2022-11-10 19:02 ` [PATCH v6 02/26] KVM: arm64: Allow attaching of non-coalescable pages to a hyp pool Will Deacon
2022-11-10 19:02 ` [PATCH v6 03/26] KVM: arm64: Back the hypervisor 'struct hyp_page' array for all memory Will Deacon
2022-11-10 19:02 ` [PATCH v6 04/26] KVM: arm64: Fix-up hyp stage-1 refcounts for all pages mapped at EL2 Will Deacon
2022-11-10 19:02 ` [PATCH v6 05/26] KVM: arm64: Unify identifiers used to distinguish host and hypervisor Will Deacon
2022-11-10 19:02 ` [PATCH v6 06/26] KVM: arm64: Implement do_donate() helper for donating memory Will Deacon
2022-11-10 19:02 ` [PATCH v6 07/26] KVM: arm64: Prevent the donation of no-map pages Will Deacon
2022-11-10 19:02 ` [PATCH v6 08/26] KVM: arm64: Add helpers to pin memory shared with the hypervisor at EL2 Will Deacon
2022-11-10 19:02 ` [PATCH v6 09/26] KVM: arm64: Include asm/kvm_mmu.h in nvhe/mem_protect.h Will Deacon
2022-11-10 19:02 ` [PATCH v6 10/26] KVM: arm64: Add hyp_spinlock_t static initializer Will Deacon
2022-11-10 19:02 ` [PATCH v6 11/26] KVM: arm64: Rename 'host_kvm' to 'host_mmu' Will Deacon
2022-11-10 19:02 ` [PATCH v6 12/26] KVM: arm64: Add infrastructure to create and track pKVM instances at EL2 Will Deacon
2022-11-11 17:11 ` Marc Zyngier
2022-11-10 19:02 ` [PATCH v6 13/26] KVM: arm64: Instantiate pKVM hypervisor VM and vCPU structures from EL1 Will Deacon
2022-11-10 19:02 ` [PATCH v6 14/26] KVM: arm64: Add per-cpu fixmap infrastructure at EL2 Will Deacon
2022-11-10 19:02 ` [PATCH v6 15/26] KVM: arm64: Initialise hypervisor copies of host symbols unconditionally Will Deacon
2022-11-10 19:02 ` [PATCH v6 16/26] KVM: arm64: Provide I-cache invalidation by virtual address at EL2 Will Deacon
2022-11-10 19:02 ` [PATCH v6 17/26] KVM: arm64: Add generic hyp_memcache helpers Will Deacon
2022-11-10 19:02 ` [PATCH v6 18/26] KVM: arm64: Consolidate stage-2 initialisation into a single function Will Deacon
2022-11-10 19:02 ` [PATCH v6 19/26] KVM: arm64: Instantiate guest stage-2 page-tables at EL2 Will Deacon
2022-11-10 19:02 ` [PATCH v6 20/26] KVM: arm64: Return guest memory from EL2 via dedicated teardown memcache Will Deacon
2022-11-10 19:02 ` [PATCH v6 21/26] KVM: arm64: Unmap 'kvm_arm_hyp_percpu_base' from the host Will Deacon
2022-11-10 19:02 ` [PATCH v6 22/26] KVM: arm64: Maintain a copy of 'kvm_arm_vmid_bits' at EL2 Will Deacon
2022-11-10 19:02 ` [PATCH v6 23/26] KVM: arm64: Explicitly map 'kvm_vgic_global_state' " Will Deacon
2022-11-10 19:02 ` [PATCH v6 24/26] KVM: arm64: Don't unnecessarily map host kernel sections " Will Deacon
2022-11-10 19:02 ` Will Deacon [this message]
2022-11-10 19:02 ` [RFC PATCH v6 26/26] KVM: arm64: Use the pKVM hyp vCPU structure in handle___kvm_vcpu_run() Will Deacon
2022-11-11 16:54 ` [PATCH v6 00/26] KVM: arm64: Introduce pKVM hyp VM and vCPU state at EL2 Marc Zyngier
2022-11-11 19:42 ` Oliver Upton
2022-11-14 18:19 ` Will Deacon
2022-11-11 19:06 ` Marc Zyngier
2022-11-11 20:08 ` Oliver Upton
2022-11-12 11:34 ` Marc Zyngier
2022-11-14 19:30 ` Will Deacon
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20221110190259.26861-26-will@kernel.org \
--to=will@kernel.org \
--cc=alexandru.elisei@arm.com \
--cc=catalin.marinas@arm.com \
--cc=chao.p.peng@linux.intel.com \
--cc=james.morse@arm.com \
--cc=kernel-team@android.com \
--cc=kvm@vger.kernel.org \
--cc=kvmarm@lists.linux.dev \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=mark.rutland@arm.com \
--cc=maz@kernel.org \
--cc=oliver.upton@linux.dev \
--cc=philmd@linaro.org \
--cc=qperret@google.com \
--cc=seanjc@google.com \
--cc=suzuki.poulose@arm.com \
--cc=tabba@google.com \
--cc=vdonnefort@google.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).