* [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 8:56 ` Marc Zyngier
0 siblings, 0 replies; 15+ messages in thread
From: Marc Zyngier @ 2020-06-08 8:56 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel
Cc: James Morse, Julien Thierry, Suzuki K Poulose, Mark Rutland,
kernel-team, Andrew Scull
For a very long time, we have kept this pointer back to the per-cpu
host state, despite having working per-cpu accessors at EL2
for some time now.
Recent investigations have shown that this pointer is easy
to abuse in preemptible context, which is a sure sign that
it would be better gone. Not to mention that a per-cpu
pointer is faster to access at all times.
Reported-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
Notes:
v2: Stick to this_cpu_ptr() in pmu.c, as this is only used on the
kernel side and not the hypervisor.
arch/arm64/include/asm/kvm_host.h | 3 ---
arch/arm64/kvm/arm.c | 3 ---
arch/arm64/kvm/hyp/debug-sr.c | 4 ++--
arch/arm64/kvm/hyp/switch.c | 6 +++---
arch/arm64/kvm/hyp/sysreg-sr.c | 6 ++++--
arch/arm64/kvm/pmu.c | 8 ++------
6 files changed, 11 insertions(+), 19 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 59029e90b557..ada1faa92211 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
- /* Pointer to host CPU context */
- struct kvm_cpu_context *host_cpu_context;
-
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 14b747266607..6ddaa23ef346 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
int *last_ran;
- kvm_host_data_t *cpu_data;
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
- cpu_data = this_cpu_ptr(&kvm_host_data);
/*
* We might get preempted before the vCPU actually runs, but
@@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 0fc9872a1467..e95af204fec7 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index fc09c3dfa466..fc671426c14b 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -544,7 +544,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
return false;
}
- ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__ptrauth_save_key(ctxt->sys_regs, APIA);
__ptrauth_save_key(ctxt->sys_regs, APIB);
__ptrauth_save_key(ctxt->sys_regs, APDA);
@@ -715,7 +715,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
- host_ctxt = vcpu->arch.host_cpu_context;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
@@ -820,7 +820,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
vcpu = kern_hyp_va(vcpu);
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 6d2df9fe0b5d..143d7b7358f2 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);
/*
@@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
__sysreg_save_el1_state(guest_ctxt);
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index e71d00bb5271..b5ae3a5d509e 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
--
2.26.2
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 8:56 ` Marc Zyngier
0 siblings, 0 replies; 15+ messages in thread
From: Marc Zyngier @ 2020-06-08 8:56 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel; +Cc: kernel-team
For a very long time, we have kept this pointer back to the per-cpu
host state, despite having working per-cpu accessors at EL2
for some time now.
Recent investigations have shown that this pointer is easy
to abuse in preemptible context, which is a sure sign that
it would be better gone. Not to mention that a per-cpu
pointer is faster to access at all times.
Reported-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
Notes:
v2: Stick to this_cpu_ptr() in pmu.c, as this is only used on the
kernel side and not the hypervisor.
arch/arm64/include/asm/kvm_host.h | 3 ---
arch/arm64/kvm/arm.c | 3 ---
arch/arm64/kvm/hyp/debug-sr.c | 4 ++--
arch/arm64/kvm/hyp/switch.c | 6 +++---
arch/arm64/kvm/hyp/sysreg-sr.c | 6 ++++--
arch/arm64/kvm/pmu.c | 8 ++------
6 files changed, 11 insertions(+), 19 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 59029e90b557..ada1faa92211 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
- /* Pointer to host CPU context */
- struct kvm_cpu_context *host_cpu_context;
-
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 14b747266607..6ddaa23ef346 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
int *last_ran;
- kvm_host_data_t *cpu_data;
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
- cpu_data = this_cpu_ptr(&kvm_host_data);
/*
* We might get preempted before the vCPU actually runs, but
@@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 0fc9872a1467..e95af204fec7 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index fc09c3dfa466..fc671426c14b 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -544,7 +544,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
return false;
}
- ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__ptrauth_save_key(ctxt->sys_regs, APIA);
__ptrauth_save_key(ctxt->sys_regs, APIB);
__ptrauth_save_key(ctxt->sys_regs, APDA);
@@ -715,7 +715,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
- host_ctxt = vcpu->arch.host_cpu_context;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
@@ -820,7 +820,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
vcpu = kern_hyp_va(vcpu);
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 6d2df9fe0b5d..143d7b7358f2 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);
/*
@@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
__sysreg_save_el1_state(guest_ctxt);
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index e71d00bb5271..b5ae3a5d509e 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
--
2.26.2
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply related [flat|nested] 15+ messages in thread
* [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 8:56 ` Marc Zyngier
0 siblings, 0 replies; 15+ messages in thread
From: Marc Zyngier @ 2020-06-08 8:56 UTC (permalink / raw)
To: kvm, kvmarm, linux-arm-kernel
Cc: Mark Rutland, Suzuki K Poulose, James Morse, Andrew Scull,
kernel-team, Julien Thierry
For a very long time, we have kept this pointer back to the per-cpu
host state, despite having working per-cpu accessors at EL2
for some time now.
Recent investigations have shown that this pointer is easy
to abuse in preemptible context, which is a sure sign that
it would be better gone. Not to mention that a per-cpu
pointer is faster to access at all times.
Reported-by: Andrew Scull <ascull@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
---
Notes:
v2: Stick to this_cpu_ptr() in pmu.c, as this is only used on the
kernel side and not the hypervisor.
arch/arm64/include/asm/kvm_host.h | 3 ---
arch/arm64/kvm/arm.c | 3 ---
arch/arm64/kvm/hyp/debug-sr.c | 4 ++--
arch/arm64/kvm/hyp/switch.c | 6 +++---
arch/arm64/kvm/hyp/sysreg-sr.c | 6 ++++--
arch/arm64/kvm/pmu.c | 8 ++------
6 files changed, 11 insertions(+), 19 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 59029e90b557..ada1faa92211 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
struct kvm_guest_debug_arch vcpu_debug_state;
struct kvm_guest_debug_arch external_debug_state;
- /* Pointer to host CPU context */
- struct kvm_cpu_context *host_cpu_context;
-
struct thread_info *host_thread_info; /* hyp VA */
struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 14b747266607..6ddaa23ef346 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
int *last_ran;
- kvm_host_data_t *cpu_data;
last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
- cpu_data = this_cpu_ptr(&kvm_host_data);
/*
* We might get preempted before the vCPU actually runs, but
@@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
}
vcpu->cpu = cpu;
- vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
kvm_vgic_load(vcpu);
kvm_timer_vcpu_load(vcpu);
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index 0fc9872a1467..e95af204fec7 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
@@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
return;
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
guest_ctxt = &vcpu->arch.ctxt;
host_dbg = &vcpu->arch.host_debug_state.regs;
guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index fc09c3dfa466..fc671426c14b 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -544,7 +544,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
return false;
}
- ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__ptrauth_save_key(ctxt->sys_regs, APIA);
__ptrauth_save_key(ctxt->sys_regs, APIB);
__ptrauth_save_key(ctxt->sys_regs, APDA);
@@ -715,7 +715,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
struct kvm_cpu_context *guest_ctxt;
u64 exit_code;
- host_ctxt = vcpu->arch.host_cpu_context;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
@@ -820,7 +820,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
vcpu = kern_hyp_va(vcpu);
- host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
host_ctxt->__hyp_running_vcpu = vcpu;
guest_ctxt = &vcpu->arch.ctxt;
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 6d2df9fe0b5d..143d7b7358f2 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
__sysreg_save_user_state(host_ctxt);
/*
@@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
+ struct kvm_cpu_context *host_ctxt;
if (!has_vhe())
return;
+ host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
deactivate_traps_vhe_put();
__sysreg_save_el1_state(guest_ctxt);
diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
index e71d00bb5271..b5ae3a5d509e 100644
--- a/arch/arm64/kvm/pmu.c
+++ b/arch/arm64/kvm/pmu.c
@@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
*/
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
@@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
*/
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
{
- struct kvm_cpu_context *host_ctxt;
struct kvm_host_data *host;
u32 events_guest, events_host;
if (!has_vhe())
return;
- host_ctxt = vcpu->arch.host_cpu_context;
- host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
+ host = this_cpu_ptr(&kvm_host_data);
events_guest = host->pmu_events.events_guest;
events_host = host->pmu_events.events_host;
--
2.26.2
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply related [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
2020-06-08 8:56 ` Marc Zyngier
(?)
@ 2020-06-08 14:51 ` Andrew Scull
-1 siblings, 0 replies; 15+ messages in thread
From: Andrew Scull @ 2020-06-08 14:51 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvm, kvmarm, linux-arm-kernel, James Morse, Julien Thierry,
Suzuki K Poulose, Mark Rutland, kernel-team
On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
> For very long, we have kept this pointer back to the per-cpu
> host state, despite having working per-cpu accessors at EL2
> for some time now.
>
> Recent investigations have shown that this pointer is easy
> to abuse in preemptible context, which is a sure sign that
> it would better be gone. Not to mention that a per-cpu
> pointer is faster to access at all times.
Helps to make the references to `kvm_host_data` clearer with there now
being just one way to get to it and shows that it is scoped to the
current CPU. A good change IMO!
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 14:51 ` Andrew Scull
0 siblings, 0 replies; 15+ messages in thread
From: Andrew Scull @ 2020-06-08 14:51 UTC (permalink / raw)
To: Marc Zyngier; +Cc: kvm, kernel-team, kvmarm, linux-arm-kernel
On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
> For very long, we have kept this pointer back to the per-cpu
> host state, despite having working per-cpu accessors at EL2
> for some time now.
>
> Recent investigations have shown that this pointer is easy
> to abuse in preemptible context, which is a sure sign that
> it would better be gone. Not to mention that a per-cpu
> pointer is faster to access at all times.
Helps to make the references to `kvm_host_data` clearer with there now
being just one way to get to it and shows that it is scoped to the
current CPU. A good change IMO!
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 14:51 ` Andrew Scull
0 siblings, 0 replies; 15+ messages in thread
From: Andrew Scull @ 2020-06-08 14:51 UTC (permalink / raw)
To: Marc Zyngier
Cc: Mark Rutland, kvm, Suzuki K Poulose, James Morse, Julien Thierry,
kernel-team, kvmarm, linux-arm-kernel
On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
> For very long, we have kept this pointer back to the per-cpu
> host state, despite having working per-cpu accessors at EL2
> for some time now.
>
> Recent investigations have shown that this pointer is easy
> to abuse in preemptible context, which is a sure sign that
> it would better be gone. Not to mention that a per-cpu
> pointer is faster to access at all times.
Helps to make the references to `kvm_host_data` clearer with there now
being just one way to get to it and shows that it is scoped to the
current CPU. A good change IMO!
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
2020-06-08 14:51 ` Andrew Scull
(?)
@ 2020-06-08 15:42 ` Marc Zyngier
-1 siblings, 0 replies; 15+ messages in thread
From: Marc Zyngier @ 2020-06-08 15:42 UTC (permalink / raw)
To: Andrew Scull
Cc: kvm, kvmarm, linux-arm-kernel, James Morse, Julien Thierry,
Suzuki K Poulose, Mark Rutland, kernel-team
Hi Andrew,
On 2020-06-08 15:51, Andrew Scull wrote:
> On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
>> For very long, we have kept this pointer back to the per-cpu
>> host state, despite having working per-cpu accessors at EL2
>> for some time now.
>>
>> Recent investigations have shown that this pointer is easy
>> to abuse in preemptible context, which is a sure sign that
>> it would better be gone. Not to mention that a per-cpu
>> pointer is faster to access at all times.
>
> Helps to make the references to `kvm_host_data` clearer with there now
> being just one way to get to it and shows that it is scoped to the
> current CPU. A good change IMO!
Thanks! Can I take this as a Reviewed-by or Acked-by tag? Just let me
know.
Cheers,
M.
--
Jazz is not dead. It just smells funny...
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 15:42 ` Marc Zyngier
0 siblings, 0 replies; 15+ messages in thread
From: Marc Zyngier @ 2020-06-08 15:42 UTC (permalink / raw)
To: Andrew Scull; +Cc: kvm, kernel-team, kvmarm, linux-arm-kernel
Hi Andrew,
On 2020-06-08 15:51, Andrew Scull wrote:
> On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
>> For very long, we have kept this pointer back to the per-cpu
>> host state, despite having working per-cpu accessors at EL2
>> for some time now.
>>
>> Recent investigations have shown that this pointer is easy
>> to abuse in preemptible context, which is a sure sign that
>> it would better be gone. Not to mention that a per-cpu
>> pointer is faster to access at all times.
>
> Helps to make the references to `kvm_host_data` clearer with there now
> being just one way to get to it and shows that it is scoped to the
> current CPU. A good change IMO!
Thanks! Can I take this as a Reviewed-by or Acked-by tag? Just let me
know.
Cheers,
M.
--
Jazz is not dead. It just smells funny...
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 15:42 ` Marc Zyngier
0 siblings, 0 replies; 15+ messages in thread
From: Marc Zyngier @ 2020-06-08 15:42 UTC (permalink / raw)
To: Andrew Scull
Cc: Mark Rutland, kvm, Suzuki K Poulose, James Morse, Julien Thierry,
kernel-team, kvmarm, linux-arm-kernel
Hi Andrew,
On 2020-06-08 15:51, Andrew Scull wrote:
> On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
>> For very long, we have kept this pointer back to the per-cpu
>> host state, despite having working per-cpu accessors at EL2
>> for some time now.
>>
>> Recent investigations have shown that this pointer is easy
>> to abuse in preemptible context, which is a sure sign that
>> it would better be gone. Not to mention that a per-cpu
>> pointer is faster to access at all times.
>
> Helps to make the references to `kvm_host_data` clearer with there now
> being just one way to get to it and shows that it is scoped to the
> current CPU. A good change IMO!
Thanks! Can I take this as a Reviewed-by or Acked-by tag? Just let me
know.
Cheers,
M.
--
Jazz is not dead. It just smells funny...
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
2020-06-08 8:56 ` Marc Zyngier
(?)
@ 2020-06-08 16:29 ` Mark Rutland
-1 siblings, 0 replies; 15+ messages in thread
From: Mark Rutland @ 2020-06-08 16:29 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvm, kvmarm, linux-arm-kernel, James Morse, Julien Thierry,
Suzuki K Poulose, kernel-team, Andrew Scull
On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
> For very long, we have kept this pointer back to the per-cpu
> host state, despite having working per-cpu accessors at EL2
> for some time now.
>
> Recent investigations have shown that this pointer is easy
> to abuse in preemptible context, which is a sure sign that
> it would better be gone. Not to mention that a per-cpu
> pointer is faster to access at all times.
>
> Reported-by: Andrew Scull <ascull@google.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
From a quick scan, this looks sane to me, so FWIW:
Acked-by: Mark Rutland <mark.rutland@arm.com>
Mark.
> ---
>
> Notes:
> v2: Stick to this_cpu_ptr() in pmu.c, as this only used on the
> kernel side and not the hypervisor.
>
> arch/arm64/include/asm/kvm_host.h | 3 ---
> arch/arm64/kvm/arm.c | 3 ---
> arch/arm64/kvm/hyp/debug-sr.c | 4 ++--
> arch/arm64/kvm/hyp/switch.c | 6 +++---
> arch/arm64/kvm/hyp/sysreg-sr.c | 6 ++++--
> arch/arm64/kvm/pmu.c | 8 ++------
> 6 files changed, 11 insertions(+), 19 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 59029e90b557..ada1faa92211 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
> struct kvm_guest_debug_arch vcpu_debug_state;
> struct kvm_guest_debug_arch external_debug_state;
>
> - /* Pointer to host CPU context */
> - struct kvm_cpu_context *host_cpu_context;
> -
> struct thread_info *host_thread_info; /* hyp VA */
> struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
>
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 14b747266607..6ddaa23ef346 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
> void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> {
> int *last_ran;
> - kvm_host_data_t *cpu_data;
>
> last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
> - cpu_data = this_cpu_ptr(&kvm_host_data);
>
> /*
> * We might get preempted before the vCPU actually runs, but
> @@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> }
>
> vcpu->cpu = cpu;
> - vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
>
> kvm_vgic_load(vcpu);
> kvm_timer_vcpu_load(vcpu);
> diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
> index 0fc9872a1467..e95af204fec7 100644
> --- a/arch/arm64/kvm/hyp/debug-sr.c
> +++ b/arch/arm64/kvm/hyp/debug-sr.c
> @@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
> if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
> return;
>
> - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> guest_ctxt = &vcpu->arch.ctxt;
> host_dbg = &vcpu->arch.host_debug_state.regs;
> guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
> @@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
> if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
> return;
>
> - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> guest_ctxt = &vcpu->arch.ctxt;
> host_dbg = &vcpu->arch.host_debug_state.regs;
> guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
> index fc09c3dfa466..fc671426c14b 100644
> --- a/arch/arm64/kvm/hyp/switch.c
> +++ b/arch/arm64/kvm/hyp/switch.c
> @@ -544,7 +544,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
> return false;
> }
>
> - ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> __ptrauth_save_key(ctxt->sys_regs, APIA);
> __ptrauth_save_key(ctxt->sys_regs, APIB);
> __ptrauth_save_key(ctxt->sys_regs, APDA);
> @@ -715,7 +715,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
> struct kvm_cpu_context *guest_ctxt;
> u64 exit_code;
>
> - host_ctxt = vcpu->arch.host_cpu_context;
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> host_ctxt->__hyp_running_vcpu = vcpu;
> guest_ctxt = &vcpu->arch.ctxt;
>
> @@ -820,7 +820,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
>
> vcpu = kern_hyp_va(vcpu);
>
> - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> host_ctxt->__hyp_running_vcpu = vcpu;
> guest_ctxt = &vcpu->arch.ctxt;
>
> diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
> index 6d2df9fe0b5d..143d7b7358f2 100644
> --- a/arch/arm64/kvm/hyp/sysreg-sr.c
> +++ b/arch/arm64/kvm/hyp/sysreg-sr.c
> @@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
> */
> void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
> struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
> + struct kvm_cpu_context *host_ctxt;
>
> if (!has_vhe())
> return;
>
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> __sysreg_save_user_state(host_ctxt);
>
> /*
> @@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
> */
> void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
> struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
> + struct kvm_cpu_context *host_ctxt;
>
> if (!has_vhe())
> return;
>
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> deactivate_traps_vhe_put();
>
> __sysreg_save_el1_state(guest_ctxt);
> diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
> index e71d00bb5271..b5ae3a5d509e 100644
> --- a/arch/arm64/kvm/pmu.c
> +++ b/arch/arm64/kvm/pmu.c
> @@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
> */
> void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt;
> struct kvm_host_data *host;
> u32 events_guest, events_host;
>
> if (!has_vhe())
> return;
>
> - host_ctxt = vcpu->arch.host_cpu_context;
> - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
> + host = this_cpu_ptr(&kvm_host_data);
> events_guest = host->pmu_events.events_guest;
> events_host = host->pmu_events.events_host;
>
> @@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
> */
> void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt;
> struct kvm_host_data *host;
> u32 events_guest, events_host;
>
> if (!has_vhe())
> return;
>
> - host_ctxt = vcpu->arch.host_cpu_context;
> - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
> + host = this_cpu_ptr(&kvm_host_data);
> events_guest = host->pmu_events.events_guest;
> events_host = host->pmu_events.events_host;
>
> --
> 2.26.2
>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 16:29 ` Mark Rutland
0 siblings, 0 replies; 15+ messages in thread
From: Mark Rutland @ 2020-06-08 16:29 UTC (permalink / raw)
To: Marc Zyngier; +Cc: kvm, kernel-team, kvmarm, linux-arm-kernel
On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
> For very long, we have kept this pointer back to the per-cpu
> host state, despite having working per-cpu accessors at EL2
> for some time now.
>
> Recent investigations have shown that this pointer is easy
> to abuse in preemptible context, which is a sure sign that
> it would better be gone. Not to mention that a per-cpu
> pointer is faster to access at all times.
>
> Reported-by: Andrew Scull <ascull@google.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
From a quick scan, this looks sane to me, so FWIW:
Acked-by: Mark Rutland <mark.rutland@arm.com>
Mark.
> ---
>
> Notes:
> v2: Stick to this_cpu_ptr() in pmu.c, as this is only used on the
> kernel side and not the hypervisor.
>
> arch/arm64/include/asm/kvm_host.h | 3 ---
> arch/arm64/kvm/arm.c | 3 ---
> arch/arm64/kvm/hyp/debug-sr.c | 4 ++--
> arch/arm64/kvm/hyp/switch.c | 6 +++---
> arch/arm64/kvm/hyp/sysreg-sr.c | 6 ++++--
> arch/arm64/kvm/pmu.c | 8 ++------
> 6 files changed, 11 insertions(+), 19 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 59029e90b557..ada1faa92211 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
> struct kvm_guest_debug_arch vcpu_debug_state;
> struct kvm_guest_debug_arch external_debug_state;
>
> - /* Pointer to host CPU context */
> - struct kvm_cpu_context *host_cpu_context;
> -
> struct thread_info *host_thread_info; /* hyp VA */
> struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
>
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 14b747266607..6ddaa23ef346 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
> void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> {
> int *last_ran;
> - kvm_host_data_t *cpu_data;
>
> last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
> - cpu_data = this_cpu_ptr(&kvm_host_data);
>
> /*
> * We might get preempted before the vCPU actually runs, but
> @@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> }
>
> vcpu->cpu = cpu;
> - vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
>
> kvm_vgic_load(vcpu);
> kvm_timer_vcpu_load(vcpu);
> diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
> index 0fc9872a1467..e95af204fec7 100644
> --- a/arch/arm64/kvm/hyp/debug-sr.c
> +++ b/arch/arm64/kvm/hyp/debug-sr.c
> @@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
> if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
> return;
>
> - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> guest_ctxt = &vcpu->arch.ctxt;
> host_dbg = &vcpu->arch.host_debug_state.regs;
> guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
> @@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
> if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
> return;
>
> - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> guest_ctxt = &vcpu->arch.ctxt;
> host_dbg = &vcpu->arch.host_debug_state.regs;
> guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
> index fc09c3dfa466..fc671426c14b 100644
> --- a/arch/arm64/kvm/hyp/switch.c
> +++ b/arch/arm64/kvm/hyp/switch.c
> @@ -544,7 +544,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
> return false;
> }
>
> - ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> __ptrauth_save_key(ctxt->sys_regs, APIA);
> __ptrauth_save_key(ctxt->sys_regs, APIB);
> __ptrauth_save_key(ctxt->sys_regs, APDA);
> @@ -715,7 +715,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
> struct kvm_cpu_context *guest_ctxt;
> u64 exit_code;
>
> - host_ctxt = vcpu->arch.host_cpu_context;
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> host_ctxt->__hyp_running_vcpu = vcpu;
> guest_ctxt = &vcpu->arch.ctxt;
>
> @@ -820,7 +820,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
>
> vcpu = kern_hyp_va(vcpu);
>
> - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> host_ctxt->__hyp_running_vcpu = vcpu;
> guest_ctxt = &vcpu->arch.ctxt;
>
> diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
> index 6d2df9fe0b5d..143d7b7358f2 100644
> --- a/arch/arm64/kvm/hyp/sysreg-sr.c
> +++ b/arch/arm64/kvm/hyp/sysreg-sr.c
> @@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
> */
> void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
> struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
> + struct kvm_cpu_context *host_ctxt;
>
> if (!has_vhe())
> return;
>
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> __sysreg_save_user_state(host_ctxt);
>
> /*
> @@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
> */
> void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
> struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
> + struct kvm_cpu_context *host_ctxt;
>
> if (!has_vhe())
> return;
>
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> deactivate_traps_vhe_put();
>
> __sysreg_save_el1_state(guest_ctxt);
> diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
> index e71d00bb5271..b5ae3a5d509e 100644
> --- a/arch/arm64/kvm/pmu.c
> +++ b/arch/arm64/kvm/pmu.c
> @@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
> */
> void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt;
> struct kvm_host_data *host;
> u32 events_guest, events_host;
>
> if (!has_vhe())
> return;
>
> - host_ctxt = vcpu->arch.host_cpu_context;
> - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
> + host = this_cpu_ptr(&kvm_host_data);
> events_guest = host->pmu_events.events_guest;
> events_host = host->pmu_events.events_host;
>
> @@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
> */
> void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt;
> struct kvm_host_data *host;
> u32 events_guest, events_host;
>
> if (!has_vhe())
> return;
>
> - host_ctxt = vcpu->arch.host_cpu_context;
> - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
> + host = this_cpu_ptr(&kvm_host_data);
> events_guest = host->pmu_events.events_guest;
> events_host = host->pmu_events.events_host;
>
> --
> 2.26.2
>
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 16:29 ` Mark Rutland
0 siblings, 0 replies; 15+ messages in thread
From: Mark Rutland @ 2020-06-08 16:29 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvm, Suzuki K Poulose, James Morse, Julien Thierry, kernel-team,
kvmarm, linux-arm-kernel, Andrew Scull
On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
> For very long, we have kept this pointer back to the per-cpu
> host state, despite having working per-cpu accessors at EL2
> for some time now.
>
> Recent investigations have shown that this pointer is easy
> to abuse in preemptible context, which is a sure sign that
> it would better be gone. Not to mention that a per-cpu
> pointer is faster to access at all times.
>
> Reported-by: Andrew Scull <ascull@google.com>
> Signed-off-by: Marc Zyngier <maz@kernel.org>
From a quick scan, this looks sane to me, so FWIW:
Acked-by: Mark Rutland <mark.rutland@arm.com>
Mark.
> ---
>
> Notes:
> v2: Stick to this_cpu_ptr() in pmu.c, as this is only used on the
> kernel side and not the hypervisor.
>
> arch/arm64/include/asm/kvm_host.h | 3 ---
> arch/arm64/kvm/arm.c | 3 ---
> arch/arm64/kvm/hyp/debug-sr.c | 4 ++--
> arch/arm64/kvm/hyp/switch.c | 6 +++---
> arch/arm64/kvm/hyp/sysreg-sr.c | 6 ++++--
> arch/arm64/kvm/pmu.c | 8 ++------
> 6 files changed, 11 insertions(+), 19 deletions(-)
>
> diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
> index 59029e90b557..ada1faa92211 100644
> --- a/arch/arm64/include/asm/kvm_host.h
> +++ b/arch/arm64/include/asm/kvm_host.h
> @@ -284,9 +284,6 @@ struct kvm_vcpu_arch {
> struct kvm_guest_debug_arch vcpu_debug_state;
> struct kvm_guest_debug_arch external_debug_state;
>
> - /* Pointer to host CPU context */
> - struct kvm_cpu_context *host_cpu_context;
> -
> struct thread_info *host_thread_info; /* hyp VA */
> struct user_fpsimd_state *host_fpsimd_state; /* hyp VA */
>
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 14b747266607..6ddaa23ef346 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -340,10 +340,8 @@ void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
> void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> {
> int *last_ran;
> - kvm_host_data_t *cpu_data;
>
> last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
> - cpu_data = this_cpu_ptr(&kvm_host_data);
>
> /*
> * We might get preempted before the vCPU actually runs, but
> @@ -355,7 +353,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
> }
>
> vcpu->cpu = cpu;
> - vcpu->arch.host_cpu_context = &cpu_data->host_ctxt;
>
> kvm_vgic_load(vcpu);
> kvm_timer_vcpu_load(vcpu);
> diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
> index 0fc9872a1467..e95af204fec7 100644
> --- a/arch/arm64/kvm/hyp/debug-sr.c
> +++ b/arch/arm64/kvm/hyp/debug-sr.c
> @@ -185,7 +185,7 @@ void __hyp_text __debug_switch_to_guest(struct kvm_vcpu *vcpu)
> if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
> return;
>
> - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> guest_ctxt = &vcpu->arch.ctxt;
> host_dbg = &vcpu->arch.host_debug_state.regs;
> guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
> @@ -207,7 +207,7 @@ void __hyp_text __debug_switch_to_host(struct kvm_vcpu *vcpu)
> if (!(vcpu->arch.flags & KVM_ARM64_DEBUG_DIRTY))
> return;
>
> - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> guest_ctxt = &vcpu->arch.ctxt;
> host_dbg = &vcpu->arch.host_debug_state.regs;
> guest_dbg = kern_hyp_va(vcpu->arch.debug_ptr);
> diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
> index fc09c3dfa466..fc671426c14b 100644
> --- a/arch/arm64/kvm/hyp/switch.c
> +++ b/arch/arm64/kvm/hyp/switch.c
> @@ -544,7 +544,7 @@ static bool __hyp_text __hyp_handle_ptrauth(struct kvm_vcpu *vcpu)
> return false;
> }
>
> - ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> __ptrauth_save_key(ctxt->sys_regs, APIA);
> __ptrauth_save_key(ctxt->sys_regs, APIB);
> __ptrauth_save_key(ctxt->sys_regs, APDA);
> @@ -715,7 +715,7 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
> struct kvm_cpu_context *guest_ctxt;
> u64 exit_code;
>
> - host_ctxt = vcpu->arch.host_cpu_context;
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> host_ctxt->__hyp_running_vcpu = vcpu;
> guest_ctxt = &vcpu->arch.ctxt;
>
> @@ -820,7 +820,7 @@ int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
>
> vcpu = kern_hyp_va(vcpu);
>
> - host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> host_ctxt->__hyp_running_vcpu = vcpu;
> guest_ctxt = &vcpu->arch.ctxt;
>
> diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
> index 6d2df9fe0b5d..143d7b7358f2 100644
> --- a/arch/arm64/kvm/hyp/sysreg-sr.c
> +++ b/arch/arm64/kvm/hyp/sysreg-sr.c
> @@ -265,12 +265,13 @@ void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
> */
> void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
> struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
> + struct kvm_cpu_context *host_ctxt;
>
> if (!has_vhe())
> return;
>
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> __sysreg_save_user_state(host_ctxt);
>
> /*
> @@ -301,12 +302,13 @@ void kvm_vcpu_load_sysregs(struct kvm_vcpu *vcpu)
> */
> void kvm_vcpu_put_sysregs(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt = vcpu->arch.host_cpu_context;
> struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;
> + struct kvm_cpu_context *host_ctxt;
>
> if (!has_vhe())
> return;
>
> + host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;
> deactivate_traps_vhe_put();
>
> __sysreg_save_el1_state(guest_ctxt);
> diff --git a/arch/arm64/kvm/pmu.c b/arch/arm64/kvm/pmu.c
> index e71d00bb5271..b5ae3a5d509e 100644
> --- a/arch/arm64/kvm/pmu.c
> +++ b/arch/arm64/kvm/pmu.c
> @@ -163,15 +163,13 @@ static void kvm_vcpu_pmu_disable_el0(unsigned long events)
> */
> void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt;
> struct kvm_host_data *host;
> u32 events_guest, events_host;
>
> if (!has_vhe())
> return;
>
> - host_ctxt = vcpu->arch.host_cpu_context;
> - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
> + host = this_cpu_ptr(&kvm_host_data);
> events_guest = host->pmu_events.events_guest;
> events_host = host->pmu_events.events_host;
>
> @@ -184,15 +182,13 @@ void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu)
> */
> void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu)
> {
> - struct kvm_cpu_context *host_ctxt;
> struct kvm_host_data *host;
> u32 events_guest, events_host;
>
> if (!has_vhe())
> return;
>
> - host_ctxt = vcpu->arch.host_cpu_context;
> - host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
> + host = this_cpu_ptr(&kvm_host_data);
> events_guest = host->pmu_events.events_guest;
> events_host = host->pmu_events.events_host;
>
> --
> 2.26.2
>
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
2020-06-08 15:42 ` Marc Zyngier
(?)
@ 2020-06-08 17:09 ` Andrew Scull
-1 siblings, 0 replies; 15+ messages in thread
From: Andrew Scull @ 2020-06-08 17:09 UTC (permalink / raw)
To: Marc Zyngier
Cc: kvm, kvmarm, linux-arm-kernel, James Morse, Julien Thierry,
Suzuki K Poulose, Mark Rutland, kernel-team
On Mon, Jun 08, 2020 at 04:42:42PM +0100, Marc Zyngier wrote:
> Hi Andrew,
>
> On 2020-06-08 15:51, Andrew Scull wrote:
> > On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
> > > For very long, we have kept this pointer back to the per-cpu
> > > host state, despite having working per-cpu accessors at EL2
> > > for some time now.
> > >
> > > Recent investigations have shown that this pointer is easy
> > > to abuse in preemptible context, which is a sure sign that
> > > it would better be gone. Not to mention that a per-cpu
> > > pointer is faster to access at all times.
> >
> > Helps to make the references to `kvm_host_data` clearer with there now
> > being just one way to get to it and shows that it is scoped to the
> > current CPU. A good change IMO!
>
> Thanks! Can I take this as a Reviewed-by or Acked-by tag? Just let me know.
Built and booted your kvm-arm64/ptrauth-fixes branch containing this
patch with VHE and nVHE on qemu. Booted a VM within each with kvmtool.
Reviewed-by: Andrew Scull <ascull@google.com>
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 17:09 ` Andrew Scull
0 siblings, 0 replies; 15+ messages in thread
From: Andrew Scull @ 2020-06-08 17:09 UTC (permalink / raw)
To: Marc Zyngier; +Cc: kvm, kernel-team, kvmarm, linux-arm-kernel
On Mon, Jun 08, 2020 at 04:42:42PM +0100, Marc Zyngier wrote:
> Hi Andrew,
>
> On 2020-06-08 15:51, Andrew Scull wrote:
> > On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
> > > For very long, we have kept this pointer back to the per-cpu
> > > host state, despite having working per-cpu accessors at EL2
> > > for some time now.
> > >
> > > Recent investigations have shown that this pointer is easy
> > > to abuse in preemptible context, which is a sure sign that
> > > it would better be gone. Not to mention that a per-cpu
> > > pointer is faster to access at all times.
> >
> > Helps to make the references to `kvm_host_data` clearer with there now
> > being just one way to get to it and shows that it is scoped to the
> > current CPU. A good change IMO!
>
> Thanks! Can I take this as a Reviewed-by or Acked-by tag? Just let me know.
Built and booted your kvm-arm64/ptrauth-fixes branch containing this
patch with VHE and nVHE on qemu. Booted a VM within each with kvmtool.
Reviewed-by: Andrew Scull <ascull@google.com>
_______________________________________________
kvmarm mailing list
kvmarm@lists.cs.columbia.edu
https://lists.cs.columbia.edu/mailman/listinfo/kvmarm
^ permalink raw reply [flat|nested] 15+ messages in thread
* Re: [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure
@ 2020-06-08 17:09 ` Andrew Scull
0 siblings, 0 replies; 15+ messages in thread
From: Andrew Scull @ 2020-06-08 17:09 UTC (permalink / raw)
To: Marc Zyngier
Cc: Mark Rutland, kvm, Suzuki K Poulose, James Morse, Julien Thierry,
kernel-team, kvmarm, linux-arm-kernel
On Mon, Jun 08, 2020 at 04:42:42PM +0100, Marc Zyngier wrote:
> Hi Andrew,
>
> On 2020-06-08 15:51, Andrew Scull wrote:
> > On Mon, Jun 08, 2020 at 09:56:57AM +0100, Marc Zyngier wrote:
> > > For very long, we have kept this pointer back to the per-cpu
> > > host state, despite having working per-cpu accessors at EL2
> > > for some time now.
> > >
> > > Recent investigations have shown that this pointer is easy
> > > to abuse in preemptible context, which is a sure sign that
> > > it would better be gone. Not to mention that a per-cpu
> > > pointer is faster to access at all times.
> >
> > Helps to make the references to `kvm_host_data` clearer with there now
> > being just one way to get to it and shows that it is scoped to the
> > current CPU. A good change IMO!
>
> Thanks! Can I take this as a Reviewed-by or Acked-by tag? Just let me know.
Built and booted your kvm-arm64/ptrauth-fixes branch containing this
patch with VHE and nVHE on qemu. Booted a VM within each with kvmtool.
Reviewed-by: Andrew Scull <ascull@google.com>
_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
^ permalink raw reply [flat|nested] 15+ messages in thread
end of thread, other threads:[~2020-06-08 17:09 UTC | newest]
Thread overview: 15+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2020-06-08 8:56 [PATCH v2] KVM: arm64: Remove host_cpu_context member from vcpu structure Marc Zyngier
2020-06-08 8:56 ` Marc Zyngier
2020-06-08 8:56 ` Marc Zyngier
2020-06-08 14:51 ` Andrew Scull
2020-06-08 14:51 ` Andrew Scull
2020-06-08 14:51 ` Andrew Scull
2020-06-08 15:42 ` Marc Zyngier
2020-06-08 15:42 ` Marc Zyngier
2020-06-08 15:42 ` Marc Zyngier
2020-06-08 17:09 ` Andrew Scull
2020-06-08 17:09 ` Andrew Scull
2020-06-08 17:09 ` Andrew Scull
2020-06-08 16:29 ` Mark Rutland
2020-06-08 16:29 ` Mark Rutland
2020-06-08 16:29 ` Mark Rutland
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.