* [RFC PATCH 07/32] KVM: PPC: Book3S HV: Call kvmppc_handle_exit_hv() with vcore unlocked
@ 2018-09-21 10:01 Paul Mackerras
2018-09-25 5:49 ` David Gibson
0 siblings, 1 reply; 2+ messages in thread
From: Paul Mackerras @ 2018-09-21 10:01 UTC (permalink / raw)
To: kvm-ppc
Currently kvmppc_handle_exit_hv() is called with the vcore lock held
because it is called within a for_each_runnable_thread loop.
However, we already unlock the vcore within kvmppc_handle_exit_hv()
under certain circumstances, and this is safe because (a) any vcpus
that become runnable and are added to the runnable set by
kvmppc_run_vcpu() have their vcpu->arch.trap == 0 and can't actually
run in the guest (because the vcore state is VCORE_EXITING), and
(b) for_each_runnable_thread is safe against addition or removal
of vcpus from the runnable set.
Therefore, in order to simplify things for following patches, let's
drop the vcore lock in the for_each_runnable_thread loop, so
kvmppc_handle_exit_hv() gets called without the vcore lock held.
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
---
arch/powerpc/kvm/book3s_hv.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 49a686c..0e17593 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1084,7 +1084,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
return RESUME_GUEST;
}
-/* Called with vcpu->arch.vcore->lock held */
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
@@ -1205,10 +1204,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
swab32(vcpu->arch.emul_inst) :
vcpu->arch.emul_inst;
if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
- /* Need vcore unlocked to call kvmppc_get_last_inst */
- spin_unlock(&vcpu->arch.vcore->lock);
r = kvmppc_emulate_debug_inst(run, vcpu);
- spin_lock(&vcpu->arch.vcore->lock);
} else {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -1224,12 +1220,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
r = EMULATE_FAIL;
if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
- cpu_has_feature(CPU_FTR_ARCH_300)) {
- /* Need vcore unlocked to call kvmppc_get_last_inst */
- spin_unlock(&vcpu->arch.vcore->lock);
+ cpu_has_feature(CPU_FTR_ARCH_300))
r = kvmppc_emulate_doorbell_instr(vcpu);
- spin_lock(&vcpu->arch.vcore->lock);
- }
if (r == EMULATE_FAIL) {
kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
r = RESUME_GUEST;
@@ -2599,6 +2591,14 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
spin_lock(&vc->lock);
now = get_tb();
for_each_runnable_thread(i, vcpu, vc) {
+ /*
+ * It's safe to unlock the vcore in the loop here, because
+ * for_each_runnable_thread() is safe against removal of
+ * the vcpu, and the vcore state is VCORE_EXITING here,
+ * so any vcpus becoming runnable will have their arch.trap
+ * set to zero and can't actually run in the guest.
+ */
+ spin_unlock(&vc->lock);
/* cancel pending dec exception if dec is positive */
if (now < vcpu->arch.dec_expires &&
kvmppc_core_pending_dec(vcpu))
@@ -2614,6 +2614,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
vcpu->arch.ret = ret;
vcpu->arch.trap = 0;
+ spin_lock(&vc->lock);
if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
if (vcpu->arch.pending_exceptions)
kvmppc_core_prepare_to_enter(vcpu);
--
2.7.4
^ permalink raw reply related [flat|nested] 2+ messages in thread
* Re: [RFC PATCH 07/32] KVM: PPC: Book3S HV: Call kvmppc_handle_exit_hv() with vcore unlocked
2018-09-21 10:01 [RFC PATCH 07/32] KVM: PPC: Book3S HV: Call kvmppc_handle_exit_hv() with vcore unlocked Paul Mackerras
@ 2018-09-25 5:49 ` David Gibson
0 siblings, 0 replies; 2+ messages in thread
From: David Gibson @ 2018-09-25 5:49 UTC (permalink / raw)
To: kvm-ppc
[-- Attachment #1: Type: text/plain, Size: 3946 bytes --]
On Fri, Sep 21, 2018 at 08:01:38PM +1000, Paul Mackerras wrote:
> Currently kvmppc_handle_exit_hv() is called with the vcore lock held
> because it is called within a for_each_runnable_thread loop.
> However, we already unlock the vcore within kvmppc_handle_exit_hv()
> under certain circumstances, and this is safe because (a) any vcpus
> that become runnable and are added to the runnable set by
> kvmppc_run_vcpu() have their vcpu->arch.trap == 0 and can't actually
> run in the guest (because the vcore state is VCORE_EXITING), and
> (b) for_each_runnable_thread is safe against addition or removal
> of vcpus from the runnable set.
>
> Therefore, in order to simplify things for following patches, let's
> drop the vcore lock in the for_each_runnable_thread loop, so
> kvmppc_handle_exit_hv() gets called without the vcore lock held.
>
> Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Reviewed-by: David Gibson <david@gibson.dropbear.id.au>
> ---
> arch/powerpc/kvm/book3s_hv.c | 19 ++++++++++---------
> 1 file changed, 10 insertions(+), 9 deletions(-)
>
> diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
> index 49a686c..0e17593 100644
> --- a/arch/powerpc/kvm/book3s_hv.c
> +++ b/arch/powerpc/kvm/book3s_hv.c
> @@ -1084,7 +1084,6 @@ static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
> return RESUME_GUEST;
> }
>
> -/* Called with vcpu->arch.vcore->lock held */
> static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
> struct task_struct *tsk)
> {
> @@ -1205,10 +1204,7 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
> swab32(vcpu->arch.emul_inst) :
> vcpu->arch.emul_inst;
> if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
> - /* Need vcore unlocked to call kvmppc_get_last_inst */
> - spin_unlock(&vcpu->arch.vcore->lock);
> r = kvmppc_emulate_debug_inst(run, vcpu);
> - spin_lock(&vcpu->arch.vcore->lock);
> } else {
> kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
> r = RESUME_GUEST;
> @@ -1224,12 +1220,8 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
> case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
> r = EMULATE_FAIL;
> if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) &&
> - cpu_has_feature(CPU_FTR_ARCH_300)) {
> - /* Need vcore unlocked to call kvmppc_get_last_inst */
> - spin_unlock(&vcpu->arch.vcore->lock);
> + cpu_has_feature(CPU_FTR_ARCH_300))
> r = kvmppc_emulate_doorbell_instr(vcpu);
> - spin_lock(&vcpu->arch.vcore->lock);
> - }
> if (r == EMULATE_FAIL) {
> kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
> r = RESUME_GUEST;
> @@ -2599,6 +2591,14 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
> spin_lock(&vc->lock);
> now = get_tb();
> for_each_runnable_thread(i, vcpu, vc) {
> + /*
> + * It's safe to unlock the vcore in the loop here, because
> + * for_each_runnable_thread() is safe against removal of
> + * the vcpu, and the vcore state is VCORE_EXITING here,
> + * so any vcpus becoming runnable will have their arch.trap
> + * set to zero and can't actually run in the guest.
> + */
> + spin_unlock(&vc->lock);
> /* cancel pending dec exception if dec is positive */
> if (now < vcpu->arch.dec_expires &&
> kvmppc_core_pending_dec(vcpu))
> @@ -2614,6 +2614,7 @@ static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
> vcpu->arch.ret = ret;
> vcpu->arch.trap = 0;
>
> + spin_lock(&vc->lock);
> if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
> if (vcpu->arch.pending_exceptions)
> kvmppc_core_prepare_to_enter(vcpu);
--
David Gibson | I'll have my music baroque, and my code
david AT gibson.dropbear.id.au | minimalist, thank you. NOT _the_ _other_
| _way_ _around_!
http://www.ozlabs.org/~dgibson
[-- Attachment #2: signature.asc --]
[-- Type: application/pgp-signature, Size: 833 bytes --]
^ permalink raw reply [flat|nested] 2+ messages in thread
end of thread, other threads:[~2018-09-25 5:49 UTC | newest]
Thread overview: 2+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2018-09-21 10:01 [RFC PATCH 07/32] KVM: PPC: Book3S HV: Call kvmppc_handle_exit_hv() with vcore unlocked Paul Mackerras
2018-09-25 5:49 ` David Gibson
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.