All of lore.kernel.org
 help / color / mirror / Atom feed
From: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
To: kvm-ppc@vger.kernel.org
Cc: paulus@ozlabs.org, kvm@vger.kernel.org,
	Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Subject: [PATCH 15/23] KVM: PPC: Book3S HV: Store lpcr and hdec_exp in the vcpu struct
Date: Mon, 26 Aug 2019 16:21:01 +1000	[thread overview]
Message-ID: <20190826062109.7573-16-sjitindarsingh@gmail.com> (raw)
In-Reply-To: <20190826062109.7573-1-sjitindarsingh@gmail.com>

When running a single vcpu with kvmhv_run_single_vcpu() the lpcr and
hypervisor decrementer expiry are passed as function arguments. When
running a vcore with kvmppc_run_vcpu() the lpcr is taken from the vcore
and there is no need to consider the hypervisor decrementer expiry as it
only applies when running a nested guest.

These fields will need to be accessed in the guest entry path in
book3s_hv_rmhandlers.S when running a nested hpt (hash page table)
guest. To allow for this store the lpcr and hdec_exp in the vcpu struct.

No functional change.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_book3s.h |  3 +--
 arch/powerpc/include/asm/kvm_host.h   |  2 ++
 arch/powerpc/kvm/book3s_hv.c          | 40 +++++++++++++++++------------------
 arch/powerpc/kvm/book3s_hv_nested.c   | 10 ++++-----
 4 files changed, 27 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 40218e81b75f..e1dc1872e453 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -314,8 +314,7 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
-			  u64 time_limit, unsigned long lpcr);
+int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
 void kvmhv_save_guest_slb(struct kvm_vcpu *vcpu, struct guest_slb *slbp);
 void kvmhv_restore_guest_slb(struct kvm_vcpu *vcpu, struct guest_slb *slbp);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bad09c213be6..b092701951ee 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -793,10 +793,12 @@ struct kvm_vcpu_arch {
 
 	u32 online;
 
+	unsigned long lpcr;
 	/* For support of nested guests */
 	struct kvm_nested_guest *nested;
 	u32 nested_vcpu_id;
 	gpa_t nested_io_gpr;
+	u64 hdec_exp;
 #endif
 
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index be72bc6b4cd5..8407071d5e22 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3429,8 +3429,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 /*
  * Handle making the H_ENTER_NESTED hcall if we're pseries.
  */
-static int kvmhv_pseries_enter_guest(struct kvm_vcpu *vcpu, u64 time_limit,
-				     unsigned long lpcr)
+static int kvmhv_pseries_enter_guest(struct kvm_vcpu *vcpu, u64 time_limit)
 {
 	/* call our hypervisor to load up HV regs and go */
 	struct hv_guest_state hvregs;
@@ -3454,7 +3453,7 @@ static int kvmhv_pseries_enter_guest(struct kvm_vcpu *vcpu, u64 time_limit,
 	host_psscr = mfspr(SPRN_PSSCR_PR);
 	mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
 	kvmhv_save_hv_regs(vcpu, &hvregs);
-	hvregs.lpcr = lpcr;
+	hvregs.lpcr = vcpu->arch.lpcr;
 	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
 	if (vcpu->arch.nested) {
 		hvregs.lpid = vcpu->arch.nested->shadow_lpid;
@@ -3536,8 +3535,7 @@ static int kvmhv_pseries_enter_guest(struct kvm_vcpu *vcpu, u64 time_limit,
  * CPU_FTR_HVMODE is set. This is only used for radix guests, however that
  * radix guest may be a direct guest of this hypervisor or a nested guest.
  */
-static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
-				     unsigned long lpcr)
+static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	s64 hdec;
@@ -3594,7 +3592,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	mtspr(SPRN_AMOR, ~0UL);
 
-	mtspr(SPRN_LPCR, lpcr);
+	mtspr(SPRN_LPCR, vcpu->arch.lpcr);
 	isync();
 
 	kvmppc_xive_push_vcpu(vcpu);
@@ -3666,8 +3664,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
  * Virtual-mode guest entry for POWER9 and later when the host and
  * guest are both using the radix MMU.  The LPIDR has already been set.
  */
-int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
-			 unsigned long lpcr)
+int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	unsigned long host_dscr = mfspr(SPRN_DSCR);
@@ -3675,7 +3672,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	unsigned long host_iamr = mfspr(SPRN_IAMR);
 	unsigned long host_amr = mfspr(SPRN_AMR);
 	s64 dec;
-	u64 tb;
+	u64 tb, time_limit;
 	int trap, save_pmu;
 
 	dec = mfspr(SPRN_DEC);
@@ -3683,8 +3680,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (dec < 512)
 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
 	local_paca->kvm_hstate.dec_expires = dec + tb;
-	if (local_paca->kvm_hstate.dec_expires < time_limit)
-		time_limit = local_paca->kvm_hstate.dec_expires;
+	time_limit = min_t(u64, local_paca->kvm_hstate.dec_expires,
+				vcpu->arch.hdec_exp);
 
 	vcpu->arch.ceded = 0;
 
@@ -3736,15 +3733,16 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
 
 	if (kvmhv_on_pseries()) {
-		trap = kvmhv_pseries_enter_guest(vcpu, time_limit, lpcr);
+		trap = kvmhv_pseries_enter_guest(vcpu, time_limit);
 	} else {
-		trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
+		trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit);
 	}
 
 	if (kvm_is_radix(vcpu->kvm))
 		vcpu->arch.slb_max = 0;
 	dec = mfspr(SPRN_DEC);
-	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
+	/* Sign extend if not using large decrementer */
+	if (!(vcpu->arch.lpcr & LPCR_LD))
 		dec = (s32) dec;
 	tb = mftb();
 	vcpu->arch.dec_expires = dec + tb;
@@ -4145,9 +4143,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	return vcpu->arch.ret;
 }
 
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
-			  struct kvm_vcpu *vcpu, u64 time_limit,
-			  unsigned long lpcr)
+int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int trap, r, pcpu;
 	int srcu_idx, lpid;
@@ -4206,7 +4202,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
 		}
 		if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
 			     &vcpu->arch.pending_exceptions))
-			lpcr |= LPCR_MER;
+			vcpu->arch.lpcr |= LPCR_MER;
 	} else if (vcpu->arch.pending_exceptions ||
 		   vcpu->arch.doorbell_request ||
 		   xive_interrupt_pending(vcpu)) {
@@ -4242,7 +4238,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
 	/* Tell lockdep that we're about to enable interrupts */
 	trace_hardirqs_on();
 
-	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
+	trap = kvmhv_p9_guest_entry(vcpu);
 	vcpu->arch.trap = trap;
 
 	trace_hardirqs_off();
@@ -4399,6 +4395,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
 	do {
+		/* update vcpu->arch.lpcr in case a previous loop modified it */
+		vcpu->arch.lpcr = vcpu->arch.vcore->lpcr;
+		vcpu->arch.hdec_exp = ~(u64)0;
 		/*
 		 * The early POWER9 chips that can't mix radix and HPT threads
 		 * on the same core also need the workaround for the problem
@@ -4412,8 +4411,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		if (kvmhv_on_pseries() || (kvm->arch.threads_indep &&
 					   kvm_is_radix(kvm) &&
 					   !no_mixing_hpt_and_radix))
-			r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
-						  vcpu->arch.vcore->lpcr);
+			r = kvmhv_run_single_vcpu(run, vcpu);
 		else
 			r = kvmppc_run_vcpu(run, vcpu);
 
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 883f8896ed60..f80491e9ff97 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -267,7 +267,6 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	struct guest_slb *l2_slb = NULL, *saved_l1_slb = NULL;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 hv_ptr, regs_ptr, slb_ptr = 0UL;
-	u64 hdec_exp;
 	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
 	u64 mask;
 	unsigned long lpcr;
@@ -357,7 +356,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	}
 
 	/* convert TB values/offsets to host (L0) values */
-	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
+	vcpu->arch.hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
 	vc->tb_offset += l2_hv.tb_offset;
 
 	/* set L1 state to L2 state */
@@ -377,14 +376,15 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	vcpu->arch.ret = RESUME_GUEST;
 	vcpu->arch.trap = 0;
 	do {
-		if (mftb() >= hdec_exp) {
+		if (mftb() >= vcpu->arch.hdec_exp) {
 			vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
 			r = RESUME_HOST;
 			break;
 		}
+		/* update vcpu->arch.lpcr in case a previous loop modified it */
+		vcpu->arch.lpcr = lpcr;
 		if (radix)
-			r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu,
-						  hdec_exp, lpcr);
+			r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu);
 		else
 			r = RESUME_HOST; /* XXX TODO hpt entry path */
 	} while (is_kvmppc_resume_guest(r));
-- 
2.13.6


WARNING: multiple messages have this Message-ID (diff)
From: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
To: kvm-ppc@vger.kernel.org
Cc: paulus@ozlabs.org, kvm@vger.kernel.org,
	Suraj Jitindar Singh <sjitindarsingh@gmail.com>
Subject: [PATCH 15/23] KVM: PPC: Book3S HV: Store lpcr and hdec_exp in the vcpu struct
Date: Mon, 26 Aug 2019 06:21:01 +0000	[thread overview]
Message-ID: <20190826062109.7573-16-sjitindarsingh@gmail.com> (raw)
In-Reply-To: <20190826062109.7573-1-sjitindarsingh@gmail.com>

When running a single vcpu with kvmhv_run_single_vcpu() the lpcr and
hypervisor decrementer expiry are passed as function arguments. When
running a vcore with kvmppc_run_vcpu() the lpcr is taken from the vcore
and there is no need to consider the hypervisor decrementer expiry as it
only applies when running a nested guest.

These fields will need to be accessed in the guest entry path in
book3s_hv_rmhandlers.S when running a nested hpt (hash page table)
guest. To allow for this store the lpcr and hdec_exp in the vcpu struct.

No functional change.

Signed-off-by: Suraj Jitindar Singh <sjitindarsingh@gmail.com>
---
 arch/powerpc/include/asm/kvm_book3s.h |  3 +--
 arch/powerpc/include/asm/kvm_host.h   |  2 ++
 arch/powerpc/kvm/book3s_hv.c          | 40 +++++++++++++++++------------------
 arch/powerpc/kvm/book3s_hv_nested.c   | 10 ++++-----
 4 files changed, 27 insertions(+), 28 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 40218e81b75f..e1dc1872e453 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -314,8 +314,7 @@ void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1);
 void kvmhv_release_all_nested(struct kvm *kvm);
 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu);
 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu);
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu,
-			  u64 time_limit, unsigned long lpcr);
+int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr);
 void kvmhv_save_guest_slb(struct kvm_vcpu *vcpu, struct guest_slb *slbp);
 void kvmhv_restore_guest_slb(struct kvm_vcpu *vcpu, struct guest_slb *slbp);
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bad09c213be6..b092701951ee 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -793,10 +793,12 @@ struct kvm_vcpu_arch {
 
 	u32 online;
 
+	unsigned long lpcr;
 	/* For support of nested guests */
 	struct kvm_nested_guest *nested;
 	u32 nested_vcpu_id;
 	gpa_t nested_io_gpr;
+	u64 hdec_exp;
 #endif
 
 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index be72bc6b4cd5..8407071d5e22 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -3429,8 +3429,7 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 /*
  * Handle making the H_ENTER_NESTED hcall if we're pseries.
  */
-static int kvmhv_pseries_enter_guest(struct kvm_vcpu *vcpu, u64 time_limit,
-				     unsigned long lpcr)
+static int kvmhv_pseries_enter_guest(struct kvm_vcpu *vcpu, u64 time_limit)
 {
 	/* call our hypervisor to load up HV regs and go */
 	struct hv_guest_state hvregs;
@@ -3454,7 +3453,7 @@ static int kvmhv_pseries_enter_guest(struct kvm_vcpu *vcpu, u64 time_limit,
 	host_psscr = mfspr(SPRN_PSSCR_PR);
 	mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
 	kvmhv_save_hv_regs(vcpu, &hvregs);
-	hvregs.lpcr = lpcr;
+	hvregs.lpcr = vcpu->arch.lpcr;
 	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
 	if (vcpu->arch.nested) {
 		hvregs.lpid = vcpu->arch.nested->shadow_lpid;
@@ -3536,8 +3535,7 @@ static int kvmhv_pseries_enter_guest(struct kvm_vcpu *vcpu, u64 time_limit,
  * CPU_FTR_HVMODE is set. This is only used for radix guests, however that
  * radix guest may be a direct guest of this hypervisor or a nested guest.
  */
-static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
-				     unsigned long lpcr)
+static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	s64 hdec;
@@ -3594,7 +3592,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
 
 	mtspr(SPRN_AMOR, ~0UL);
 
-	mtspr(SPRN_LPCR, lpcr);
+	mtspr(SPRN_LPCR, vcpu->arch.lpcr);
 	isync();
 
 	kvmppc_xive_push_vcpu(vcpu);
@@ -3666,8 +3664,7 @@ static int kvmhv_load_hv_regs_and_go(struct kvm_vcpu *vcpu, u64 time_limit,
  * Virtual-mode guest entry for POWER9 and later when the host and
  * guest are both using the radix MMU.  The LPIDR has already been set.
  */
-int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
-			 unsigned long lpcr)
+int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu)
 {
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	unsigned long host_dscr = mfspr(SPRN_DSCR);
@@ -3675,7 +3672,7 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	unsigned long host_iamr = mfspr(SPRN_IAMR);
 	unsigned long host_amr = mfspr(SPRN_AMR);
 	s64 dec;
-	u64 tb;
+	u64 tb, time_limit;
 	int trap, save_pmu;
 
 	dec = mfspr(SPRN_DEC);
@@ -3683,8 +3680,8 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	if (dec < 512)
 		return BOOK3S_INTERRUPT_HV_DECREMENTER;
 	local_paca->kvm_hstate.dec_expires = dec + tb;
-	if (local_paca->kvm_hstate.dec_expires < time_limit)
-		time_limit = local_paca->kvm_hstate.dec_expires;
+	time_limit = min_t(u64, local_paca->kvm_hstate.dec_expires,
+				vcpu->arch.hdec_exp);
 
 	vcpu->arch.ceded = 0;
 
@@ -3736,15 +3733,16 @@ int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 	mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb());
 
 	if (kvmhv_on_pseries()) {
-		trap = kvmhv_pseries_enter_guest(vcpu, time_limit, lpcr);
+		trap = kvmhv_pseries_enter_guest(vcpu, time_limit);
 	} else {
-		trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit, lpcr);
+		trap = kvmhv_load_hv_regs_and_go(vcpu, time_limit);
 	}
 
 	if (kvm_is_radix(vcpu->kvm))
 		vcpu->arch.slb_max = 0;
 	dec = mfspr(SPRN_DEC);
-	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
+	/* Sign extend if not using large decrementer */
+	if (!(vcpu->arch.lpcr & LPCR_LD))
 		dec = (s32) dec;
 	tb = mftb();
 	vcpu->arch.dec_expires = dec + tb;
@@ -4145,9 +4143,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	return vcpu->arch.ret;
 }
 
-int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
-			  struct kvm_vcpu *vcpu, u64 time_limit,
-			  unsigned long lpcr)
+int kvmhv_run_single_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 {
 	int trap, r, pcpu;
 	int srcu_idx, lpid;
@@ -4206,7 +4202,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
 		}
 		if (test_bit(BOOK3S_IRQPRIO_EXTERNAL,
 			     &vcpu->arch.pending_exceptions))
-			lpcr |= LPCR_MER;
+			vcpu->arch.lpcr |= LPCR_MER;
 	} else if (vcpu->arch.pending_exceptions ||
 		   vcpu->arch.doorbell_request ||
 		   xive_interrupt_pending(vcpu)) {
@@ -4242,7 +4238,7 @@ int kvmhv_run_single_vcpu(struct kvm_run *kvm_run,
 	/* Tell lockdep that we're about to enable interrupts */
 	trace_hardirqs_on();
 
-	trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr);
+	trap = kvmhv_p9_guest_entry(vcpu);
 	vcpu->arch.trap = trap;
 
 	trace_hardirqs_off();
@@ -4399,6 +4395,9 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
 
 	do {
+		/* update vcpu->arch.lpcr in case a previous loop modified it */
+		vcpu->arch.lpcr = vcpu->arch.vcore->lpcr;
+		vcpu->arch.hdec_exp = ~(u64)0;
 		/*
 		 * The early POWER9 chips that can't mix radix and HPT threads
 		 * on the same core also need the workaround for the problem
@@ -4412,8 +4411,7 @@ static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
 		if (kvmhv_on_pseries() || (kvm->arch.threads_indep &&
 					   kvm_is_radix(kvm) &&
 					   !no_mixing_hpt_and_radix))
-			r = kvmhv_run_single_vcpu(run, vcpu, ~(u64)0,
-						  vcpu->arch.vcore->lpcr);
+			r = kvmhv_run_single_vcpu(run, vcpu);
 		else
 			r = kvmppc_run_vcpu(run, vcpu);
 
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 883f8896ed60..f80491e9ff97 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -267,7 +267,6 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	struct guest_slb *l2_slb = NULL, *saved_l1_slb = NULL;
 	struct kvmppc_vcore *vc = vcpu->arch.vcore;
 	u64 hv_ptr, regs_ptr, slb_ptr = 0UL;
-	u64 hdec_exp;
 	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
 	u64 mask;
 	unsigned long lpcr;
@@ -357,7 +356,7 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	}
 
 	/* convert TB values/offsets to host (L0) values */
-	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
+	vcpu->arch.hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
 	vc->tb_offset += l2_hv.tb_offset;
 
 	/* set L1 state to L2 state */
@@ -377,14 +376,15 @@ long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
 	vcpu->arch.ret = RESUME_GUEST;
 	vcpu->arch.trap = 0;
 	do {
-		if (mftb() >= hdec_exp) {
+		if (mftb() >= vcpu->arch.hdec_exp) {
 			vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
 			r = RESUME_HOST;
 			break;
 		}
+		/* update vcpu->arch.lpcr in case a previous loop modified it */
+		vcpu->arch.lpcr = lpcr;
 		if (radix)
-			r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu,
-						  hdec_exp, lpcr);
+			r = kvmhv_run_single_vcpu(vcpu->arch.kvm_run, vcpu);
 		else
 			r = RESUME_HOST; /* XXX TODO hpt entry path */
 	} while (is_kvmppc_resume_guest(r));
-- 
2.13.6

  parent reply	other threads:[~2019-08-26  6:21 UTC|newest]

Thread overview: 54+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2019-08-26  6:20 [PATCH 00/23] KVM: PPC: Book3S HV: Support for nested HPT guests Suraj Jitindar Singh
2019-08-26  6:20 ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 01/23] KVM: PPC: Book3S HV: Use __gfn_to_pfn_memslot in HPT page fault handler Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 02/23] KVM: PPC: Book3S HV: Increment mmu_notifier_seq when modifying radix pte rc bits Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 03/23] KVM: PPC: Book3S HV: Nested: Don't allow hash guests to run nested guests Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-10-23  4:47   ` Paul Mackerras
2019-10-23  4:47     ` Paul Mackerras
2019-08-26  6:20 ` [PATCH 04/23] KVM: PPC: Book3S HV: Handle making H_ENTER_NESTED hcall in a separate function Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 05/23] KVM: PPC: Book3S HV: Enable calling kvmppc_hpte_hv_fault in virtual mode Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 06/23] KVM: PPC: Book3S HV: Allow hpt manipulation hcalls to be called " Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 07/23] KVM: PPC: Book3S HV: Make kvmppc_invalidate_hpte() take lpid not a kvm struct Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 08/23] KVM: PPC: Book3S HV: Nested: Allow pseries hypervisor to run hpt nested guest Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 09/23] KVM: PPC: Book3S HV: Nested: Improve comments and naming of nest rmap functions Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 10/23] KVM: PPC: Book3S HV: Nested: Increase gpa field in nest rmap to 46 bits Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 11/23] KVM: PPC: Book3S HV: Nested: Remove single nest rmap entries Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 12/23] KVM: PPC: Book3S HV: Nested: add kvmhv_remove_all_nested_rmap_lpid() Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-08-26  6:20 ` [PATCH 13/23] KVM: PPC: Book3S HV: Nested: Infrastructure for nested hpt guest setup Suraj Jitindar Singh
2019-08-26  6:20   ` Suraj Jitindar Singh
2019-10-24  3:43   ` Paul Mackerras
2019-10-24  3:43     ` Paul Mackerras
2019-08-26  6:21 ` [PATCH 14/23] KVM: PPC: Book3S HV: Nested: Context switch slb for nested hpt guest Suraj Jitindar Singh
2019-08-26  6:21   ` Suraj Jitindar Singh
2019-10-24  4:48   ` Paul Mackerras
2019-10-24  4:48     ` Paul Mackerras
2019-08-26  6:21 ` Suraj Jitindar Singh [this message]
2019-08-26  6:21   ` [PATCH 15/23] KVM: PPC: Book3S HV: Store lpcr and hdec_exp in the vcpu struct Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 16/23] KVM: PPC: Book3S HV: Nested: Make kvmppc_run_vcpu() entry path nested capable Suraj Jitindar Singh
2019-08-26  6:21   ` Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 17/23] KVM: PPC: Book3S HV: Nested: Rename kvmhv_xlate_addr_nested_radix Suraj Jitindar Singh
2019-08-26  6:21   ` Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 18/23] KVM: PPC: Book3S HV: Separate out hashing from kvmppc_hv_find_lock_hpte() Suraj Jitindar Singh
2019-08-26  6:21   ` Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 19/23] KVM: PPC: Book3S HV: Nested: Implement nested hpt mmu translation Suraj Jitindar Singh
2019-08-26  6:21   ` Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 20/23] KVM: PPC: Book3S HV: Nested: Handle tlbie hcall for nested hpt guest Suraj Jitindar Singh
2019-08-26  6:21   ` Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 21/23] KVM: PPC: Book3S HV: Nested: Implement nest rmap invalidations for hpt guests Suraj Jitindar Singh
2019-08-26  6:21   ` Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 22/23] KVM: PPC: Book3S HV: Nested: Enable nested " Suraj Jitindar Singh
2019-08-26  6:21   ` Suraj Jitindar Singh
2019-08-26  6:21 ` [PATCH 23/23] KVM: PPC: Book3S HV: Add nested hpt pte information to debugfs Suraj Jitindar Singh
2019-08-26  6:21   ` Suraj Jitindar Singh

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20190826062109.7573-16-sjitindarsingh@gmail.com \
    --to=sjitindarsingh@gmail.com \
    --cc=kvm-ppc@vger.kernel.org \
    --cc=kvm@vger.kernel.org \
    --cc=paulus@ozlabs.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.