From: Paul Mackerras <paulus@samba.org>
To: Alexander Graf <agraf@suse.de>
Cc: kvm@vger.kernel.org, kvm-ppc@vger.kernel.org
Subject: [PATCH 12/18] KVM: PPC: Book3S HV: Basic little-endian guest support
Date: Wed,  8 Jan 2014 21:25:30 +1100
Message-ID: <1389176736-26821-13-git-send-email-paulus@samba.org>
In-Reply-To: <1389176736-26821-1-git-send-email-paulus@samba.org>

From: Anton Blanchard <anton@samba.org>

We create a guest MSR from scratch when delivering exceptions in
a few places.  Instead of extracting LPCR[ILE] and inserting it as
MSR_LE each time, we add a new per-vcpu field, intr_msr, which holds
the complete MSR value to use.  For a little-endian guest, userspace
needs to set the ILE (interrupt little-endian) bit in the LPCR for
each vcpu (or at least one vcpu in each virtual core).
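
As an aside (illustration only, not part of the patch): the removed
li/rotldi pair builds MSR_SF | MSR_ME by loading (MSR_ME << 1) | 1 and
rotating it left by 63 bits, so it can never set MSR_LE.  Below is a
minimal user-level C sketch of that arithmetic; it assumes the usual
64-bit MSR bit positions (as in asm/reg.h) and uses a rotldi() helper
as a stand-in for the rotate instruction.

#include <stdint.h>
#include <assert.h>

#define MSR_SF (1ULL << 63)	/* 64-bit mode */
#define MSR_ME (1ULL << 12)	/* machine check enable */
#define MSR_LE (1ULL << 0)	/* little-endian interrupt delivery */

/* rotate-left-doubleword, standing in for the rotldi instruction */
static uint64_t rotldi(uint64_t x, unsigned n)
{
	return (x << n) | (x >> (64 - n));
}

int main(void)
{
	/* Old code: synthesize MSR_SF | MSR_ME in two instructions. */
	uint64_t r11 = (MSR_ME << 1) | 1;	/* li     r11,(MSR_ME << 1) | 1 */
	r11 = rotldi(r11, 63);			/* rotldi r11,r11,63 */
	assert(r11 == (MSR_SF | MSR_ME));

	/*
	 * New code: a single load of vcpu->arch.intr_msr, which already
	 * carries MSR_LE when LPCR[ILE] is set for this vcpu.
	 */
	uint64_t intr_msr = MSR_SF | MSR_ME | MSR_LE;	/* little-endian guest */
	assert(intr_msr == (r11 | MSR_LE));
	return 0;
}

The single "ld r11, VCPU_INTR_MSR(rN)" in the patch replaces both
instructions and picks up MSR_LE for free.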

[paulus@samba.org - removed H_SET_MODE implementation from original
version of the patch, and made kvmppc_set_lpcr update vcpu->arch.intr_msr.]

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
---
 arch/powerpc/include/asm/kvm_host.h     |  1 +
 arch/powerpc/kernel/asm-offsets.c       |  1 +
 arch/powerpc/kvm/book3s_64_mmu_hv.c     |  2 +-
 arch/powerpc/kvm/book3s_hv.c            | 22 ++++++++++++++++++++++
 arch/powerpc/kvm/book3s_hv_rmhandlers.S | 15 +++++----------
 5 files changed, 30 insertions(+), 11 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index d161bc0..7726a3b 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -636,6 +636,7 @@ struct kvm_vcpu_arch {
 	spinlock_t tbacct_lock;
 	u64 busy_stolen;
 	u64 busy_preempt;
+	unsigned long intr_msr;
 #endif
 };
 
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 8e5c6c3..7ca3d86 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -480,6 +480,7 @@ int main(void)
 	DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
 	DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
 	DEFINE(VCPU_VPA_DIRTY, offsetof(struct kvm_vcpu, arch.vpa.dirty));
+	DEFINE(VCPU_INTR_MSR, offsetof(struct kvm_vcpu, arch.intr_msr));
 #endif
 #ifdef CONFIG_PPC_BOOK3S
 	DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c5d1484..79e992d 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -262,7 +262,7 @@ int kvmppc_mmu_hv_init(void)
 
 static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
 {
-	kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
+	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
 }
 
 /*
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 8abcf19..c671262 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -788,6 +788,27 @@ static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr)
 
 	spin_lock(&vc->lock);
 	/*
+	 * If ILE (interrupt little-endian) has changed, update the
+	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
+	 */
+	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
+		struct kvm *kvm = vcpu->kvm;
+		struct kvm_vcpu *vcpu;
+		int i;
+
+		mutex_lock(&kvm->lock);
+		kvm_for_each_vcpu(i, vcpu, kvm) {
+			if (vcpu->arch.vcore != vc)
+				continue;
+			if (new_lpcr & LPCR_ILE)
+				vcpu->arch.intr_msr |= MSR_LE;
+			else
+				vcpu->arch.intr_msr &= ~MSR_LE;
+		}
+		mutex_unlock(&kvm->lock);
+	}
+
+	/*
 	 * Userspace can only modify DPFD (default prefetch depth),
 	 * ILE (interrupt little-endian) and TC (translation control).
 	 * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
@@ -1155,6 +1176,7 @@ static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
 	spin_lock_init(&vcpu->arch.vpa_update_lock);
 	spin_lock_init(&vcpu->arch.tbacct_lock);
 	vcpu->arch.busy_preempt = TB_NIL;
+	vcpu->arch.intr_msr = MSR_SF | MSR_ME;
 
 	kvmppc_mmu_book3s_hv_init(vcpu);
 
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index cb2e4c4..557a478 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -815,8 +815,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
 12:	mtspr	SPRN_SRR0, r10
 	mr	r10,r0
 	mtspr	SPRN_SRR1, r11
-	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11,r11,63
+	ld	r11, VCPU_INTR_MSR(r4)
 5:
 
 /*
@@ -1554,8 +1553,7 @@ kvmppc_hdsi:
 	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 fast_interrupt_c_return:
 6:	ld	r7, VCPU_CTR(r9)
 	lwz	r8, VCPU_XER(r9)
@@ -1624,8 +1622,7 @@ kvmppc_hisi:
 1:	mtspr	SPRN_SRR0, r10
 	mtspr	SPRN_SRR1, r11
 	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
@@ -1668,8 +1665,7 @@ sc_1_fast_return:
 	mtspr	SPRN_SRR0,r10
 	mtspr	SPRN_SRR1,r11
 	li	r10, BOOK3S_INTERRUPT_SYSCALL
-	li	r11, (MSR_ME << 1) | 1  /* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	mr	r4,r9
 	b	fast_guest_return
 
@@ -1997,8 +1993,7 @@ machine_check_realmode:
 	beq	mc_cont
 	/* If not, deliver a machine check.  SRR0/1 are already set */
 	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
-	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
-	rotldi	r11, r11, 63
+	ld	r11, VCPU_INTR_MSR(r9)
 	b	fast_interrupt_c_return
 
 /*
-- 
1.8.5.2

