From mboxrd@z Thu Jan 1 00:00:00 1970
From: Alexander Graf
Subject: [PATCH 05/15] KVM: PPC: Book3S_32 guest MMU fixes
Date: Fri, 5 Mar 2010 17:50:32 +0100
Message-ID: <1267807842-3751-6-git-send-email-agraf@suse.de>
References: <1267807842-3751-1-git-send-email-agraf@suse.de>
Cc: kvm-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
To: kvm-ppc-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
Return-path:
In-Reply-To: <1267807842-3751-1-git-send-email-agraf-l3A5Bk7waGM@public.gmane.org>
Sender: kvm-ppc-owner-u79uwXL29TY76Z2rM5mHXA@public.gmane.org
List-Id: kvm.vger.kernel.org

This patch makes the VSID of mapped pages always reflect all the special
cases we have, like split mode.

It also changes the tlbie mask to 0x0ffff000 according to the spec. The mask
we used before was incorrect.

Signed-off-by: Alexander Graf
---
 arch/powerpc/include/asm/kvm_book3s.h |    1 +
 arch/powerpc/kvm/book3s_32_mmu.c      |   30 +++++++++++++++++++++++-------
 2 files changed, 24 insertions(+), 7 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 997fcc0..1b76fa1 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -44,6 +44,7 @@ struct kvmppc_sr {
 	bool Ks;
 	bool Kp;
 	bool nx;
+	bool valid;
 };
 
 struct kvmppc_bat {
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c
index 1483a9b..7071e22 100644
--- a/arch/powerpc/kvm/book3s_32_mmu.c
+++ b/arch/powerpc/kvm/book3s_32_mmu.c
@@ -57,6 +57,8 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu)
 
 static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 					  struct kvmppc_pte *pte, bool data);
+static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
+					     u64 *vsid);
 
 static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr)
 {
@@ -66,13 +68,14 @@ static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t e
 static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr,
 					 bool data)
 {
-	struct kvmppc_sr *sre = find_sr(to_book3s(vcpu), eaddr);
+	u64 vsid;
 	struct kvmppc_pte pte;
 
 	if (!kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, &pte, data))
 		return pte.vpage;
 
-	return (((u64)eaddr >> 12) & 0xffff) | (((u64)sre->vsid) << 16);
+	kvmppc_mmu_book3s_32_esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);
+	return (((u64)eaddr >> 12) & 0xffff) | (vsid << 16);
 }
 
 static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu)
@@ -142,8 +145,13 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr,
 					bat->bepi_mask);
 		}
 		if ((eaddr & bat->bepi_mask) == bat->bepi) {
+			u64 vsid;
+			kvmppc_mmu_book3s_32_esid_to_vsid(vcpu,
+				eaddr >> SID_SHIFT, &vsid);
+			vsid <<= 16;
+			pte->vpage = (((u64)eaddr >> 12) & 0xffff) | vsid;
+
 			pte->raddr = bat->brpn | (eaddr & ~bat->bepi_mask);
-			pte->vpage = (eaddr >> 12) | VSID_BAT;
 			pte->may_read = bat->pp;
 			pte->may_write = bat->pp > 1;
 			pte->may_execute = true;
@@ -302,6 +310,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 	/* And then put in the new SR */
 	sre->raw = value;
 	sre->vsid = (value & 0x0fffffff);
+	sre->valid = (value & 0x80000000) ? false : true;
 	sre->Ks = (value & 0x40000000) ? true : false;
 	sre->Kp = (value & 0x20000000) ? true : false;
 	sre->nx = (value & 0x10000000) ? true : false;
@@ -312,7 +321,7 @@ static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
 static void kvmppc_mmu_book3s_32_tlbie(struct kvm_vcpu *vcpu, ulong ea,
 				       bool large)
 {
-	kvmppc_mmu_pte_flush(vcpu, ea, ~0xFFFULL);
+	kvmppc_mmu_pte_flush(vcpu, ea, 0x0FFFF000);
 }
 
 static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
@@ -333,15 +342,22 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, u64 esid,
 		break;
 	case MSR_DR|MSR_IR:
 	{
-		ulong ea;
-		ea = esid << SID_SHIFT;
-		*vsid = find_sr(to_book3s(vcpu), ea)->vsid;
+		ulong ea = esid << SID_SHIFT;
+		struct kvmppc_sr *sr = find_sr(to_book3s(vcpu), ea);
+
+		if (!sr->valid)
+			return -1;
+
+		*vsid = sr->vsid;
 		break;
 	}
 	default:
 		BUG();
 	}
 
+	if (vcpu->arch.msr & MSR_PR)
+		*vsid |= VSID_PR;
+
 	return 0;
 }
-- 
1.6.0.2
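
For readers who want to poke at the address math outside the kernel, here is a
minimal user-space sketch of the EA-to-virtual-page composition the patch
converges on: the 16-bit page index (EA bits 12-27) in the low bits and the
segment's VSID shifted above it, with a PR marker so user-mode and
supervisor-mode translations never share a VSID. SID_SHIFT mirrors the 256 MB
segment geometry; the VSID_PR value, struct toy_sr, esid_to_vsid() and
ea_to_vp() are illustrative stand-ins, not the in-kernel definitions.

/*
 * Stand-alone illustration (not kernel code) of the vpage composition used
 * above: vpage = (EA bits 12-27) | (VSID << 16).  SID_SHIFT matches the
 * 256 MB segment size; VSID_PR and the toy segment array are made-up
 * placeholders for the real per-vcpu state.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SID_SHIFT	28			/* ESID = EA >> 28 (16 segments) */
#define VSID_PR		0x10000000ULL		/* placeholder "user mode" marker */

struct toy_sr {
	uint32_t vsid;
	bool valid;
};

static int esid_to_vsid(const struct toy_sr *srs, uint64_t esid, bool msr_pr,
			uint64_t *vsid)
{
	const struct toy_sr *sr = &srs[esid & 0xf];

	if (!sr->valid)
		return -1;		/* guest never wrote this SR */

	*vsid = sr->vsid;
	if (msr_pr)			/* keep user and supervisor mappings apart */
		*vsid |= VSID_PR;
	return 0;
}

static uint64_t ea_to_vp(const struct toy_sr *srs, uint32_t ea, bool msr_pr)
{
	uint64_t vsid = 0;

	esid_to_vsid(srs, ea >> SID_SHIFT, msr_pr, &vsid);
	/* 16-bit page index within the segment, VSID above it */
	return (((uint64_t)ea >> 12) & 0xffff) | (vsid << 16);
}

int main(void)
{
	struct toy_sr srs[16] = { [3] = { .vsid = 0x00abcde, .valid = true } };

	/* EA 0x30123000: segment 3, page index 0x0123 -> vp 0xabcde0123 */
	printf("vp = 0x%llx\n",
	       (unsigned long long)ea_to_vp(srs, 0x30123000, false));
	return 0;
}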
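
The tlbie change is easiest to see with a worked comparison. Assuming
kvmppc_mmu_pte_flush() drops every cached translation whose effective address
matches the flushed address under the given mask (a reading of its arguments,
not something this mail spells out), the old mask ~0xFFF required the whole EA
to match, so a mapping of the same in-segment page offset reached through a
different segment register would survive a guest tlbie. The new mask
0x0FFFF000 compares only EA bits 12-27, which is the mask the commit message
says the spec calls for. The helper and the addresses below are made up for
the demonstration.

/* Toy check: would a cached translation at pte_ea be flushed by a tlbie
 * on flush_ea, given an EA comparison mask? */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool flushed(uint32_t flush_ea, uint32_t pte_ea, uint32_t mask)
{
	return (flush_ea & mask) == (pte_ea & mask);
}

int main(void)
{
	uint32_t tlbie_ea = 0x10123000;	/* guest tlbie on this EA           */
	uint32_t alias_ea = 0x20123000;	/* same page offset, other segment  */

	/* old mask ~0xFFF: the alias in the other segment is missed */
	printf("old mask flushes alias? %d\n",
	       flushed(tlbie_ea, alias_ea, ~0xFFFu));
	/* new mask 0x0FFFF000: only bits 12-27 compared, the alias goes too */
	printf("new mask flushes alias? %d\n",
	       flushed(tlbie_ea, alias_ea, 0x0FFFF000));
	return 0;
}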