All of lore.kernel.org
 help / color / mirror / Atom feed
* [PATCH 09/13] KVM: PPC: e500: enable magic page
@ 2011-05-17 23:42 Scott Wood
  2011-05-19 10:43 ` Alexander Graf
                   ` (7 more replies)
  0 siblings, 8 replies; 9+ messages in thread
From: Scott Wood @ 2011-05-17 23:42 UTC (permalink / raw)
  To: kvm-ppc

This is a shared page used for paravirtualization.  It is always present
in the guest kernel's effective address space at the address indicated
by the hypercall that enables it.

The physical address specified by the hypercall is not used, as
e500 does not have real mode.

Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 arch/powerpc/include/asm/kvm_ppc.h |    1 +
 arch/powerpc/kvm/booke.c           |   11 +++++++++++
 arch/powerpc/kvm/e500_tlb.c        |   22 +++++++++++++++++++++-
 arch/powerpc/kvm/powerpc.c         |    9 +++++----
 4 files changed, 38 insertions(+), 5 deletions(-)

diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9345238..c662f14 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -109,6 +109,7 @@ extern void kvmppc_booke_exit(void);
 
 extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
+extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
 
 /*
  * Cuts out inst bits with ordering according to spec.
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 85b9391..036625e 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -472,6 +472,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		gpa_t gpaddr;
 		gfn_t gfn;
 
+#ifdef CONFIG_KVM_E500
+		if (!(vcpu->arch.shared->msr & MSR_PR) &&
+		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
+			kvmppc_map_magic(vcpu);
+			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
+			r = RESUME_GUEST;
+
+			break;
+		}
+#endif
+
 		/* Check the guest TLB. */
 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
 		if (gtlb_index < 0) {
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 7f808c5..c09e642 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -76,7 +76,8 @@ static inline unsigned int tlb0_get_next_victim(
 
 static inline unsigned int tlb1_max_shadow_size(void)
 {
-	return tlb1_entry_num - tlbcam_index;
+	/* reserve one entry for magic page */
+	return tlb1_entry_num - tlbcam_index - 1;
 }
 
 static inline int tlbe_is_writable(struct tlbe *tlbe)
@@ -142,6 +143,25 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 }
 
+void kvmppc_map_magic(struct kvm_vcpu *vcpu)
+{
+	struct tlbe magic;
+	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+	pfn_t pfn;
+
+	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
+	get_page(pfn_to_page(pfn));
+
+	magic.mas1 = MAS1_VALID | MAS1_TS |
+		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
+	magic.mas3 = (pfn << PAGE_SHIFT) |
+		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+	magic.mas7 = pfn >> (32 - PAGE_SHIFT);
+
+	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+}
+
 void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
 {
 }
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 616dd51..d5beb72 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -66,16 +66,17 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 		vcpu->arch.magic_page_pa = param1;
 		vcpu->arch.magic_page_ea = param2;
 
-		r2 = KVM_MAGIC_FEAT_SR;
+#ifdef CONFIG_KVM_E500
+		kvmppc_map_magic(vcpu);
+#endif
 
+		r2 = KVM_MAGIC_FEAT_SR;
 		r = HC_EV_SUCCESS;
 		break;
 	}
 	case HC_VENDOR_KVM | KVM_HC_FEATURES:
 		r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
-		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
-#endif
+		r2 = (1 << KVM_FEATURE_MAGIC_PAGE);
 
 		/* Second return value is in r4 */
 		break;
-- 
1.7.4.1



^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 09/13] KVM: PPC: e500: enable magic page
  2011-05-17 23:42 [PATCH 09/13] KVM: PPC: e500: enable magic page Scott Wood
@ 2011-05-19 10:43 ` Alexander Graf
  2011-05-19 16:37 ` Scott Wood
                   ` (6 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Alexander Graf @ 2011-05-19 10:43 UTC (permalink / raw)
  To: kvm-ppc

On 05/18/2011 01:42 AM, Scott Wood wrote:
> This is a shared page used for paravirtualization.  It is always present
> in the guest kernel's effective address space at the address indicated
> by the hypercall that enables it.
>
> The physical address specified by the hypercall is not used, as
> e500 does not have real mode.

This also requires a documentation change.

> Signed-off-by: Scott Wood<scottwood@freescale.com>
> ---
>   arch/powerpc/include/asm/kvm_ppc.h |    1 +
>   arch/powerpc/kvm/booke.c           |   11 +++++++++++
>   arch/powerpc/kvm/e500_tlb.c        |   22 +++++++++++++++++++++-
>   arch/powerpc/kvm/powerpc.c         |    9 +++++----
>   4 files changed, 38 insertions(+), 5 deletions(-)
>
> diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
> index 9345238..c662f14 100644
> --- a/arch/powerpc/include/asm/kvm_ppc.h
> +++ b/arch/powerpc/include/asm/kvm_ppc.h
> @@ -109,6 +109,7 @@ extern void kvmppc_booke_exit(void);
>
>   extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
>   extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
> +extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
>
>   /*
>    * Cuts out inst bits with ordering according to spec.
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index 85b9391..036625e 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -472,6 +472,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
>   		gpa_t gpaddr;
>   		gfn_t gfn;
>
> +#ifdef CONFIG_KVM_E500
> +		if (!(vcpu->arch.shared->msr&  MSR_PR)&&
> +		    (eaddr&  PAGE_MASK) == vcpu->arch.magic_page_ea) {
> +			kvmppc_map_magic(vcpu);
> +			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
> +			r = RESUME_GUEST;
> +
> +			break;
> +		}
> +#endif
> +
>   		/* Check the guest TLB. */
>   		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
>   		if (gtlb_index<  0) {
> diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
> index 7f808c5..c09e642 100644
> --- a/arch/powerpc/kvm/e500_tlb.c
> +++ b/arch/powerpc/kvm/e500_tlb.c
> @@ -76,7 +76,8 @@ static inline unsigned int tlb0_get_next_victim(
>
>   static inline unsigned int tlb1_max_shadow_size(void)
>   {
> -	return tlb1_entry_num - tlbcam_index;
> +	/* reserve one entry for magic page */
> +	return tlb1_entry_num - tlbcam_index - 1;
>   }
>
>   static inline int tlbe_is_writable(struct tlbe *tlbe)
> @@ -142,6 +143,25 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
>   	}
>   }
>
> +void kvmppc_map_magic(struct kvm_vcpu *vcpu)
> +{
> +	struct tlbe magic;
> +	ulong shared_page = ((ulong)vcpu->arch.shared)&  PAGE_MASK;
> +	pfn_t pfn;
> +
> +	pfn = (pfn_t)virt_to_phys((void *)shared_page)>>  PAGE_SHIFT;
> +	get_page(pfn_to_page(pfn));
> +
> +	magic.mas1 = MAS1_VALID | MAS1_TS |
> +		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
> +	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
> +	magic.mas3 = (pfn<<  PAGE_SHIFT) |
> +		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
> +	magic.mas7 = pfn>>  (32 - PAGE_SHIFT);
> +
> +	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));

It's a 4k page, so why use TLB1? Also, you add this as PID0 mapping, no? 
Shouldn't it be restricted to the guest's kernel?

Alex


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 09/13] KVM: PPC: e500: enable magic page
  2011-05-17 23:42 [PATCH 09/13] KVM: PPC: e500: enable magic page Scott Wood
  2011-05-19 10:43 ` Alexander Graf
@ 2011-05-19 16:37 ` Scott Wood
  2011-05-21 16:39 ` Alexander Graf
                   ` (5 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Scott Wood @ 2011-05-19 16:37 UTC (permalink / raw)
  To: kvm-ppc

On Thu, 19 May 2011 12:43:21 +0200
Alexander Graf <agraf@suse.de> wrote:

> On 05/18/2011 01:42 AM, Scott Wood wrote:
> > +void kvmppc_map_magic(struct kvm_vcpu *vcpu)
> > +{
> > +	struct tlbe magic;
> > +	ulong shared_page = ((ulong)vcpu->arch.shared)&  PAGE_MASK;
> > +	pfn_t pfn;
> > +
> > +	pfn = (pfn_t)virt_to_phys((void *)shared_page)>>  PAGE_SHIFT;
> > +	get_page(pfn_to_page(pfn));
> > +
> > +	magic.mas1 = MAS1_VALID | MAS1_TS |
> > +		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
> > +	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
> > +	magic.mas3 = (pfn<<  PAGE_SHIFT) |
> > +		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
> > +	magic.mas7 = pfn>>  (32 - PAGE_SHIFT);
> > +
> > +	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
> 
> It's a 4k page, so why use TLB1? 

Seemed useful to keep it more or less pinned.

> Also, you add this as PID0 mapping, no? 
> Shouldn't it be restricted to the guest's kernel?

As of this stage in the patchset, we're still dumping the TLB on guest
privilege switch, and we don't call this function if the guest is in user
mode.

Later when shadow PIDs are added, we use a shadow PID that corresponds to
guest kernel mode.

-Scott


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 09/13] KVM: PPC: e500: enable magic page
  2011-05-17 23:42 [PATCH 09/13] KVM: PPC: e500: enable magic page Scott Wood
  2011-05-19 10:43 ` Alexander Graf
  2011-05-19 16:37 ` Scott Wood
@ 2011-05-21 16:39 ` Alexander Graf
  2011-05-23 15:45 ` Scott Wood
                   ` (4 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Alexander Graf @ 2011-05-21 16:39 UTC (permalink / raw)
  To: kvm-ppc


On 19.05.2011, at 18:37, Scott Wood wrote:

> On Thu, 19 May 2011 12:43:21 +0200
> Alexander Graf <agraf@suse.de> wrote:
> 
>> On 05/18/2011 01:42 AM, Scott Wood wrote:
>>> +void kvmppc_map_magic(struct kvm_vcpu *vcpu)
>>> +{
>>> +	struct tlbe magic;
>>> +	ulong shared_page = ((ulong)vcpu->arch.shared)&  PAGE_MASK;
>>> +	pfn_t pfn;
>>> +
>>> +	pfn = (pfn_t)virt_to_phys((void *)shared_page)>>  PAGE_SHIFT;
>>> +	get_page(pfn_to_page(pfn));
>>> +
>>> +	magic.mas1 = MAS1_VALID | MAS1_TS |
>>> +		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
>>> +	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
>>> +	magic.mas3 = (pfn<<  PAGE_SHIFT) |
>>> +		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
>>> +	magic.mas7 = pfn>>  (32 - PAGE_SHIFT);
>>> +
>>> +	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
>> 
>> It's a 4k page, so why use TLB1? 
> 
> Seemed useful to keep it more or less pinned.
> 
>> Also, you add this as PID0 mapping, no? 
>> Shouldn't it be restricted to the guest's kernel?
> 
> As of this stage in the patchset, we're still dumping the TLB on guest
> privilege switch, and we don't call this function if the guest is in user
> mode.
> 
> Later when shadow PIDs are added, we use a shadow PID that corresponds to
> guest kernel mode.

Makes sense :).

Btw - have you considered moving the MAS registers into the shared page now that it's available on BookE? That should give quite a significant performance boost :)


Alex


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 09/13] KVM: PPC: e500: enable magic page
  2011-05-17 23:42 [PATCH 09/13] KVM: PPC: e500: enable magic page Scott Wood
                   ` (2 preceding siblings ...)
  2011-05-21 16:39 ` Alexander Graf
@ 2011-05-23 15:45 ` Scott Wood
  2011-05-27 23:03 ` Alexander Graf
                   ` (3 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Scott Wood @ 2011-05-23 15:45 UTC (permalink / raw)
  To: kvm-ppc

On Sat, 21 May 2011 18:39:01 +0200
Alexander Graf <agraf@suse.de> wrote:

> Btw - have you considered moving the MAS registers into the shared page now that it's available on BookE? That should give quite a significant performance boost :)

That's in the next patchset (among other paravirt stuff). :-)

-Scott


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 09/13] KVM: PPC: e500: enable magic page
  2011-05-17 23:42 [PATCH 09/13] KVM: PPC: e500: enable magic page Scott Wood
                   ` (3 preceding siblings ...)
  2011-05-23 15:45 ` Scott Wood
@ 2011-05-27 23:03 ` Alexander Graf
  2011-06-02 23:17 ` Scott Wood
                   ` (2 subsequent siblings)
  7 siblings, 0 replies; 9+ messages in thread
From: Alexander Graf @ 2011-05-27 23:03 UTC (permalink / raw)
  To: kvm-ppc


On 23.05.2011, at 17:45, Scott Wood wrote:

> On Sat, 21 May 2011 18:39:01 +0200
> Alexander Graf <agraf@suse.de> wrote:
> 
>> Btw - have you considered moving the MAS registers into the shared page now that it's available on BookE? That should give quite a significant performance boost :)
> 
> That's in the next patchset (among other paravirt stuff). :-)

Very nice :). Is anything still unclear or can we go to v2 and finally get some speed? :)


Alex


^ permalink raw reply	[flat|nested] 9+ messages in thread

* [PATCH 09/13] KVM: PPC: e500: enable magic page
  2011-05-17 23:42 [PATCH 09/13] KVM: PPC: e500: enable magic page Scott Wood
                   ` (4 preceding siblings ...)
  2011-05-27 23:03 ` Alexander Graf
@ 2011-06-02 23:17 ` Scott Wood
  2011-06-14  9:55 ` Alexander Graf
  2011-06-14 18:52 ` Scott Wood
  7 siblings, 0 replies; 9+ messages in thread
From: Scott Wood @ 2011-06-02 23:17 UTC (permalink / raw)
  To: kvm-ppc

This is a shared page used for paravirtualization.  It is always present
in the guest kernel's effective address space at the address indicated
by the hypercall that enables it.

The physical address specified by the hypercall is not used, as
e500 does not have real mode.

Signed-off-by: Scott Wood <scottwood@freescale.com>
---
 Documentation/virtual/kvm/ppc-pv.txt |    8 +++++---
 arch/powerpc/include/asm/kvm_ppc.h   |    1 +
 arch/powerpc/kvm/booke.c             |   11 +++++++++++
 arch/powerpc/kvm/e500_tlb.c          |   22 +++++++++++++++++++++-
 arch/powerpc/kvm/powerpc.c           |    9 +++++----
 5 files changed, 43 insertions(+), 8 deletions(-)

diff --git a/Documentation/virtual/kvm/ppc-pv.txt b/Documentation/virtual/kvm/ppc-pv.txt
index 3ab969c..2b7ce19 100644
--- a/Documentation/virtual/kvm/ppc-pv.txt
+++ b/Documentation/virtual/kvm/ppc-pv.txt
@@ -68,9 +68,11 @@ page that contains parts of supervisor visible register state. The guest can
 map this shared page using the KVM hypercall KVM_HC_PPC_MAP_MAGIC_PAGE.
 
 With this hypercall issued the guest always gets the magic page mapped at the
-desired location in effective and physical address space. For now, we always
-map the page to -4096. This way we can access it using absolute load and store
-functions. The following instruction reads the first field of the magic page:
+desired location. The first parameter indicates the effective address when the
+MMU is enabled. The second parameter indicates the address in real mode, if
+applicable to the target. For now, we always map the page to -4096. This way we
+can access it using absolute load and store functions. The following
+instruction reads the first field of the magic page:
 
 	ld	rX, -4096(0)
 
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9345238..c662f14 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -109,6 +109,7 @@ extern void kvmppc_booke_exit(void);
 
 extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
 extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
+extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
 
 /*
  * Cuts out inst bits with ordering according to spec.
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index e1d368c..45ad454 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -472,6 +472,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
 		gpa_t gpaddr;
 		gfn_t gfn;
 
+#ifdef CONFIG_KVM_E500
+		if (!(vcpu->arch.shared->msr & MSR_PR) &&
+		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
+			kvmppc_map_magic(vcpu);
+			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
+			r = RESUME_GUEST;
+
+			break;
+		}
+#endif
+
 		/* Check the guest TLB. */
 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
 		if (gtlb_index < 0) {
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 7f808c5..c09e642 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -76,7 +76,8 @@ static inline unsigned int tlb0_get_next_victim(
 
 static inline unsigned int tlb1_max_shadow_size(void)
 {
-	return tlb1_entry_num - tlbcam_index;
+	/* reserve one entry for magic page */
+	return tlb1_entry_num - tlbcam_index - 1;
 }
 
 static inline int tlbe_is_writable(struct tlbe *tlbe)
@@ -142,6 +143,25 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 	}
 }
 
+void kvmppc_map_magic(struct kvm_vcpu *vcpu)
+{
+	struct tlbe magic;
+	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
+	pfn_t pfn;
+
+	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
+	get_page(pfn_to_page(pfn));
+
+	magic.mas1 = MAS1_VALID | MAS1_TS |
+		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
+	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
+	magic.mas3 = (pfn << PAGE_SHIFT) |
+		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
+	magic.mas7 = pfn >> (32 - PAGE_SHIFT);
+
+	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+}
+
 void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
 {
 }
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 616dd51..d5beb72 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -66,16 +66,17 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
 		vcpu->arch.magic_page_pa = param1;
 		vcpu->arch.magic_page_ea = param2;
 
-		r2 = KVM_MAGIC_FEAT_SR;
+#ifdef CONFIG_KVM_E500
+		kvmppc_map_magic(vcpu);
+#endif
 
+		r2 = KVM_MAGIC_FEAT_SR;
 		r = HC_EV_SUCCESS;
 		break;
 	}
 	case HC_VENDOR_KVM | KVM_HC_FEATURES:
 		r = HC_EV_SUCCESS;
-#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
-		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
-#endif
+		r2 = (1 << KVM_FEATURE_MAGIC_PAGE);
 
 		/* Second return value is in r4 */
 		break;
-- 
1.7.4.1



^ permalink raw reply related	[flat|nested] 9+ messages in thread

* Re: [PATCH 09/13] KVM: PPC: e500: enable magic page
  2011-05-17 23:42 [PATCH 09/13] KVM: PPC: e500: enable magic page Scott Wood
                   ` (5 preceding siblings ...)
  2011-06-02 23:17 ` Scott Wood
@ 2011-06-14  9:55 ` Alexander Graf
  2011-06-14 18:52 ` Scott Wood
  7 siblings, 0 replies; 9+ messages in thread
From: Alexander Graf @ 2011-06-14  9:55 UTC (permalink / raw)
  To: kvm-ppc


On 03.06.2011, at 01:17, Scott Wood wrote:

> This is a shared page used for paravirtualization.  It is always present
> in the guest kernel's effective address space at the address indicated
> by the hypercall that enables it.
> 
> The physical address specified by the hypercall is not used, as
> e500 does not have real mode.
> 
> Signed-off-by: Scott Wood <scottwood@freescale.com>
> ---
> Documentation/virtual/kvm/ppc-pv.txt |    8 +++++---
> arch/powerpc/include/asm/kvm_ppc.h   |    1 +
> arch/powerpc/kvm/booke.c             |   11 +++++++++++
> arch/powerpc/kvm/e500_tlb.c          |   22 +++++++++++++++++++++-
> arch/powerpc/kvm/powerpc.c           |    9 +++++----
> 5 files changed, 43 insertions(+), 8 deletions(-)
> 
> diff --git a/Documentation/virtual/kvm/ppc-pv.txt b/Documentation/virtual/kvm/ppc-pv.txt
> index 3ab969c..2b7ce19 100644
> --- a/Documentation/virtual/kvm/ppc-pv.txt
> +++ b/Documentation/virtual/kvm/ppc-pv.txt
> @@ -68,9 +68,11 @@ page that contains parts of supervisor visible register state. The guest can
> map this shared page using the KVM hypercall KVM_HC_PPC_MAP_MAGIC_PAGE.
> 
> With this hypercall issued the guest always gets the magic page mapped at the
> -desired location in effective and physical address space. For now, we always
> -map the page to -4096. This way we can access it using absolute load and store
> -functions. The following instruction reads the first field of the magic page:
> +desired location. The first parameter indicates the effective address when the
> +MMU is enabled. The second parameter indicates the address in real mode, if
> +applicable to the target. For now, we always map the page to -4096. This way we
> +can access it using absolute load and store functions. The following
> +instruction reads the first field of the magic page:
> 
> 	ld	rX, -4096(0)
> 
> diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
> index 9345238..c662f14 100644
> --- a/arch/powerpc/include/asm/kvm_ppc.h
> +++ b/arch/powerpc/include/asm/kvm_ppc.h
> @@ -109,6 +109,7 @@ extern void kvmppc_booke_exit(void);
> 
> extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
> extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
> +extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
> 
> /*
>  * Cuts out inst bits with ordering according to spec.
> diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> index e1d368c..45ad454 100644
> --- a/arch/powerpc/kvm/booke.c
> +++ b/arch/powerpc/kvm/booke.c
> @@ -472,6 +472,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> 		gpa_t gpaddr;
> 		gfn_t gfn;
> 
> +#ifdef CONFIG_KVM_E500
> +		if (!(vcpu->arch.shared->msr & MSR_PR) &&
> +		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
> +			kvmppc_map_magic(vcpu);
> +			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);

This is fine for now, but please note that I find it unpretty. The magic page hook should be in the guest tlb lookup and then reuse the same mapping mechanisms as real TLB entries, as it's basically the same.

However, for now just leave it like this.

> +			r = RESUME_GUEST;
> +
> +			break;
> +		}
> +#endif
> +
> 		/* Check the guest TLB. */
> 		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
> 		if (gtlb_index < 0) {
> diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
> index 7f808c5..c09e642 100644
> --- a/arch/powerpc/kvm/e500_tlb.c
> +++ b/arch/powerpc/kvm/e500_tlb.c
> @@ -76,7 +76,8 @@ static inline unsigned int tlb0_get_next_victim(
> 
> static inline unsigned int tlb1_max_shadow_size(void)
> {
> -	return tlb1_entry_num - tlbcam_index;
> +	/* reserve one entry for magic page */
> +	return tlb1_entry_num - tlbcam_index - 1;
> }
> 
> static inline int tlbe_is_writable(struct tlbe *tlbe)
> @@ -142,6 +143,25 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
> 	}
> }
> 
> +void kvmppc_map_magic(struct kvm_vcpu *vcpu)
> +{
> +	struct tlbe magic;
> +	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
> +	pfn_t pfn;
> +
> +	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
> +	get_page(pfn_to_page(pfn));
> +
> +	magic.mas1 = MAS1_VALID | MAS1_TS |
> +		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
> +	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
> +	magic.mas3 = (pfn << PAGE_SHIFT) |
> +		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
> +	magic.mas7 = pfn >> (32 - PAGE_SHIFT);
> +
> +	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
> +}
> +
> void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
> {
> }
> diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
> index 616dd51..d5beb72 100644
> --- a/arch/powerpc/kvm/powerpc.c
> +++ b/arch/powerpc/kvm/powerpc.c
> @@ -66,16 +66,17 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
> 		vcpu->arch.magic_page_pa = param1;
> 		vcpu->arch.magic_page_ea = param2;
> 
> -		r2 = KVM_MAGIC_FEAT_SR;
> +#ifdef CONFIG_KVM_E500
> +		kvmppc_map_magic(vcpu);
> +#endif

Is this necessary? Sure, we save a few ms on the trap, but is it worth it? If we had a generic function that could map the magic page on all systems, sure, but I'd like to keep generic code clean of too many #ifdefs.

> 
> +		r2 = KVM_MAGIC_FEAT_SR;
> 		r = HC_EV_SUCCESS;
> 		break;
> 	}
> 	case HC_VENDOR_KVM | KVM_HC_FEATURES:
> 		r = HC_EV_SUCCESS;
> -#if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */
> -		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
> -#endif
> +		r2 = (1 << KVM_FEATURE_MAGIC_PAGE);

As you've noted already, this breaks 440. You can choose between implementing it there or changing the #ifdef :).


Alex


^ permalink raw reply	[flat|nested] 9+ messages in thread

* Re: [PATCH 09/13] KVM: PPC: e500: enable magic page
  2011-05-17 23:42 [PATCH 09/13] KVM: PPC: e500: enable magic page Scott Wood
                   ` (6 preceding siblings ...)
  2011-06-14  9:55 ` Alexander Graf
@ 2011-06-14 18:52 ` Scott Wood
  7 siblings, 0 replies; 9+ messages in thread
From: Scott Wood @ 2011-06-14 18:52 UTC (permalink / raw)
  To: kvm-ppc

On Tue, 14 Jun 2011 11:55:11 +0200
Alexander Graf <agraf@suse.de> wrote:

> 
> On 03.06.2011, at 01:17, Scott Wood wrote:
> 
> > diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
> > index e1d368c..45ad454 100644
> > --- a/arch/powerpc/kvm/booke.c
> > +++ b/arch/powerpc/kvm/booke.c
> > @@ -472,6 +472,17 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
> > 		gpa_t gpaddr;
> > 		gfn_t gfn;
> > 
> > +#ifdef CONFIG_KVM_E500
> > +		if (!(vcpu->arch.shared->msr & MSR_PR) &&
> > +		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
> > +			kvmppc_map_magic(vcpu);
> > +			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
> 
> This is fine for now, but please note that I find it unpretty. The magic page hook should be in the guest tlb lookup and then reuse the same mapping mechanisms as real TLB entries, as it's basically the same.
> 
> However, for now just leave it like this.

I'm not thrilled with it either, but it's different in that there is no
entry for this in the guest TLB.  If there were to be one, that would be a
guest-visible change in the magic page interface.

> > diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
> > index 616dd51..d5beb72 100644
> > --- a/arch/powerpc/kvm/powerpc.c
> > +++ b/arch/powerpc/kvm/powerpc.c
> > @@ -66,16 +66,17 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
> > 		vcpu->arch.magic_page_pa = param1;
> > 		vcpu->arch.magic_page_ea = param2;
> > 
> > -		r2 = KVM_MAGIC_FEAT_SR;
> > +#ifdef CONFIG_KVM_E500
> > +		kvmppc_map_magic(vcpu);
> > +#endif
> 
> Is this necessary? Sure, we save a few ms on the trap, but is it worth it? If we had a generic function that could map the magic page on all systems, sure, but I'd like to keep generic code clean of too many #ifdefs.

I think that's a relic from when I was trying to avoid adding code to the
TLB miss path to load this on demand.  It should be able to come out.

-Scott


^ permalink raw reply	[flat|nested] 9+ messages in thread

end of thread, other threads:[~2011-06-14 18:52 UTC | newest]

Thread overview: 9+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2011-05-17 23:42 [PATCH 09/13] KVM: PPC: e500: enable magic page Scott Wood
2011-05-19 10:43 ` Alexander Graf
2011-05-19 16:37 ` Scott Wood
2011-05-21 16:39 ` Alexander Graf
2011-05-23 15:45 ` Scott Wood
2011-05-27 23:03 ` Alexander Graf
2011-06-02 23:17 ` Scott Wood
2011-06-14  9:55 ` Alexander Graf
2011-06-14 18:52 ` Scott Wood

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.