* [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection
@ 2016-01-07  9:03 ` Marc Zyngier
  0 siblings, 0 replies; 20+ messages in thread
From: Marc Zyngier @ 2016-01-07  9:03 UTC (permalink / raw)
  To: Christoffer Dall, Shannon Zhao, Peter Maydell
  Cc: linux-arm-kernel, kvm, kvmarm

At the moment, our fault injection is pretty limited. We always
generate a SYNC exception into EL1, as if the fault was actually
from EL1h, no matter how it was generated.

This is obviously wrong, as EL0 can generate faults of its own
(not to mention the pretty-much unused EL1t mode).

This patch fixes it by implementing section D1.10.2 of the ARMv8 ARM,
and in particular table D1-7 ("Vector offsets from vector table base
address"), which describes which vector to use depending on the source
exception level and type (synchronous, IRQ, FIQ or SError).
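
For reference, each vector slot is 0x80 bytes, so the offsets from
VBAR_EL1 end up as follows (per table D1-7):

                          Sync    IRQ     FIQ     SError
  Current EL, SP_EL0      0x000   0x080   0x100   0x180
  Current EL, SP_ELx      0x200   0x280   0x300   0x380
  Lower EL, AArch64       0x400   0x480   0x500   0x580
  Lower EL, AArch32       0x600   0x680   0x700   0x780

The new *_VECTOR defines carry the base offset of each row, and the
exception_type enum the additional offset of each column.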

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/kvm/inject_fault.c | 38 +++++++++++++++++++++++++++++++++++---
 1 file changed, 35 insertions(+), 3 deletions(-)

diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 648112e..4d1ac81 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -27,7 +27,11 @@
 
 #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
 				 PSR_I_BIT | PSR_D_BIT)
-#define EL1_EXCEPT_SYNC_OFFSET	0x200
+
+#define CURRENT_EL_SP_EL0_VECTOR	0x0
+#define CURRENT_EL_SP_ELx_VECTOR	0x200
+#define LOWER_EL_AArch64_VECTOR		0x400
+#define LOWER_EL_AArch32_VECTOR		0x600
 
 static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
 {
@@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
 		*fsr = 0x14;
 }
 
+enum exception_type {
+	except_type_sync	= 0,
+	except_type_irq		= 0x80,
+	except_type_fiq		= 0x100,
+	except_type_serror	= 0x180,
+};
+
+static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
+{
+	u64 exc_offset;
+
+	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
+	case PSR_MODE_EL1t:
+		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
+		break;
+	case PSR_MODE_EL1h:
+		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
+		break;
+	case PSR_MODE_EL0t:
+		exc_offset = LOWER_EL_AArch64_VECTOR;
+		break;
+	default:
+		exc_offset = LOWER_EL_AArch32_VECTOR;
+	}
+
+	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
+}
+
 static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
 {
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
@@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
 
@@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 	*vcpu_spsr(vcpu) = cpsr;
 	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
 
+	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
 	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
-	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
 
 	/*
 	 * Build an unknown exception, depending on the instruction
-- 
2.1.4


* Re: [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection
  2016-01-07  9:03 ` Marc Zyngier
@ 2016-01-08  8:36   ` Shannon Zhao
  -1 siblings, 0 replies; 20+ messages in thread
From: Shannon Zhao @ 2016-01-08  8:36 UTC (permalink / raw)
  To: Marc Zyngier, Christoffer Dall, Shannon Zhao, Peter Maydell
  Cc: kvm, linux-arm-kernel, kvmarm



On 2016/1/7 17:03, Marc Zyngier wrote:
> At the moment, our fault injection is pretty limited. We always
> generate a SYNC exception into EL1, as if the fault was actually
> from EL1h, no matter how it was generated.
> 
> This is obviously wrong, as EL0 can generate faults of its own
> (not to mention the pretty-much unused EL1t mode).
> 
> This patch fixes it by implementing section D1.10.2 of the ARMv8 ARM,
> and in particular table D1-7 ("Vector offsets from vector table base
> address"), which describes which vector to use depending on the source
> exception level and type (synchronous, IRQ, FIQ or SError).
> 
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  arch/arm64/kvm/inject_fault.c | 38 +++++++++++++++++++++++++++++++++++---
>  1 file changed, 35 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
> index 648112e..4d1ac81 100644
> --- a/arch/arm64/kvm/inject_fault.c
> +++ b/arch/arm64/kvm/inject_fault.c
> @@ -27,7 +27,11 @@
>  
>  #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
>  				 PSR_I_BIT | PSR_D_BIT)
> -#define EL1_EXCEPT_SYNC_OFFSET	0x200
> +
> +#define CURRENT_EL_SP_EL0_VECTOR	0x0
> +#define CURRENT_EL_SP_ELx_VECTOR	0x200
> +#define LOWER_EL_AArch64_VECTOR		0x400
> +#define LOWER_EL_AArch32_VECTOR		0x600
>  
>  static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
>  {
> @@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
>  		*fsr = 0x14;
>  }
>  
> +enum exception_type {
> +	except_type_sync	= 0,
> +	except_type_irq		= 0x80,
> +	except_type_fiq		= 0x100,
> +	except_type_serror	= 0x180,
> +};
> +
> +static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
> +{
> +	u64 exc_offset;
> +
> +	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
> +	case PSR_MODE_EL1t:
> +		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
> +		break;
> +	case PSR_MODE_EL1h:
> +		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
> +		break;
> +	case PSR_MODE_EL0t:
> +		exc_offset = LOWER_EL_AArch64_VECTOR;
> +		break;
> +	default:
> +		exc_offset = LOWER_EL_AArch32_VECTOR;
> +	}
> +
> +	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
> +}
> +
>  static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
>  {
>  	unsigned long cpsr = *vcpu_cpsr(vcpu);
> @@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
>  	*vcpu_spsr(vcpu) = cpsr;
>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>  
> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>  
>  	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
>  
> @@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
>  	*vcpu_spsr(vcpu) = cpsr;
>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>  
> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>  
>  	/*
>  	 * Build an unknown exception, depending on the instruction
> 
I tested this patch on top of the PMU patch set and it works as
expected. I just have one question: the EC is set to ESR_ELx_EC_UNKNOWN
by default here, rather than to the value taken from esr_el2. Does this
matter?

Thanks,
-- 
Shannon


* Re: [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection
  2016-01-08  8:36   ` Shannon Zhao
@ 2016-01-08  8:56     ` Marc Zyngier
  -1 siblings, 0 replies; 20+ messages in thread
From: Marc Zyngier @ 2016-01-08  8:56 UTC (permalink / raw)
  To: Shannon Zhao, Christoffer Dall, Shannon Zhao, Peter Maydell
  Cc: kvm, linux-arm-kernel, kvmarm

On 08/01/16 08:36, Shannon Zhao wrote:
> 
> 
> On 2016/1/7 17:03, Marc Zyngier wrote:
>> At the moment, our fault injection is pretty limited. We always
>> generate a SYNC exception into EL1, as if the fault was actually
>> from EL1h, no matter how it was generated.
>>
>> This is obviously wrong, as EL0 can generate faults of its own
>> (not to mention the pretty-much unused EL1t mode).
>>
>> This patch fixes it by implementing section D1.10.2 of the ARMv8 ARM,
>> and in particular table D1-7 ("Vector offsets from vector table base
>> address"), which describes which vector to use depending on the source
>> exception level and type (synchronous, IRQ, FIQ or SError).
>>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>>  arch/arm64/kvm/inject_fault.c | 38 +++++++++++++++++++++++++++++++++++---
>>  1 file changed, 35 insertions(+), 3 deletions(-)
>>
>> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
>> index 648112e..4d1ac81 100644
>> --- a/arch/arm64/kvm/inject_fault.c
>> +++ b/arch/arm64/kvm/inject_fault.c
>> @@ -27,7 +27,11 @@
>>  
>>  #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
>>  				 PSR_I_BIT | PSR_D_BIT)
>> -#define EL1_EXCEPT_SYNC_OFFSET	0x200
>> +
>> +#define CURRENT_EL_SP_EL0_VECTOR	0x0
>> +#define CURRENT_EL_SP_ELx_VECTOR	0x200
>> +#define LOWER_EL_AArch64_VECTOR		0x400
>> +#define LOWER_EL_AArch32_VECTOR		0x600
>>  
>>  static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
>>  {
>> @@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
>>  		*fsr = 0x14;
>>  }
>>  
>> +enum exception_type {
>> +	except_type_sync	= 0,
>> +	except_type_irq		= 0x80,
>> +	except_type_fiq		= 0x100,
>> +	except_type_serror	= 0x180,
>> +};
>> +
>> +static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
>> +{
>> +	u64 exc_offset;
>> +
>> +	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
>> +	case PSR_MODE_EL1t:
>> +		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
>> +		break;
>> +	case PSR_MODE_EL1h:
>> +		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
>> +		break;
>> +	case PSR_MODE_EL0t:
>> +		exc_offset = LOWER_EL_AArch64_VECTOR;
>> +		break;
>> +	default:
>> +		exc_offset = LOWER_EL_AArch32_VECTOR;
>> +	}
>> +
>> +	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
>> +}
>> +
>>  static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
>>  {
>>  	unsigned long cpsr = *vcpu_cpsr(vcpu);
>> @@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
>>  	*vcpu_spsr(vcpu) = cpsr;
>>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>>  
>> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
>> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>>  
>>  	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
>>  
>> @@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
>>  	*vcpu_spsr(vcpu) = cpsr;
>>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>>  
>> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
>> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>>  
>>  	/*
>>  	 * Build an unknown exception, depending on the instruction
>>
> I tested this patch on top of the PMU patch set and it works as
> expected. I just have one question: the EC is set to ESR_ELx_EC_UNKNOWN
> by default here, rather than to the value taken from esr_el2. Does this
> matter?

For an UNDEF, ESR_ELx_EC_UNKNOWN is the right EC to use. The EC set in
ESR_EL2 when we trap is likely to be something like ESR_ELx_EC_SYS64,
which the kernel handles as an UNDEF, but that doesn't match what we
want to do here (the guest could legitimately handle that in a completely
different way).
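
To make this concrete, here is a rough sketch (from memory, not part of
this patch -- double-check against asm/esr.h) of how inject_undef64()
builds the ESR it hands to the guest:

	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	/*
	 * Reflect the width of the trapped instruction in the IL bit,
	 * but keep the EC as "unknown reason" instead of whatever EC
	 * EL2 trapped on.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr;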

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...

* Re: [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection
  2016-01-07  9:03 ` Marc Zyngier
@ 2016-01-10 19:45   ` Christoffer Dall
  -1 siblings, 0 replies; 20+ messages in thread
From: Christoffer Dall @ 2016-01-10 19:45 UTC (permalink / raw)
  To: Marc Zyngier; +Cc: Shannon Zhao, Peter Maydell, linux-arm-kernel, kvm, kvmarm

On Thu, Jan 07, 2016 at 09:03:36AM +0000, Marc Zyngier wrote:
> At the moment, our fault injection is pretty limited. We always
> generate a SYNC exception into EL1, as if the fault was actually
> from EL1h, no matter how it was generated.
> 
> This is obviously wrong, as EL0 can generate faults of its own
> (not to mention the pretty-much unused EL1t mode).
> 
> This patch fixes it by implementing section D1.10.2 of the ARMv8 ARM,
> and in particular table D1-7 ("Vector offsets from vector table base
> address"), which describes which vector to use depending on the source
> exception level and type (synchronous, IRQ, FIQ or SError).
> 
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  arch/arm64/kvm/inject_fault.c | 38 +++++++++++++++++++++++++++++++++++---
>  1 file changed, 35 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
> index 648112e..4d1ac81 100644
> --- a/arch/arm64/kvm/inject_fault.c
> +++ b/arch/arm64/kvm/inject_fault.c
> @@ -27,7 +27,11 @@
>  
>  #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
>  				 PSR_I_BIT | PSR_D_BIT)
> -#define EL1_EXCEPT_SYNC_OFFSET	0x200
> +
> +#define CURRENT_EL_SP_EL0_VECTOR	0x0
> +#define CURRENT_EL_SP_ELx_VECTOR	0x200
> +#define LOWER_EL_AArch64_VECTOR		0x400
> +#define LOWER_EL_AArch32_VECTOR		0x600
>  
>  static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
>  {
> @@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
>  		*fsr = 0x14;
>  }
>  
> +enum exception_type {
> +	except_type_sync	= 0,
> +	except_type_irq		= 0x80,
> +	except_type_fiq		= 0x100,
> +	except_type_serror	= 0x180,
> +};
> +
> +static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
> +{
> +	u64 exc_offset;
> +
> +	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
> +	case PSR_MODE_EL1t:
> +		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
> +		break;
> +	case PSR_MODE_EL1h:
> +		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
> +		break;
> +	case PSR_MODE_EL0t:
> +		exc_offset = LOWER_EL_AArch64_VECTOR;
> +		break;
> +	default:
> +		exc_offset = LOWER_EL_AArch32_VECTOR;

so this catches any EL0 32-bit state, right?

If so:

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>

-Christoffer

> +	}
> +
> +	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
> +}
> +
>  static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
>  {
>  	unsigned long cpsr = *vcpu_cpsr(vcpu);
> @@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
>  	*vcpu_spsr(vcpu) = cpsr;
>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>  
> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>  
>  	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
>  
> @@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
>  	*vcpu_spsr(vcpu) = cpsr;
>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>  
> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>  
>  	/*
>  	 * Build an unknown exception, depending on the instruction
> -- 
> 2.1.4
> 

* Re: [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection
  2016-01-08  8:56     ` Marc Zyngier
@ 2016-01-11  1:36       ` Shannon Zhao
  -1 siblings, 0 replies; 20+ messages in thread
From: Shannon Zhao @ 2016-01-11  1:36 UTC (permalink / raw)
  To: Marc Zyngier, Christoffer Dall, Shannon Zhao, Peter Maydell
  Cc: kvm, linux-arm-kernel, kvmarm



On 2016/1/8 16:56, Marc Zyngier wrote:
> On 08/01/16 08:36, Shannon Zhao wrote:
>> > 
>> > 
>> > On 2016/1/7 17:03, Marc Zyngier wrote:
>>> >> At the moment, our fault injection is pretty limited. We always
>>> >> generate a SYNC exception into EL1, as if the fault was actually
>>> >> from EL1h, no matter how it was generated.
>>> >>
>>> >> This is obviously wrong, as EL0 can generate faults of its own
>>> >> (not to mention the pretty-much unused EL1t mode).
>>> >>
>>> >> This patch fixes it by implementing section D1.10.2 of the ARMv8 ARM,
>>> >> and in particular table D1-7 ("Vector offsets from vector table base
>>> >> address"), which describes which vector to use depending on the source
>>> >> exception level and type (synchronous, IRQ, FIQ or SError).
>>> >>
>>> >> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>>> >> ---
>>> >>  arch/arm64/kvm/inject_fault.c | 38 +++++++++++++++++++++++++++++++++++---
>>> >>  1 file changed, 35 insertions(+), 3 deletions(-)
>>> >>
>>> >> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
>>> >> index 648112e..4d1ac81 100644
>>> >> --- a/arch/arm64/kvm/inject_fault.c
>>> >> +++ b/arch/arm64/kvm/inject_fault.c
>>> >> @@ -27,7 +27,11 @@
>>> >>  
>>> >>  #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
>>> >>  				 PSR_I_BIT | PSR_D_BIT)
>>> >> -#define EL1_EXCEPT_SYNC_OFFSET	0x200
>>> >> +
>>> >> +#define CURRENT_EL_SP_EL0_VECTOR	0x0
>>> >> +#define CURRENT_EL_SP_ELx_VECTOR	0x200
>>> >> +#define LOWER_EL_AArch64_VECTOR		0x400
>>> >> +#define LOWER_EL_AArch32_VECTOR		0x600
>>> >>  
>>> >>  static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
>>> >>  {
>>> >> @@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
>>> >>  		*fsr = 0x14;
>>> >>  }
>>> >>  
>>> >> +enum exception_type {
>>> >> +	except_type_sync	= 0,
>>> >> +	except_type_irq		= 0x80,
>>> >> +	except_type_fiq		= 0x100,
>>> >> +	except_type_serror	= 0x180,
>>> >> +};
>>> >> +
>>> >> +static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
>>> >> +{
>>> >> +	u64 exc_offset;
>>> >> +
>>> >> +	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
>>> >> +	case PSR_MODE_EL1t:
>>> >> +		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
>>> >> +		break;
>>> >> +	case PSR_MODE_EL1h:
>>> >> +		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
>>> >> +		break;
>>> >> +	case PSR_MODE_EL0t:
>>> >> +		exc_offset = LOWER_EL_AArch64_VECTOR;
>>> >> +		break;
>>> >> +	default:
>>> >> +		exc_offset = LOWER_EL_AArch32_VECTOR;
>>> >> +	}
>>> >> +
>>> >> +	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
>>> >> +}
>>> >> +
>>> >>  static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
>>> >>  {
>>> >>  	unsigned long cpsr = *vcpu_cpsr(vcpu);
>>> >> @@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
>>> >>  	*vcpu_spsr(vcpu) = cpsr;
>>> >>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>>> >>  
>>> >> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>>> >>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
>>> >> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>>> >>  
>>> >>  	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
>>> >>  
>>> >> @@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
>>> >>  	*vcpu_spsr(vcpu) = cpsr;
>>> >>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>>> >>  
>>> >> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>>> >>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
>>> >> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>>> >>  
>>> >>  	/*
>>> >>  	 * Build an unknown exception, depending on the instruction
>>> >>
>> > I tested this patch on top of the PMU patch set and it works as
>> > expected. I just have one question: the EC is set to ESR_ELx_EC_UNKNOWN
>> > by default here, rather than to the value taken from esr_el2. Does this
>> > matter?
> For an UNDEF, ESR_ELx_EC_UNKNOWN is the right EC to use. The EC set in
> ESR_EL2 when we trap is likely to be something like ESR_ELx_EC_SYS64,
> which the kernel handles as an UNDEF, but that doesn't match what we
> want to do here (the guest could legitimately handle that in a completely
> different way).
Tested-by: Shannon Zhao <shannon.zhao@linaro.org>

-- 
Shannon


* Re: [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection
  2016-01-10 19:45   ` Christoffer Dall
@ 2016-01-11 10:06     ` Marc Zyngier
  -1 siblings, 0 replies; 20+ messages in thread
From: Marc Zyngier @ 2016-01-11 10:06 UTC (permalink / raw)
  To: Christoffer Dall; +Cc: kvmarm, Shannon Zhao, linux-arm-kernel, kvm

On 10/01/16 19:45, Christoffer Dall wrote:
> On Thu, Jan 07, 2016 at 09:03:36AM +0000, Marc Zyngier wrote:
>> At the moment, our fault injection is pretty limited. We always
>> generate a SYNC exception into EL1, as if the fault was actually
>> from EL1h, no matter how it was generated.
>>
>> This is obviously wrong, as EL0 can generate faults of its own
>> (not to mention the pretty-much unused EL1t mode).
>>
>> This patch fixes it by implementing section D1.10.2 of the ARMv8 ARM,
>> and in particular table D1-7 ("Vector offsets from vector table base
>> address"), which describes which vector to use depending on the source
>> exception level and type (synchronous, IRQ, FIQ or SError).
>>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>>  arch/arm64/kvm/inject_fault.c | 38 +++++++++++++++++++++++++++++++++++---
>>  1 file changed, 35 insertions(+), 3 deletions(-)
>>
>> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
>> index 648112e..4d1ac81 100644
>> --- a/arch/arm64/kvm/inject_fault.c
>> +++ b/arch/arm64/kvm/inject_fault.c
>> @@ -27,7 +27,11 @@
>>  
>>  #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
>>  				 PSR_I_BIT | PSR_D_BIT)
>> -#define EL1_EXCEPT_SYNC_OFFSET	0x200
>> +
>> +#define CURRENT_EL_SP_EL0_VECTOR	0x0
>> +#define CURRENT_EL_SP_ELx_VECTOR	0x200
>> +#define LOWER_EL_AArch64_VECTOR		0x400
>> +#define LOWER_EL_AArch32_VECTOR		0x600
>>  
>>  static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
>>  {
>> @@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
>>  		*fsr = 0x14;
>>  }
>>  
>> +enum exception_type {
>> +	except_type_sync	= 0,
>> +	except_type_irq		= 0x80,
>> +	except_type_fiq		= 0x100,
>> +	except_type_serror	= 0x180,
>> +};
>> +
>> +static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
>> +{
>> +	u64 exc_offset;
>> +
>> +	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
>> +	case PSR_MODE_EL1t:
>> +		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
>> +		break;
>> +	case PSR_MODE_EL1h:
>> +		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
>> +		break;
>> +	case PSR_MODE_EL0t:
>> +		exc_offset = LOWER_EL_AArch64_VECTOR;
>> +		break;
>> +	default:
>> +		exc_offset = LOWER_EL_AArch32_VECTOR;
> 
> so this catches any EL0 32-bit state, right?

Indeed.

> 
> If so:
> 
> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>

Thanks!

	M.
-- 
Jazz is not dead. It just smells funny...

* Re: [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection
  2016-01-07  9:03 ` Marc Zyngier
@ 2016-01-12 18:23   ` Andrew Jones
  -1 siblings, 0 replies; 20+ messages in thread
From: Andrew Jones @ 2016-01-12 18:23 UTC (permalink / raw)
  To: Marc Zyngier
  Cc: Christoffer Dall, Shannon Zhao, Peter Maydell, linux-arm-kernel,
	kvm, kvmarm

On Thu, Jan 07, 2016 at 09:03:36AM +0000, Marc Zyngier wrote:
> At the moment, our fault injection is pretty limited. We always
> generate a SYNC exception into EL1, as if the fault was actually
> from EL1h, no matter how it was generated.
> 
> This is obviously wrong, as EL0 can generate faults of its own
> (not to mention the pretty-much unused EL1t mode).
> 
> This patch fixes it by implementing section D1.10.2 of the ARMv8 ARM,
> and in particular table D1-7 ("Vector offsets from vector table base
> address"), which describes which vector to use depending on the source
> exception level and type (synchronous, IRQ, FIQ or SError).
> 
> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
> ---
>  arch/arm64/kvm/inject_fault.c | 38 +++++++++++++++++++++++++++++++++++---
>  1 file changed, 35 insertions(+), 3 deletions(-)
> 
> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
> index 648112e..4d1ac81 100644
> --- a/arch/arm64/kvm/inject_fault.c
> +++ b/arch/arm64/kvm/inject_fault.c
> @@ -27,7 +27,11 @@
>  
>  #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
>  				 PSR_I_BIT | PSR_D_BIT)
> -#define EL1_EXCEPT_SYNC_OFFSET	0x200
> +
> +#define CURRENT_EL_SP_EL0_VECTOR	0x0
> +#define CURRENT_EL_SP_ELx_VECTOR	0x200
> +#define LOWER_EL_AArch64_VECTOR		0x400
> +#define LOWER_EL_AArch32_VECTOR		0x600
>  
>  static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
>  {
> @@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
>  		*fsr = 0x14;
>  }
>  
> +enum exception_type {
> +	except_type_sync	= 0,
> +	except_type_irq		= 0x80,
> +	except_type_fiq		= 0x100,
> +	except_type_serror	= 0x180,
> +};
> +
> +static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
> +{
> +	u64 exc_offset;
> +
> +	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
> +	case PSR_MODE_EL1t:
> +		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
> +		break;
> +	case PSR_MODE_EL1h:
> +		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
> +		break;
> +	case PSR_MODE_EL0t:
> +		exc_offset = LOWER_EL_AArch64_VECTOR;
> +		break;
> +	default:
> +		exc_offset = LOWER_EL_AArch32_VECTOR;
> +	}
> +
> +	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
> +}
> +
>  static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
>  {
>  	unsigned long cpsr = *vcpu_cpsr(vcpu);
> @@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
>  	*vcpu_spsr(vcpu) = cpsr;
>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>  
> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>  
>  	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
>  
> @@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
>  	*vcpu_spsr(vcpu) = cpsr;
>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>  
> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>  
>  	/*
>  	 * Build an unknown exception, depending on the instruction

Hi Marc,

Please shoot me if the following statement is false.

Without this patch, if a guest that is running in, e.g. PSR_MODE_EL0t,
tries to do, e.g. 'smc #0', then KVM will inject an undef exception,
which should lead to the guest resuming at VBAR_EL1 + 0x400, but instead
it resumes at VBAR_EL1 + 0x200.
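
For concreteness, here is a throwaway sketch of the offset arithmetic with
an arbitrary, made-up VBAR_EL1 value (not taken from the unit test or from
KVM):

#include <stdio.h>

int main(void)
{
	unsigned long vbar_el1 = 0x80000000UL;	/* arbitrary example base */

	/* What the pre-patch code always uses: "current EL, SP_ELx" */
	printf("resumes at:    0x%lx\n", vbar_el1 + 0x200);

	/* What a fault injected while the guest was in EL0t should use:
	 * "lower EL, AArch64" */
	printf("should resume: 0x%lx\n", vbar_el1 + 0x400);

	return 0;
}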

Now, if you haven't started loading your gun to shoot me yet, then I'm
quite confused as to why the unit test[1] I wrote for this works just
fine without this patch.

(If you're suspicious of the vector table in kvm-unit-tests, take a
 look at the bottom of [2] to see it.)

Thanks,
drew

[1] https://github.com/rhdrjones/kvm-unit-tests/blob/arm64/test-exinj/arm/test-exinj.c
[2] https://github.com/rhdrjones/kvm-unit-tests/blob/arm64/test-exinj/arm/cstart64.S

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection
  2016-01-12 18:23   ` Andrew Jones
@ 2016-01-12 18:44     ` Marc Zyngier
  -1 siblings, 0 replies; 20+ messages in thread
From: Marc Zyngier @ 2016-01-12 18:44 UTC (permalink / raw)
  To: Andrew Jones
  Cc: Christoffer Dall, Shannon Zhao, Peter Maydell, linux-arm-kernel,
	kvm, kvmarm

Hey Drew,

On 12/01/16 18:23, Andrew Jones wrote:
> On Thu, Jan 07, 2016 at 09:03:36AM +0000, Marc Zyngier wrote:
>> At the moment, our fault injection is pretty limited. We always
>> generate a SYNC exception into EL1, as if the fault was actually
>> from EL1h, no matter how it was generated.
>>
>> This is obviously wrong, as EL0 can generate faults of its own
>> (not to mention the pretty-much unused EL1t mode).
>>
>> This patch fixes it by implementing section D1.10.2 of the ARMv8 ARM,
>> and in particular table D1-7 ("Vector offsets from vector table base
>> address"), which describes which vector to use depending on the source
>> exception level and type (synchronous, IRQ, FIQ or SError).
>>
>> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
>> ---
>>  arch/arm64/kvm/inject_fault.c | 38 +++++++++++++++++++++++++++++++++++---
>>  1 file changed, 35 insertions(+), 3 deletions(-)
>>
>> diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
>> index 648112e..4d1ac81 100644
>> --- a/arch/arm64/kvm/inject_fault.c
>> +++ b/arch/arm64/kvm/inject_fault.c
>> @@ -27,7 +27,11 @@
>>  
>>  #define PSTATE_FAULT_BITS_64 	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
>>  				 PSR_I_BIT | PSR_D_BIT)
>> -#define EL1_EXCEPT_SYNC_OFFSET	0x200
>> +
>> +#define CURRENT_EL_SP_EL0_VECTOR	0x0
>> +#define CURRENT_EL_SP_ELx_VECTOR	0x200
>> +#define LOWER_EL_AArch64_VECTOR		0x400
>> +#define LOWER_EL_AArch32_VECTOR		0x600
>>  
>>  static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
>>  {
>> @@ -97,6 +101,34 @@ static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
>>  		*fsr = 0x14;
>>  }
>>  
>> +enum exception_type {
>> +	except_type_sync	= 0,
>> +	except_type_irq		= 0x80,
>> +	except_type_fiq		= 0x100,
>> +	except_type_serror	= 0x180,
>> +};
>> +
>> +static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
>> +{
>> +	u64 exc_offset;
>> +
>> +	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
>> +	case PSR_MODE_EL1t:
>> +		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
>> +		break;
>> +	case PSR_MODE_EL1h:
>> +		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
>> +		break;
>> +	case PSR_MODE_EL0t:
>> +		exc_offset = LOWER_EL_AArch64_VECTOR;
>> +		break;
>> +	default:
>> +		exc_offset = LOWER_EL_AArch32_VECTOR;
>> +	}
>> +
>> +	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
>> +}
>> +
>>  static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
>>  {
>>  	unsigned long cpsr = *vcpu_cpsr(vcpu);
>> @@ -108,8 +140,8 @@ static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr
>>  	*vcpu_spsr(vcpu) = cpsr;
>>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>>  
>> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
>> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>>  
>>  	vcpu_sys_reg(vcpu, FAR_EL1) = addr;
>>  
>> @@ -143,8 +175,8 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
>>  	*vcpu_spsr(vcpu) = cpsr;
>>  	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
>>  
>> +	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);
>>  	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
>> -	*vcpu_pc(vcpu) = vcpu_sys_reg(vcpu, VBAR_EL1) + EL1_EXCEPT_SYNC_OFFSET;
>>  
>>  	/*
>>  	 * Build an unknown exception, depending on the instruction
> 
> Hi Marc,
> 
> Please shoot me if the following statement is false.

I wouldn't do that. Having had the privilege to waste 10 months of my
life doing a military service, I quickly discovered I didn't like
weapons nor those who carry them...

> Without this patch, if a guest that is running in, e.g. PSR_MODE_EL0t,
> tries to do, e.g. 'smc #0', then KVM will inject an undef exception,
> which should lead to the guest resuming at VBAR_EL1 + 0x400, but instead
> it resumes at VBAR_EL1 + 0x200.

Not quite. SMC is undefined at EL0 (see C6.6.165), so it is not trapped
to EL2; the resulting exception is taken directly to EL1. KVM is not in
the loop at all in that case.

> Now, if you haven't started loading your gun to shoot me yet, then I'm
> quite confused as to why the unit test[1] I wrote for this works just
> fine without this patch.

If you want to exercise that path, you have to access something that
wouldn't trap to EL1, but that EL2 traps. I don't think we have much
stuff so far that can be used at EL0 and would be trapped to EL2,
unfortunately (the PMU code is probably the first thing we'll merge).

In the meantime, this test case is fairly pointless, I'm afraid...

Thanks,

	M.
-- 
Jazz is not dead. It just smells funny...

^ permalink raw reply	[flat|nested] 20+ messages in thread

* Re: [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection
  2016-01-12 18:44     ` Marc Zyngier
@ 2016-01-12 19:13       ` Andrew Jones
  -1 siblings, 0 replies; 20+ messages in thread
From: Andrew Jones @ 2016-01-12 19:13 UTC (permalink / raw)
  To: Marc Zyngier
  Cc: Christoffer Dall, Shannon Zhao, Peter Maydell, linux-arm-kernel,
	kvm, kvmarm

On Tue, Jan 12, 2016 at 06:44:34PM +0000, Marc Zyngier wrote:
> On 12/01/16 18:23, Andrew Jones wrote:
> > Hi Marc,
> > 
> > Please shoot me if the following statement is false.
> 
> I wouldn't do that. Having had the privilege to waste 10 months of my
> life doing a military service, I quickly discovered I didn't like
> weapons nor those who carry them...
> 
> > Without this patch, if a guest that is running in, e.g. PSR_MODE_EL0t,
> > tries to do, e.g. 'smc #0', then KVM will inject an undef exception,
> > which should lead to the guest resuming at VBAR_EL1 + 0x400, but instead
> > it resumes at VBAR_EL1 + 0x200.
> 
> Not quite. SMC is undefined at EL0 (see C6.6.165), so it is not trapped
> to EL2; the resulting exception is taken directly to EL1. KVM is not in
> the loop at all in that case.
> 
> > Now, if you haven't started loading your gun to shoot me yet, then I'm
> > quite confused as to why the unit test[1] I wrote for this works just
> > fine without this patch.
> 
> If you want to exercise that path, you have to access something that
> wouldn't trap to EL1, but that EL2 traps. I don't think we have much
> stuff so far that can be used at EL0 and would be trapped to EL2,
> unfortunately (the PMU code is probably the first thing we'll merge).
>
> In the meantime, this test case is fairly pointless, I'm afraid...

Ah, thanks for the clarification, and the lack of desire to shoot me.
I'll stash this test case for a later day.

drew

^ permalink raw reply	[flat|nested] 20+ messages in thread

end of thread, other threads:[~2016-01-12 19:13 UTC | newest]

Thread overview: 20+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2016-01-07  9:03 [PATCH] arm64: KVM: Fix AArch64 guest userspace exception injection Marc Zyngier
2016-01-07  9:03 ` Marc Zyngier
2016-01-08  8:36 ` Shannon Zhao
2016-01-08  8:36   ` Shannon Zhao
2016-01-08  8:36   ` Shannon Zhao
2016-01-08  8:56   ` Marc Zyngier
2016-01-08  8:56     ` Marc Zyngier
2016-01-11  1:36     ` Shannon Zhao
2016-01-11  1:36       ` Shannon Zhao
2016-01-11  1:36       ` Shannon Zhao
2016-01-10 19:45 ` Christoffer Dall
2016-01-10 19:45   ` Christoffer Dall
2016-01-11 10:06   ` Marc Zyngier
2016-01-11 10:06     ` Marc Zyngier
2016-01-12 18:23 ` Andrew Jones
2016-01-12 18:23   ` Andrew Jones
2016-01-12 18:44   ` Marc Zyngier
2016-01-12 18:44     ` Marc Zyngier
2016-01-12 19:13     ` Andrew Jones
2016-01-12 19:13       ` Andrew Jones

This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.