From: Gavin Shan <gshan@redhat.com>
To: kvmarm@lists.cs.columbia.edu
Cc: linux-arm-kernel@lists.infradead.org,
linux-kernel@vger.kernel.org, maz@kernel.org,
mark.rutland@arm.com, will@kernel.org, catalin.marinas@arm.com,
james.morse@arm.com, suzuki.poulose@arm.com, drjones@redhat.com,
eric.auger@redhat.com, aarcange@redhat.com, shan.gavin@gmail.com
Subject: [PATCH RFCv2 5/9] kvm/arm64: Replace hsr with esr
Date: Fri, 8 May 2020 13:29:15 +1000 [thread overview]
Message-ID: <20200508032919.52147-6-gshan@redhat.com> (raw)
In-Reply-To: <20200508032919.52147-1-gshan@redhat.com>
This replaces the variable names to make them self-explaining. The
tracepoints aren't changed accordingly because they're part of the ABI:
* @hsr to @esr
* @hsr_ec to @ec
* Use kvm_vcpu_trap_get_class() helper if possible
Signed-off-by: Gavin Shan <gshan@redhat.com>
---
arch/arm64/kvm/handle_exit.c | 28 ++++++++++++++--------------
arch/arm64/kvm/hyp/switch.c | 9 ++++-----
arch/arm64/kvm/sys_regs.c | 30 +++++++++++++++---------------
3 files changed, 33 insertions(+), 34 deletions(-)
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 00858db82a64..e3b3dcd5b811 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -123,13 +123,13 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
*/
static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- u32 hsr = kvm_vcpu_get_esr(vcpu);
+ u32 esr = kvm_vcpu_get_esr(vcpu);
int ret = 0;
run->exit_reason = KVM_EXIT_DEBUG;
- run->debug.arch.hsr = hsr;
+ run->debug.arch.hsr = esr;
- switch (ESR_ELx_EC(hsr)) {
+ switch (kvm_vcpu_trap_get_class(esr)) {
case ESR_ELx_EC_WATCHPT_LOW:
run->debug.arch.far = vcpu->arch.fault.far_el2;
/* fall through */
@@ -139,8 +139,8 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
case ESR_ELx_EC_BRK64:
break;
default:
- kvm_err("%s: un-handled case hsr: %#08x\n",
- __func__, (unsigned int) hsr);
+ kvm_err("%s: un-handled case esr: %#08x\n",
+ __func__, (unsigned int)esr);
ret = -1;
break;
}
@@ -150,10 +150,10 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run)
static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
- u32 hsr = kvm_vcpu_get_esr(vcpu);
+ u32 esr = kvm_vcpu_get_esr(vcpu);
- kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n",
- hsr, esr_get_class_string(hsr));
+ kvm_pr_unimpl("Unknown exception class: esr: %#08x -- %s\n",
+ esr, esr_get_class_string(esr));
kvm_inject_undefined(vcpu);
return 1;
@@ -230,10 +230,10 @@ static exit_handle_fn arm_exit_handlers[] = {
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
- u32 hsr = kvm_vcpu_get_esr(vcpu);
- u8 hsr_ec = ESR_ELx_EC(hsr);
+ u32 esr = kvm_vcpu_get_esr(vcpu);
+ u8 ec = kvm_vcpu_trap_get_class(esr);
- return arm_exit_handlers[hsr_ec];
+ return arm_exit_handlers[ec];
}
/*
@@ -273,15 +273,15 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
{
if (ARM_SERROR_PENDING(exception_index)) {
u32 esr = kvm_vcpu_get_esr(vcpu);
- u8 hsr_ec = ESR_ELx_EC(esr);
+ u8 ec = kvm_vcpu_trap_get_class(esr);
/*
* HVC/SMC already have an adjusted PC, which we need
* to correct in order to return to after having
* injected the SError.
*/
- if (hsr_ec == ESR_ELx_EC_HVC32 || hsr_ec == ESR_ELx_EC_HVC64 ||
- hsr_ec == ESR_ELx_EC_SMC32 || hsr_ec == ESR_ELx_EC_SMC64) {
+ if (ec == ESR_ELx_EC_HVC32 || ec == ESR_ELx_EC_HVC64 ||
+ ec == ESR_ELx_EC_SMC32 || ec == ESR_ELx_EC_SMC64) {
u32 adj = kvm_vcpu_trap_il_is32bit(esr) ? 4 : 2;
*vcpu_pc(vcpu) -= adj;
}
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 369f22f49f3d..7bf4840bf90e 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -356,8 +356,8 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_esr(vcpu);
+ u8 ec = kvm_vcpu_trap_get_class(esr);
bool vhe, sve_guest, sve_host;
- u8 hsr_ec;
if (!system_supports_fpsimd())
return false;
@@ -372,14 +372,13 @@ static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
vhe = has_vhe();
}
- hsr_ec = kvm_vcpu_trap_get_class(esr);
- if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
- hsr_ec != ESR_ELx_EC_SVE)
+ if (ec != ESR_ELx_EC_FP_ASIMD &&
+ ec != ESR_ELx_EC_SVE)
return false;
/* Don't handle SVE traps for non-SVE vcpus here: */
if (!sve_guest)
- if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
+ if (ec != ESR_ELx_EC_FP_ASIMD)
return false;
/* Valid trap. Switch the context: */
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 012fff834a4b..58f81ab519af 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -2182,10 +2182,10 @@ static void unhandled_cp_access(struct kvm_vcpu *vcpu,
struct sys_reg_params *params)
{
u32 esr = kvm_vcpu_get_esr(vcpu);
- u8 hsr_ec = kvm_vcpu_trap_get_class(esr);
+ u8 ec = kvm_vcpu_trap_get_class(esr);
int cp = -1;
- switch(hsr_ec) {
+ switch (ec) {
case ESR_ELx_EC_CP15_32:
case ESR_ELx_EC_CP15_64:
cp = 15;
@@ -2216,17 +2216,17 @@ static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
size_t nr_specific)
{
struct sys_reg_params params;
- u32 hsr = kvm_vcpu_get_esr(vcpu);
- int Rt = kvm_vcpu_sys_get_rt(hsr);
- int Rt2 = (hsr >> 10) & 0x1f;
+ u32 esr = kvm_vcpu_get_esr(vcpu);
+ int Rt = kvm_vcpu_sys_get_rt(esr);
+ int Rt2 = (esr >> 10) & 0x1f;
params.is_aarch32 = true;
params.is_32bit = false;
- params.CRm = (hsr >> 1) & 0xf;
- params.is_write = ((hsr & 1) == 0);
+ params.CRm = (esr >> 1) & 0xf;
+ params.is_write = ((esr & 1) == 0);
params.Op0 = 0;
- params.Op1 = (hsr >> 16) & 0xf;
+ params.Op1 = (esr >> 16) & 0xf;
params.Op2 = 0;
params.CRn = 0;
@@ -2273,18 +2273,18 @@ static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
size_t nr_specific)
{
struct sys_reg_params params;
- u32 hsr = kvm_vcpu_get_esr(vcpu);
- int Rt = kvm_vcpu_sys_get_rt(hsr);
+ u32 esr = kvm_vcpu_get_esr(vcpu);
+ int Rt = kvm_vcpu_sys_get_rt(esr);
params.is_aarch32 = true;
params.is_32bit = true;
- params.CRm = (hsr >> 1) & 0xf;
+ params.CRm = (esr >> 1) & 0xf;
params.regval = vcpu_get_reg(vcpu, Rt);
- params.is_write = ((hsr & 1) == 0);
- params.CRn = (hsr >> 10) & 0xf;
+ params.is_write = ((esr & 1) == 0);
+ params.CRn = (esr >> 10) & 0xf;
params.Op0 = 0;
- params.Op1 = (hsr >> 14) & 0x7;
- params.Op2 = (hsr >> 17) & 0x7;
+ params.Op1 = (esr >> 14) & 0x7;
+ params.Op2 = (esr >> 17) & 0x7;
if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
!emulate_cp(vcpu, &params, global, nr_global)) {
--
2.23.0
next prev parent reply other threads:[~2020-05-08 3:31 UTC|newest]
Thread overview: 41+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-05-08 3:29 [PATCH RFCv2 0/9] kvm/arm64: Support Async Page Fault Gavin Shan
2020-05-08 3:29 ` [PATCH RFCv2 1/9] arm64: Probe for the presence of KVM hypervisor services during boot Gavin Shan
2020-05-08 3:29 ` [PATCH RFCv2 2/9] arm/arm64: KVM: Advertise KVM UID to guests via SMCCC Gavin Shan
2020-05-08 3:29 ` [PATCH RFCv2 3/9] kvm/arm64: Rename kvm_vcpu_get_hsr() to kvm_vcpu_get_esr() Gavin Shan
2020-05-26 10:42 ` Mark Rutland
2020-05-27 2:43 ` Gavin Shan
2020-05-27 7:20 ` Marc Zyngier
2020-05-28 6:34 ` Gavin Shan
2020-05-08 3:29 ` [PATCH RFCv2 4/9] kvm/arm64: Detach ESR operator from vCPU struct Gavin Shan
2020-05-26 10:51 ` Mark Rutland
2020-05-27 2:55 ` Gavin Shan
2020-05-08 3:29 ` Gavin Shan [this message]
2020-05-26 10:45 ` [PATCH RFCv2 5/9] kvm/arm64: Replace hsr with esr Mark Rutland
2020-05-27 2:56 ` Gavin Shan
2020-05-08 3:29 ` [PATCH RFCv2 6/9] kvm/arm64: Export kvm_handle_user_mem_abort() with prefault mode Gavin Shan
2020-05-26 10:58 ` Mark Rutland
2020-05-27 3:01 ` Gavin Shan
2020-05-08 3:29 ` [PATCH RFCv2 7/9] kvm/arm64: Support async page fault Gavin Shan
2020-05-26 12:34 ` Mark Rutland
2020-05-27 4:05 ` Gavin Shan
2020-05-27 7:37 ` Marc Zyngier
2020-05-28 6:32 ` Gavin Shan
2020-05-08 3:29 ` [PATCH RFCv2 8/9] kernel/sched: Add cpu_rq_is_locked() Gavin Shan
2020-05-08 3:29 ` [PATCH RFCv2 9/9] arm64: Support async page fault Gavin Shan
2020-05-26 12:56 ` Mark Rutland
2020-05-27 6:48 ` Paolo Bonzini
2020-05-28 6:14 ` Gavin Shan
2020-05-28 7:03 ` Marc Zyngier
2020-05-28 10:53 ` Paolo Bonzini
2020-05-28 10:48 ` Paolo Bonzini
2020-05-28 23:02 ` Gavin Shan
2020-05-29 9:41 ` Marc Zyngier
2020-05-29 11:11 ` Paolo Bonzini
2020-05-31 12:44 ` Marc Zyngier
2020-06-01 9:21 ` Paolo Bonzini
2020-06-02 5:44 ` Gavin Shan
2020-05-25 23:39 ` [PATCH RFCv2 0/9] kvm/arm64: Support Async Page Fault Gavin Shan
2020-05-26 13:09 ` Mark Rutland
2020-05-27 2:39 ` Gavin Shan
2020-05-27 7:48 ` Marc Zyngier
2020-05-27 16:10 ` Paolo Bonzini
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200508032919.52147-6-gshan@redhat.com \
--to=gshan@redhat.com \
--cc=aarcange@redhat.com \
--cc=catalin.marinas@arm.com \
--cc=drjones@redhat.com \
--cc=eric.auger@redhat.com \
--cc=james.morse@arm.com \
--cc=kvmarm@lists.cs.columbia.edu \
--cc=linux-arm-kernel@lists.infradead.org \
--cc=linux-kernel@vger.kernel.org \
--cc=mark.rutland@arm.com \
--cc=maz@kernel.org \
--cc=shan.gavin@gmail.com \
--cc=suzuki.poulose@arm.com \
--cc=will@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).