From: Marc Zyngier <marc.zyngier@arm.com>
To: linux-arm-kernel@lists.infradead.org, kvm@vger.kernel.org,
	kvmarm@lists.cs.columbia.edu
Cc: catalin.marinas@arm.com, will.deacon@arm.com
Subject: [PATCH v2 25/30] arm64: KVM: 32bit conditional execution emulation
Date: Tue, 26 Mar 2013 17:01:20 +0000	[thread overview]
Message-ID: <1364317285-20937-26-git-send-email-marc.zyngier@arm.com> (raw)
In-Reply-To: <1364317285-20937-1-git-send-email-marc.zyngier@arm.com>

As conditional instructions can trap on AArch32, add the thinnest
possible emulation layer to keep 32bit guests happy.
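
For reference (this is not part of the patch), the exit handling code
is expected to use these hooks roughly as in the sketch below, where
handle_trap() is a made-up name and the IL bit is read straight out of
the ESR for the sake of the example:

  static int handle_trap(struct kvm_vcpu *vcpu)
  {
  	/* ESR_EL2.IL is bit 25: clear for a 16bit Thumb instruction. */
  	bool is_wide = !!(kvm_vcpu_get_hsr(vcpu) & (1U << 25));

  	if (!kvm_condition_valid(vcpu)) {
  		/*
  		 * The trapped AArch32 instruction failed its condition
  		 * check: don't emulate it, just skip it (this also
  		 * advances the Thumb ITSTATE).
  		 */
  		kvm_skip_instr(vcpu, is_wide);
  		return 1;
  	}

  	/* ... emulate the instruction, then move past it ... */
  	kvm_skip_instr(vcpu, is_wide);
  	return 1;
  }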

Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
---
 arch/arm64/include/asm/kvm_emulate.h |  13 ++-
 arch/arm64/kvm/Makefile              |   2 +-
 arch/arm64/kvm/emulate.c             | 154 +++++++++++++++++++++++++++++++++++
 3 files changed, 166 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm64/kvm/emulate.c

diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 3c42f0c38..8641d63 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -31,6 +31,9 @@
 unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
 unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
 
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
+
 void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
@@ -57,12 +60,18 @@ static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
 
 static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
-	return true;	/* No conditionals on arm64 */
+	if (vcpu_mode_is_32bit(vcpu))
+		return kvm_condition_valid32(vcpu);
+
+	return true;
 }
 
 static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
 {
-	*vcpu_pc(vcpu) += 4;
+	if (vcpu_mode_is_32bit(vcpu))
+		kvm_skip_instr32(vcpu, is_wide_instr);
+	else
+		*vcpu_pc(vcpu) += 4;
 }
 
 static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index e9c911f..be11204 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_KVM_ARM_HOST) += $(addprefix ../../../virt/kvm/, kvm_main.o coalesc
 
 obj-$(CONFIG_KVM_ARM_HOST) += $(addprefix ../../../arch/arm/kvm/, arm.o mmu.o mmio.o psci.o perf.o)
 
-obj-$(CONFIG_KVM_ARM_HOST) += inject_fault.o regmap.o
+obj-$(CONFIG_KVM_ARM_HOST) += emulate.o inject_fault.o regmap.o
 obj-$(CONFIG_KVM_ARM_HOST) += hyp.o hyp-init.o handle_exit.o idmap.o
 obj-$(CONFIG_KVM_ARM_HOST) += guest.o reset.o sys_regs.o sys_regs_generic_v8.o
 
diff --git a/arch/arm64/kvm/emulate.c b/arch/arm64/kvm/emulate.c
new file mode 100644
index 0000000..01d4713
--- /dev/null
+++ b/arch/arm64/kvm/emulate.c
@@ -0,0 +1,154 @@
+/*
+ * (not much of an) Emulation layer for 32bit guests.
+ *
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_emulate.h>
+
+/*
+ * stolen from arch/arm/kernel/opcodes.c
+ *
+ * condition code lookup table
+ * index into the table is test code: EQ, NE, ... LT, GT, AL, NV
+ *
+ * bit position in short is condition code: NZCV
+ */
+static const unsigned short cc_map[16] = {
+	0xF0F0,			/* EQ == Z set            */
+	0x0F0F,			/* NE                     */
+	0xCCCC,			/* CS == C set            */
+	0x3333,			/* CC                     */
+	0xFF00,			/* MI == N set            */
+	0x00FF,			/* PL                     */
+	0xAAAA,			/* VS == V set            */
+	0x5555,			/* VC                     */
+	0x0C0C,			/* HI == C set && Z clear */
+	0xF3F3,			/* LS == C clear || Z set */
+	0xAA55,			/* GE == (N==V)           */
+	0x55AA,			/* LT == (N!=V)           */
+	0x0A05,			/* GT == (!Z && (N==V))   */
+	0xF5FA,			/* LE == (Z || (N!=V))    */
+	0xFFFF,			/* AL always              */
+	0			/* NV                     */
+};
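+
+/*
+ * Worked example (illustrative): EQ is condition code 0, so cc_map[0] is
+ * 0xF0F0. With guest flags N=0 Z=1 C=0 V=0, NZCV (CPSR[31:28]) reads as 4;
+ * bit 4 of 0xF0F0 is set, so EQ passes. With Z clear, NZCV reads as 0,
+ * bit 0 of 0xF0F0 is clear, and EQ fails.
+ */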
+
+static int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+{
+	u32 esr = kvm_vcpu_get_hsr(vcpu);
+
+	if (esr & ESR_EL2_CV)
+		return (esr & ESR_EL2_COND) >> ESR_EL2_COND_SHIFT;
+
+	return -1;
+}
+
+/*
+ * Check if a trapped instruction should have been executed or not.
+ */
+bool kvm_condition_valid32(const struct kvm_vcpu *vcpu)
+{
+	unsigned long cpsr;
+	u32 cpsr_cond;
+	int cond;
+
+	/* Top two bits non-zero?  Unconditional. */
+	if (kvm_vcpu_get_hsr(vcpu) >> 30)
+		return true;
+
+	/* Is condition field valid? */
+	cond = kvm_vcpu_get_condition(vcpu);
+	if (cond == 0xE)
+		return true;
+
+	cpsr = *vcpu_cpsr(vcpu);
+
+	if (cond < 0) {
+		/* This can happen in Thumb mode: examine IT state. */
+		unsigned long it;
+
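+		/*
+		 * Reassemble ITSTATE[7:0]: IT[7:2] live in CPSR[15:10],
+		 * IT[1:0] in CPSR[26:25].
+		 */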
+		it = ((cpsr >> 8) & 0xFC) | ((cpsr >> 25) & 0x3);
+
+		/* it == 0 => unconditional. */
+		if (it == 0)
+			return true;
+
+		/* The cond for this insn works out as the top 4 bits. */
+		cond = (it >> 4);
+	}
+
+	cpsr_cond = cpsr >> 28;
+
+	if (!((cc_map[cond] >> cpsr_cond) & 1))
+		return false;
+
+	return true;
+}
+
+/**
+ * kvm_adjust_itstate - adjust ITSTATE when emulating instructions in IT-block
+ * @vcpu:	The VCPU pointer
+ *
+ * When exceptions occur while instructions are executed in Thumb IF-THEN
+ * blocks, the ITSTATE field of the CPSR is not advanced (updated), so we have
+ * to do this little bit of work manually. The fields map like this:
+ *
+ * IT[7:0] -> CPSR[26:25],CPSR[15:10]
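+ *
+ * As an illustrative example, "ITT EQ" sets ITSTATE to 0b00000100; after the
+ * first instruction of the block it advances to 0b00001000, and after the
+ * second it is cleared to zero.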
+ */
+static void kvm_adjust_itstate(struct kvm_vcpu *vcpu)
+{
+	unsigned long itbits, cond;
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
+	bool is_arm = !(cpsr & COMPAT_PSR_T_BIT);
+
+	BUG_ON(is_arm && (cpsr & COMPAT_PSR_IT_MASK));
+
+	if (!(cpsr & COMPAT_PSR_IT_MASK))
+		return;
+
+	cond = (cpsr & 0xe000) >> 13;
+	itbits = (cpsr & 0x1c00) >> (10 - 2);
+	itbits |= (cpsr & (0x3 << 25)) >> 25;
+
+	/* Perform ITAdvance (see page A-52 in ARM DDI 0406C) */
+	if ((itbits & 0x7) == 0)
+		itbits = cond = 0;
+	else
+		itbits = (itbits << 1) & 0x1f;
+
+	cpsr &= ~COMPAT_PSR_IT_MASK;
+	cpsr |= cond << 13;
+	cpsr |= (itbits & 0x1c) << (10 - 2);
+	cpsr |= (itbits & 0x3) << 25;
+	*vcpu_cpsr(vcpu) = cpsr;
+}
+
+/**
+ * kvm_skip_instr32 - skip a trapped instruction and proceed to the next
+ * @vcpu: The vcpu pointer
+ * @is_wide_instr: true if the trapped instruction was a 32bit (wide) encoding
+ */
+void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
+{
+	bool is_thumb;
+
+	is_thumb = !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_T_BIT);
+	if (is_thumb && !is_wide_instr)
+		*vcpu_pc(vcpu) += 2;
+	else
+		*vcpu_pc(vcpu) += 4;
+	kvm_adjust_itstate(vcpu);
+}
-- 
1.8.1.4


