* [PATCH V2 0/5] KVM vPMU support for AMD
@ 2015-04-08 16:18 Wei Huang
  2015-04-08 16:18 ` [PATCH V2 1/5] KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch Wei Huang
                   ` (6 more replies)
  0 siblings, 7 replies; 19+ messages in thread
From: Wei Huang @ 2015-04-08 16:18 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar, gleb, wei

Currently KVM only supports vPMU for Intel platforms. This patch set
enables vPMU support for AMD platforms by creating a common PMU
interface for x86. PMU accesses from guest VMs are dispatched to the
corresponding functions defined in the arch-specific files.
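
For reference, a minimal sketch of the dispatch pattern this series
introduces (simplified from the patches below, not the literal kernel
code):

struct kvm_pmu_ops *kvm_pmu_ops;	/* intel_pmu_ops or amd_pmu_ops */

/* Common code in pmu.c forwards guest PMU accesses to the vendor
 * implementation through the function pointers.
 */
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_pmu_ops->set_msr(vcpu, msr_info);
}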

V2:
  * Create a generic pmu.c file that is shared by Intel and AMD CPUs;
  * The pmu.c code becomes part of the kvm.ko module. Similarly, pmu_intel.c
    and pmu_amd.c are linked into kvm-intel.ko and kvm-amd.ko respectively;
  * Re-define the kvm_pmu_ops function pointers. Per Radim Krcmar's comments,
    a large portion of the Intel vPMU code is now consolidated and moved
    to pmu.c;
  * Polish the pmu_amd.c code to comply with the new definition of
    kvm_pmu_ops;

V1:
  * Adopt the file layout suggested by Radim Krcmar
  * Link arch module with its specific PMU file

RFC:
  * Initial version for RFC

Wei Huang (5):
  KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
  KVM: x86/vPMU: Rename pmu.c file to pmu_intel.c
  KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  KVM: x86/vPMU: Implement vPMU code for AMD CPUs
  KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs

 arch/x86/include/asm/kvm_host.h |  38 ++-
 arch/x86/kvm/Makefile           |   4 +-
 arch/x86/kvm/cpuid.c            |   3 +-
 arch/x86/kvm/pmu.c              | 563 +++++++++++-----------------------------
 arch/x86/kvm/pmu.h              |  98 +++++++
 arch/x86/kvm/pmu_amd.c          | 198 ++++++++++++++
 arch/x86/kvm/pmu_intel.c        | 360 +++++++++++++++++++++++++
 arch/x86/kvm/svm.c              |   8 +
 arch/x86/kvm/vmx.c              |   8 +
 arch/x86/kvm/x86.c              |  57 ++--
 10 files changed, 896 insertions(+), 441 deletions(-)
 create mode 100644 arch/x86/kvm/pmu.h
 create mode 100644 arch/x86/kvm/pmu_amd.c
 create mode 100644 arch/x86/kvm/pmu_intel.c

-- 
1.8.3.1



* [PATCH V2 1/5] KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
  2015-04-08 16:18 [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
@ 2015-04-08 16:18 ` Wei Huang
  2015-04-08 16:18 ` [PATCH V2 2/5] KVM: x86/vPMU: Rename pmu.c file to pmu_intel.c Wei Huang
                   ` (5 subsequent siblings)
  6 siblings, 0 replies; 19+ messages in thread
From: Wei Huang @ 2015-04-08 16:18 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar, gleb, wei

This patch defines a new function pointer struct (kvm_pmu_ops) to
support vPMU for both Intel and AMD. The function pointers defined in
this new struct will be wired up to the Intel and AMD implementations.
In addition, the struct that maps event_sel bits to PERF_TYPE_HARDWARE
events is moved from Intel-specific code to kvm_host.h, where it now
serves as a common struct.
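
For illustration only, a hedged sketch of how a vendor could reuse the
now-shared kvm_event_hw_type_mapping struct for its own table (the event
selects 0x76 and 0xc0 are common AMD encodings for cycles and retired
instructions, shown here purely as an example; the real AMD table is
added later in the series):

static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	/* hypothetical entries: eventsel, unit_mask, perf event type */
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
};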

Signed-off-by: Wei Huang <wei@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 35 +++++++++++++++++++++++++++++------
 arch/x86/kvm/pmu.c              | 21 ++++++++-------------
 2 files changed, 37 insertions(+), 19 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a236e39..6ef6acd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -310,6 +310,12 @@ struct kvm_mmu {
 	u64 pdptrs[4]; /* pae */
 };
 
+struct msr_data {
+	bool host_initiated;
+	u32 index;
+	u64 data;
+};
+
 enum pmc_type {
 	KVM_PMC_GP = 0,
 	KVM_PMC_FIXED,
@@ -324,6 +330,12 @@ struct kvm_pmc {
 	struct kvm_vcpu *vcpu;
 };
 
+struct kvm_event_hw_type_mapping {
+	u8 eventsel;
+	u8 unit_mask;
+	unsigned event_type;
+};
+
 struct kvm_pmu {
 	unsigned nr_arch_gp_counters;
 	unsigned nr_arch_fixed_counters;
@@ -342,6 +354,23 @@ struct kvm_pmu {
 	u64 reprogram_pmi;
 };
 
+struct kvm_pmu_ops {
+	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
+				    u8 unit_mask);
+	unsigned (*find_fixed_event)(int idx);
+	bool (*pmc_enabled)(struct kvm_pmc *pmc);
+	struct kvm_pmc *(*glb_idx_to_pmc)(struct kvm_pmu *pmu, int glb_idx);
+	int (*check_msr_idx)(struct kvm_vcpu *vcpu, unsigned msr_idx);
+	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu,
+					  unsigned msr_idx);
+	bool (*is_pmu_msr)(struct kvm_vcpu *vcpu, u32 msr);
+	int (*get_msr)(struct kvm_vcpu *vcpu, u32 index, u64 *data);
+	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+	void (*refresh)(struct kvm_vcpu *vcpu);
+	void (*init)(struct kvm_vcpu *vcpu);
+	void (*reset)(struct kvm_vcpu *vcpu);
+};
+
 enum {
 	KVM_DEBUGREG_BP_ENABLED = 1,
 	KVM_DEBUGREG_WONT_EXIT = 2,
@@ -671,12 +700,6 @@ struct kvm_vcpu_stat {
 
 struct x86_instruction_info;
 
-struct msr_data {
-	bool host_initiated;
-	u32 index;
-	u64 data;
-};
-
 struct kvm_lapic_irq {
 	u32 vector;
 	u32 delivery_mode;
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index 8e6b7d8..0bf0d1c 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -20,12 +20,7 @@
 #include "cpuid.h"
 #include "lapic.h"
 
-static struct kvm_arch_event_perf_mapping {
-	u8 eventsel;
-	u8 unit_mask;
-	unsigned event_type;
-	bool inexact;
-} arch_events[] = {
+static struct kvm_event_hw_type_mapping intel_arch_events[] = {
 	/* Index must match CPUID 0x0A.EBX bit vector */
 	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
 	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
@@ -37,7 +32,7 @@ static struct kvm_arch_event_perf_mapping {
 	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
 };
 
-/* mapping between fixed pmc index and arch_events array */
+/* mapping between fixed pmc index and intel_arch_events array */
 int fixed_pmc_events[] = {1, 0, 7};
 
 static bool pmc_is_gp(struct kvm_pmc *pmc)
@@ -202,16 +197,16 @@ static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
 {
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
-		if (arch_events[i].eventsel == event_select
-				&& arch_events[i].unit_mask == unit_mask
+	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
+		if (intel_arch_events[i].eventsel == event_select
+				&& intel_arch_events[i].unit_mask == unit_mask
 				&& (pmu->available_event_types & (1 << i)))
 			break;
 
-	if (i == ARRAY_SIZE(arch_events))
+	if (i == ARRAY_SIZE(intel_arch_events))
 		return PERF_COUNT_HW_MAX;
 
-	return arch_events[i].event_type;
+	return intel_arch_events[i].event_type;
 }
 
 static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
@@ -265,7 +260,7 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
 		return;
 
 	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			arch_events[fixed_pmc_events[idx]].event_type,
+			intel_arch_events[fixed_pmc_events[idx]].event_type,
 			!(en & 0x2), /* exclude user */
 			!(en & 0x1), /* exclude kernel */
 			pmi, false, false);
-- 
1.8.3.1



* [PATCH V2 2/5] KVM: x86/vPMU: Rename pmu.c file to pmu_intel.c
  2015-04-08 16:18 [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
  2015-04-08 16:18 ` [PATCH V2 1/5] KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch Wei Huang
@ 2015-04-08 16:18 ` Wei Huang
  2015-04-09 19:10   ` Radim Krčmář
  2015-04-08 16:18 ` [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM Wei Huang
                   ` (4 subsequent siblings)
  6 siblings, 1 reply; 19+ messages in thread
From: Wei Huang @ 2015-04-08 16:18 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar, gleb, wei

In preparation for unifying the vPMU implementation, this patch renames
pmu.c to pmu_intel.c. The Makefile is changed accordingly.

Signed-off-by: Wei Huang <wei@redhat.com>
---
 arch/x86/kvm/Makefile    |   2 +-
 arch/x86/kvm/pmu.c       | 571 -----------------------------------------------
 arch/x86/kvm/pmu_intel.c | 571 +++++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 572 insertions(+), 572 deletions(-)
 delete mode 100644 arch/x86/kvm/pmu.c
 create mode 100644 arch/x86/kvm/pmu_intel.c

diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 08f790d..ac20dd3 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -12,7 +12,7 @@ kvm-y			+= $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
 kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(KVM)/async_pf.o
 
 kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-			   i8254.o ioapic.o irq_comm.o cpuid.o pmu.o
+			   i8254.o ioapic.o irq_comm.o cpuid.o pmu_intel.o
 kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT)	+= assigned-dev.o iommu.o
 kvm-intel-y		+= vmx.o
 kvm-amd-y		+= svm.o
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
deleted file mode 100644
index 0bf0d1c..0000000
--- a/arch/x86/kvm/pmu.c
+++ /dev/null
@@ -1,571 +0,0 @@
-/*
- * Kernel-based Virtual Machine -- Performance Monitoring Unit support
- *
- * Copyright 2011 Red Hat, Inc. and/or its affiliates.
- *
- * Authors:
- *   Avi Kivity   <avi@redhat.com>
- *   Gleb Natapov <gleb@redhat.com>
- *
- * This work is licensed under the terms of the GNU GPL, version 2.  See
- * the COPYING file in the top-level directory.
- *
- */
-
-#include <linux/types.h>
-#include <linux/kvm_host.h>
-#include <linux/perf_event.h>
-#include <asm/perf_event.h>
-#include "x86.h"
-#include "cpuid.h"
-#include "lapic.h"
-
-static struct kvm_event_hw_type_mapping intel_arch_events[] = {
-	/* Index must match CPUID 0x0A.EBX bit vector */
-	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
-	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
-	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
-	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
-	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
-	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
-	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
-	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
-};
-
-/* mapping between fixed pmc index and intel_arch_events array */
-int fixed_pmc_events[] = {1, 0, 7};
-
-static bool pmc_is_gp(struct kvm_pmc *pmc)
-{
-	return pmc->type == KVM_PMC_GP;
-}
-
-static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
-{
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
-
-	return pmu->counter_bitmask[pmc->type];
-}
-
-static inline bool pmc_enabled(struct kvm_pmc *pmc)
-{
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
-	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
-}
-
-static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
-					 u32 base)
-{
-	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
-		return &pmu->gp_counters[msr - base];
-	return NULL;
-}
-
-static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
-{
-	int base = MSR_CORE_PERF_FIXED_CTR0;
-	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
-		return &pmu->fixed_counters[msr - base];
-	return NULL;
-}
-
-static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
-{
-	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
-}
-
-static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
-{
-	if (idx < INTEL_PMC_IDX_FIXED)
-		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
-	else
-		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
-}
-
-void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.apic)
-		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
-}
-
-static void trigger_pmi(struct irq_work *irq_work)
-{
-	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
-			irq_work);
-	struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
-			arch.pmu);
-
-	kvm_deliver_pmi(vcpu);
-}
-
-static void kvm_perf_overflow(struct perf_event *perf_event,
-			      struct perf_sample_data *data,
-			      struct pt_regs *regs)
-{
-	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
-	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
-		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
-		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
-	}
-}
-
-static void kvm_perf_overflow_intr(struct perf_event *perf_event,
-		struct perf_sample_data *data, struct pt_regs *regs)
-{
-	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
-	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
-		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
-		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
-		/*
-		 * Inject PMI. If vcpu was in a guest mode during NMI PMI
-		 * can be ejected on a guest mode re-entry. Otherwise we can't
-		 * be sure that vcpu wasn't executing hlt instruction at the
-		 * time of vmexit and is not going to re-enter guest mode until,
-		 * woken up. So we should wake it, but this is impossible from
-		 * NMI context. Do it from irq work instead.
-		 */
-		if (!kvm_is_in_guest())
-			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
-		else
-			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
-	}
-}
-
-static u64 read_pmc(struct kvm_pmc *pmc)
-{
-	u64 counter, enabled, running;
-
-	counter = pmc->counter;
-
-	if (pmc->perf_event)
-		counter += perf_event_read_value(pmc->perf_event,
-						 &enabled, &running);
-
-	/* FIXME: Scaling needed? */
-
-	return counter & pmc_bitmask(pmc);
-}
-
-static void stop_counter(struct kvm_pmc *pmc)
-{
-	if (pmc->perf_event) {
-		pmc->counter = read_pmc(pmc);
-		perf_event_release_kernel(pmc->perf_event);
-		pmc->perf_event = NULL;
-	}
-}
-
-static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
-		unsigned config, bool exclude_user, bool exclude_kernel,
-		bool intr, bool in_tx, bool in_tx_cp)
-{
-	struct perf_event *event;
-	struct perf_event_attr attr = {
-		.type = type,
-		.size = sizeof(attr),
-		.pinned = true,
-		.exclude_idle = true,
-		.exclude_host = 1,
-		.exclude_user = exclude_user,
-		.exclude_kernel = exclude_kernel,
-		.config = config,
-	};
-	if (in_tx)
-		attr.config |= HSW_IN_TX;
-	if (in_tx_cp)
-		attr.config |= HSW_IN_TX_CHECKPOINTED;
-
-	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
-
-	event = perf_event_create_kernel_counter(&attr, -1, current,
-						 intr ? kvm_perf_overflow_intr :
-						 kvm_perf_overflow, pmc);
-	if (IS_ERR(event)) {
-		printk_once("kvm: pmu event creation failed %ld\n",
-				PTR_ERR(event));
-		return;
-	}
-
-	pmc->perf_event = event;
-	clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
-}
-
-static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
-		u8 unit_mask)
-{
-	int i;
-
-	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
-		if (intel_arch_events[i].eventsel == event_select
-				&& intel_arch_events[i].unit_mask == unit_mask
-				&& (pmu->available_event_types & (1 << i)))
-			break;
-
-	if (i == ARRAY_SIZE(intel_arch_events))
-		return PERF_COUNT_HW_MAX;
-
-	return intel_arch_events[i].event_type;
-}
-
-static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
-{
-	unsigned config, type = PERF_TYPE_RAW;
-	u8 event_select, unit_mask;
-
-	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
-		printk_once("kvm pmu: pin control bit is ignored\n");
-
-	pmc->eventsel = eventsel;
-
-	stop_counter(pmc);
-
-	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
-		return;
-
-	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-
-	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
-				ARCH_PERFMON_EVENTSEL_INV |
-				ARCH_PERFMON_EVENTSEL_CMASK |
-				HSW_IN_TX |
-				HSW_IN_TX_CHECKPOINTED))) {
-		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
-				unit_mask);
-		if (config != PERF_COUNT_HW_MAX)
-			type = PERF_TYPE_HARDWARE;
-	}
-
-	if (type == PERF_TYPE_RAW)
-		config = eventsel & X86_RAW_EVENT_MASK;
-
-	reprogram_counter(pmc, type, config,
-			!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
-			!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-			eventsel & ARCH_PERFMON_EVENTSEL_INT,
-			(eventsel & HSW_IN_TX),
-			(eventsel & HSW_IN_TX_CHECKPOINTED));
-}
-
-static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
-{
-	unsigned en = en_pmi & 0x3;
-	bool pmi = en_pmi & 0x8;
-
-	stop_counter(pmc);
-
-	if (!en || !pmc_enabled(pmc))
-		return;
-
-	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			intel_arch_events[fixed_pmc_events[idx]].event_type,
-			!(en & 0x2), /* exclude user */
-			!(en & 0x1), /* exclude kernel */
-			pmi, false, false);
-}
-
-static inline u8 fixed_en_pmi(u64 ctrl, int idx)
-{
-	return (ctrl >> (idx * 4)) & 0xf;
-}
-
-static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
-{
-	int i;
-
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-		u8 en_pmi = fixed_en_pmi(data, i);
-		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
-
-		if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
-			continue;
-
-		reprogram_fixed_counter(pmc, en_pmi, i);
-	}
-
-	pmu->fixed_ctr_ctrl = data;
-}
-
-static void reprogram_idx(struct kvm_pmu *pmu, int idx)
-{
-	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
-
-	if (!pmc)
-		return;
-
-	if (pmc_is_gp(pmc))
-		reprogram_gp_counter(pmc, pmc->eventsel);
-	else {
-		int fidx = idx - INTEL_PMC_IDX_FIXED;
-		reprogram_fixed_counter(pmc,
-				fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
-	}
-}
-
-static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
-{
-	int bit;
-	u64 diff = pmu->global_ctrl ^ data;
-
-	pmu->global_ctrl = data;
-
-	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
-		reprogram_idx(pmu, bit);
-}
-
-bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	int ret;
-
-	switch (msr) {
-	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-	case MSR_CORE_PERF_GLOBAL_STATUS:
-	case MSR_CORE_PERF_GLOBAL_CTRL:
-	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		ret = pmu->version > 1;
-		break;
-	default:
-		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
-			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
-			|| get_fixed_pmc(pmu, msr);
-		break;
-	}
-	return ret;
-}
-
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_pmc *pmc;
-
-	switch (index) {
-	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-		*data = pmu->fixed_ctr_ctrl;
-		return 0;
-	case MSR_CORE_PERF_GLOBAL_STATUS:
-		*data = pmu->global_status;
-		return 0;
-	case MSR_CORE_PERF_GLOBAL_CTRL:
-		*data = pmu->global_ctrl;
-		return 0;
-	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		*data = pmu->global_ovf_ctrl;
-		return 0;
-	default:
-		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
-				(pmc = get_fixed_pmc(pmu, index))) {
-			*data = read_pmc(pmc);
-			return 0;
-		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
-			*data = pmc->eventsel;
-			return 0;
-		}
-	}
-	return 1;
-}
-
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_pmc *pmc;
-	u32 index = msr_info->index;
-	u64 data = msr_info->data;
-
-	switch (index) {
-	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-		if (pmu->fixed_ctr_ctrl == data)
-			return 0;
-		if (!(data & 0xfffffffffffff444ull)) {
-			reprogram_fixed_counters(pmu, data);
-			return 0;
-		}
-		break;
-	case MSR_CORE_PERF_GLOBAL_STATUS:
-		if (msr_info->host_initiated) {
-			pmu->global_status = data;
-			return 0;
-		}
-		break; /* RO MSR */
-	case MSR_CORE_PERF_GLOBAL_CTRL:
-		if (pmu->global_ctrl == data)
-			return 0;
-		if (!(data & pmu->global_ctrl_mask)) {
-			global_ctrl_changed(pmu, data);
-			return 0;
-		}
-		break;
-	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
-		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
-			if (!msr_info->host_initiated)
-				pmu->global_status &= ~data;
-			pmu->global_ovf_ctrl = data;
-			return 0;
-		}
-		break;
-	default:
-		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
-				(pmc = get_fixed_pmc(pmu, index))) {
-			if (!msr_info->host_initiated)
-				data = (s64)(s32)data;
-			pmc->counter += data - read_pmc(pmc);
-			return 0;
-		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
-			if (data == pmc->eventsel)
-				return 0;
-			if (!(data & pmu->reserved_bits)) {
-				reprogram_gp_counter(pmc, data);
-				return 0;
-			}
-		}
-	}
-	return 1;
-}
-
-int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	bool fixed = pmc & (1u << 30);
-	pmc &= ~(3u << 30);
-	return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
-		(fixed && pmc >= pmu->nr_arch_fixed_counters);
-}
-
-int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	bool fast_mode = pmc & (1u << 31);
-	bool fixed = pmc & (1u << 30);
-	struct kvm_pmc *counters;
-	u64 ctr;
-
-	pmc &= ~(3u << 30);
-	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
-		return 1;
-	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
-		return 1;
-	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-	ctr = read_pmc(&counters[pmc]);
-	if (fast_mode)
-		ctr = (u32)ctr;
-	*data = ctr;
-
-	return 0;
-}
-
-void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	struct kvm_cpuid_entry2 *entry;
-	union cpuid10_eax eax;
-	union cpuid10_edx edx;
-
-	pmu->nr_arch_gp_counters = 0;
-	pmu->nr_arch_fixed_counters = 0;
-	pmu->counter_bitmask[KVM_PMC_GP] = 0;
-	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
-	pmu->version = 0;
-	pmu->reserved_bits = 0xffffffff00200000ull;
-
-	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
-	if (!entry)
-		return;
-	eax.full = entry->eax;
-	edx.full = entry->edx;
-
-	pmu->version = eax.split.version_id;
-	if (!pmu->version)
-		return;
-
-	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
-					INTEL_PMC_MAX_GENERIC);
-	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
-	pmu->available_event_types = ~entry->ebx &
-					((1ull << eax.split.mask_length) - 1);
-
-	if (pmu->version == 1) {
-		pmu->nr_arch_fixed_counters = 0;
-	} else {
-		pmu->nr_arch_fixed_counters =
-			min_t(int, edx.split.num_counters_fixed,
-				INTEL_PMC_MAX_FIXED);
-		pmu->counter_bitmask[KVM_PMC_FIXED] =
-			((u64)1 << edx.split.bit_width_fixed) - 1;
-	}
-
-	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
-		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
-	pmu->global_ctrl_mask = ~pmu->global_ctrl;
-
-	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
-	if (entry &&
-	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
-	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
-		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
-}
-
-void kvm_pmu_init(struct kvm_vcpu *vcpu)
-{
-	int i;
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-
-	memset(pmu, 0, sizeof(*pmu));
-	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
-		pmu->gp_counters[i].type = KVM_PMC_GP;
-		pmu->gp_counters[i].vcpu = vcpu;
-		pmu->gp_counters[i].idx = i;
-	}
-	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
-		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
-		pmu->fixed_counters[i].vcpu = vcpu;
-		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
-	}
-	init_irq_work(&pmu->irq_work, trigger_pmi);
-	kvm_pmu_cpuid_update(vcpu);
-}
-
-void kvm_pmu_reset(struct kvm_vcpu *vcpu)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	int i;
-
-	irq_work_sync(&pmu->irq_work);
-	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
-		struct kvm_pmc *pmc = &pmu->gp_counters[i];
-		stop_counter(pmc);
-		pmc->counter = pmc->eventsel = 0;
-	}
-
-	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
-		stop_counter(&pmu->fixed_counters[i]);
-
-	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
-		pmu->global_ovf_ctrl = 0;
-}
-
-void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_pmu_reset(vcpu);
-}
-
-void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	u64 bitmask;
-	int bit;
-
-	bitmask = pmu->reprogram_pmi;
-
-	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
-
-		if (unlikely(!pmc || !pmc->perf_event)) {
-			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
-			continue;
-		}
-
-		reprogram_idx(pmu, bit);
-	}
-}
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
new file mode 100644
index 0000000..0bf0d1c
--- /dev/null
+++ b/arch/x86/kvm/pmu_intel.c
@@ -0,0 +1,571 @@
+/*
+ * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ *
+ * Copyright 2011 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ *   Avi Kivity   <avi@redhat.com>
+ *   Gleb Natapov <gleb@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include <asm/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+
+static struct kvm_event_hw_type_mapping intel_arch_events[] = {
+	/* Index must match CPUID 0x0A.EBX bit vector */
+	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES  },
+	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
+	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
+	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
+};
+
+/* mapping between fixed pmc index and intel_arch_events array */
+int fixed_pmc_events[] = {1, 0, 7};
+
+static bool pmc_is_gp(struct kvm_pmc *pmc)
+{
+	return pmc->type == KVM_PMC_GP;
+}
+
+static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+
+	return pmu->counter_bitmask[pmc->type];
+}
+
+static inline bool pmc_enabled(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
+}
+
+static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
+					 u32 base)
+{
+	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
+		return &pmu->gp_counters[msr - base];
+	return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
+{
+	int base = MSR_CORE_PERF_FIXED_CTR0;
+	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
+		return &pmu->fixed_counters[msr - base];
+	return NULL;
+}
+
+static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
+{
+	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
+}
+
+static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+{
+	if (idx < INTEL_PMC_IDX_FIXED)
+		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
+	else
+		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
+}
+
+void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.apic)
+		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
+static void trigger_pmi(struct irq_work *irq_work)
+{
+	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
+			irq_work);
+	struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
+			arch.pmu);
+
+	kvm_deliver_pmi(vcpu);
+}
+
+static void kvm_perf_overflow(struct perf_event *perf_event,
+			      struct perf_sample_data *data,
+			      struct pt_regs *regs)
+{
+	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+	}
+}
+
+static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+		struct perf_sample_data *data, struct pt_regs *regs)
+{
+	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
+	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
+		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+		/*
+		 * Inject PMI. If vcpu was in a guest mode during NMI PMI
+		 * can be ejected on a guest mode re-entry. Otherwise we can't
+		 * be sure that vcpu wasn't executing hlt instruction at the
+		 * time of vmexit and is not going to re-enter guest mode until,
+		 * woken up. So we should wake it, but this is impossible from
+		 * NMI context. Do it from irq work instead.
+		 */
+		if (!kvm_is_in_guest())
+			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+		else
+			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
+	}
+}
+
+static u64 read_pmc(struct kvm_pmc *pmc)
+{
+	u64 counter, enabled, running;
+
+	counter = pmc->counter;
+
+	if (pmc->perf_event)
+		counter += perf_event_read_value(pmc->perf_event,
+						 &enabled, &running);
+
+	/* FIXME: Scaling needed? */
+
+	return counter & pmc_bitmask(pmc);
+}
+
+static void stop_counter(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event) {
+		pmc->counter = read_pmc(pmc);
+		perf_event_release_kernel(pmc->perf_event);
+		pmc->perf_event = NULL;
+	}
+}
+
+static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
+		unsigned config, bool exclude_user, bool exclude_kernel,
+		bool intr, bool in_tx, bool in_tx_cp)
+{
+	struct perf_event *event;
+	struct perf_event_attr attr = {
+		.type = type,
+		.size = sizeof(attr),
+		.pinned = true,
+		.exclude_idle = true,
+		.exclude_host = 1,
+		.exclude_user = exclude_user,
+		.exclude_kernel = exclude_kernel,
+		.config = config,
+	};
+	if (in_tx)
+		attr.config |= HSW_IN_TX;
+	if (in_tx_cp)
+		attr.config |= HSW_IN_TX_CHECKPOINTED;
+
+	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
+	event = perf_event_create_kernel_counter(&attr, -1, current,
+						 intr ? kvm_perf_overflow_intr :
+						 kvm_perf_overflow, pmc);
+	if (IS_ERR(event)) {
+		printk_once("kvm: pmu event creation failed %ld\n",
+				PTR_ERR(event));
+		return;
+	}
+
+	pmc->perf_event = event;
+	clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
+}
+
+static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
+		u8 unit_mask)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
+		if (intel_arch_events[i].eventsel == event_select
+				&& intel_arch_events[i].unit_mask == unit_mask
+				&& (pmu->available_event_types & (1 << i)))
+			break;
+
+	if (i == ARRAY_SIZE(intel_arch_events))
+		return PERF_COUNT_HW_MAX;
+
+	return intel_arch_events[i].event_type;
+}
+
+static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+{
+	unsigned config, type = PERF_TYPE_RAW;
+	u8 event_select, unit_mask;
+
+	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
+		printk_once("kvm pmu: pin control bit is ignored\n");
+
+	pmc->eventsel = eventsel;
+
+	stop_counter(pmc);
+
+	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
+		return;
+
+	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+
+	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
+				ARCH_PERFMON_EVENTSEL_INV |
+				ARCH_PERFMON_EVENTSEL_CMASK |
+				HSW_IN_TX |
+				HSW_IN_TX_CHECKPOINTED))) {
+		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
+				unit_mask);
+		if (config != PERF_COUNT_HW_MAX)
+			type = PERF_TYPE_HARDWARE;
+	}
+
+	if (type == PERF_TYPE_RAW)
+		config = eventsel & X86_RAW_EVENT_MASK;
+
+	reprogram_counter(pmc, type, config,
+			!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+			!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+			eventsel & ARCH_PERFMON_EVENTSEL_INT,
+			(eventsel & HSW_IN_TX),
+			(eventsel & HSW_IN_TX_CHECKPOINTED));
+}
+
+static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+{
+	unsigned en = en_pmi & 0x3;
+	bool pmi = en_pmi & 0x8;
+
+	stop_counter(pmc);
+
+	if (!en || !pmc_enabled(pmc))
+		return;
+
+	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+			intel_arch_events[fixed_pmc_events[idx]].event_type,
+			!(en & 0x2), /* exclude user */
+			!(en & 0x1), /* exclude kernel */
+			pmi, false, false);
+}
+
+static inline u8 fixed_en_pmi(u64 ctrl, int idx)
+{
+	return (ctrl >> (idx * 4)) & 0xf;
+}
+
+static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+{
+	int i;
+
+	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+		u8 en_pmi = fixed_en_pmi(data, i);
+		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
+
+		if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
+			continue;
+
+		reprogram_fixed_counter(pmc, en_pmi, i);
+	}
+
+	pmu->fixed_ctr_ctrl = data;
+}
+
+static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+{
+	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+
+	if (!pmc)
+		return;
+
+	if (pmc_is_gp(pmc))
+		reprogram_gp_counter(pmc, pmc->eventsel);
+	else {
+		int fidx = idx - INTEL_PMC_IDX_FIXED;
+		reprogram_fixed_counter(pmc,
+				fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
+	}
+}
+
+static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+{
+	int bit;
+	u64 diff = pmu->global_ctrl ^ data;
+
+	pmu->global_ctrl = data;
+
+	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
+		reprogram_idx(pmu, bit);
+}
+
+bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	int ret;
+
+	switch (msr) {
+	case MSR_CORE_PERF_FIXED_CTR_CTRL:
+	case MSR_CORE_PERF_GLOBAL_STATUS:
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+		ret = pmu->version > 1;
+		break;
+	default:
+		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
+			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
+			|| get_fixed_pmc(pmu, msr);
+		break;
+	}
+	return ret;
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+
+	switch (index) {
+	case MSR_CORE_PERF_FIXED_CTR_CTRL:
+		*data = pmu->fixed_ctr_ctrl;
+		return 0;
+	case MSR_CORE_PERF_GLOBAL_STATUS:
+		*data = pmu->global_status;
+		return 0;
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		*data = pmu->global_ctrl;
+		return 0;
+	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+		*data = pmu->global_ovf_ctrl;
+		return 0;
+	default:
+		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+				(pmc = get_fixed_pmc(pmu, index))) {
+			*data = read_pmc(pmc);
+			return 0;
+		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+			*data = pmc->eventsel;
+			return 0;
+		}
+	}
+	return 1;
+}
+
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmc *pmc;
+	u32 index = msr_info->index;
+	u64 data = msr_info->data;
+
+	switch (index) {
+	case MSR_CORE_PERF_FIXED_CTR_CTRL:
+		if (pmu->fixed_ctr_ctrl == data)
+			return 0;
+		if (!(data & 0xfffffffffffff444ull)) {
+			reprogram_fixed_counters(pmu, data);
+			return 0;
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_STATUS:
+		if (msr_info->host_initiated) {
+			pmu->global_status = data;
+			return 0;
+		}
+		break; /* RO MSR */
+	case MSR_CORE_PERF_GLOBAL_CTRL:
+		if (pmu->global_ctrl == data)
+			return 0;
+		if (!(data & pmu->global_ctrl_mask)) {
+			global_ctrl_changed(pmu, data);
+			return 0;
+		}
+		break;
+	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
+		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
+			if (!msr_info->host_initiated)
+				pmu->global_status &= ~data;
+			pmu->global_ovf_ctrl = data;
+			return 0;
+		}
+		break;
+	default:
+		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
+				(pmc = get_fixed_pmc(pmu, index))) {
+			if (!msr_info->host_initiated)
+				data = (s64)(s32)data;
+			pmc->counter += data - read_pmc(pmc);
+			return 0;
+		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+			if (data == pmc->eventsel)
+				return 0;
+			if (!(data & pmu->reserved_bits)) {
+				reprogram_gp_counter(pmc, data);
+				return 0;
+			}
+		}
+	}
+	return 1;
+}
+
+int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	bool fixed = pmc & (1u << 30);
+	pmc &= ~(3u << 30);
+	return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
+		(fixed && pmc >= pmu->nr_arch_fixed_counters);
+}
+
+int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	bool fast_mode = pmc & (1u << 31);
+	bool fixed = pmc & (1u << 30);
+	struct kvm_pmc *counters;
+	u64 ctr;
+
+	pmc &= ~(3u << 30);
+	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
+		return 1;
+	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
+		return 1;
+	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
+	ctr = read_pmc(&counters[pmc]);
+	if (fast_mode)
+		ctr = (u32)ctr;
+	*data = ctr;
+
+	return 0;
+}
+
+void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_cpuid_entry2 *entry;
+	union cpuid10_eax eax;
+	union cpuid10_edx edx;
+
+	pmu->nr_arch_gp_counters = 0;
+	pmu->nr_arch_fixed_counters = 0;
+	pmu->counter_bitmask[KVM_PMC_GP] = 0;
+	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+	pmu->version = 0;
+	pmu->reserved_bits = 0xffffffff00200000ull;
+
+	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
+	if (!entry)
+		return;
+	eax.full = entry->eax;
+	edx.full = entry->edx;
+
+	pmu->version = eax.split.version_id;
+	if (!pmu->version)
+		return;
+
+	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
+					INTEL_PMC_MAX_GENERIC);
+	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
+	pmu->available_event_types = ~entry->ebx &
+					((1ull << eax.split.mask_length) - 1);
+
+	if (pmu->version == 1) {
+		pmu->nr_arch_fixed_counters = 0;
+	} else {
+		pmu->nr_arch_fixed_counters =
+			min_t(int, edx.split.num_counters_fixed,
+				INTEL_PMC_MAX_FIXED);
+		pmu->counter_bitmask[KVM_PMC_FIXED] =
+			((u64)1 << edx.split.bit_width_fixed) - 1;
+	}
+
+	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
+		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
+	pmu->global_ctrl_mask = ~pmu->global_ctrl;
+
+	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
+	if (entry &&
+	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
+	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
+		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+}
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
+{
+	int i;
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+	memset(pmu, 0, sizeof(*pmu));
+	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
+		pmu->gp_counters[i].type = KVM_PMC_GP;
+		pmu->gp_counters[i].vcpu = vcpu;
+		pmu->gp_counters[i].idx = i;
+	}
+	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
+		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
+		pmu->fixed_counters[i].vcpu = vcpu;
+		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
+	}
+	init_irq_work(&pmu->irq_work, trigger_pmi);
+	kvm_pmu_cpuid_update(vcpu);
+}
+
+void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	int i;
+
+	irq_work_sync(&pmu->irq_work);
+	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
+		struct kvm_pmc *pmc = &pmu->gp_counters[i];
+		stop_counter(pmc);
+		pmc->counter = pmc->eventsel = 0;
+	}
+
+	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
+		stop_counter(&pmu->fixed_counters[i]);
+
+	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
+		pmu->global_ovf_ctrl = 0;
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+	kvm_pmu_reset(vcpu);
+}
+
+void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	u64 bitmask;
+	int bit;
+
+	bitmask = pmu->reprogram_pmi;
+
+	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
+
+		if (unlikely(!pmc || !pmc->perf_event)) {
+			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+			continue;
+		}
+
+		reprogram_idx(pmu, bit);
+	}
+}
-- 
1.8.3.1



* [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  2015-04-08 16:18 [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
  2015-04-08 16:18 ` [PATCH V2 1/5] KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch Wei Huang
  2015-04-08 16:18 ` [PATCH V2 2/5] KVM: x86/vPMU: Rename pmu.c file to pmu_intel.c Wei Huang
@ 2015-04-08 16:18 ` Wei Huang
  2015-04-09 19:43   ` Radim Krčmář
  2015-04-10 12:57   ` Radim Krčmář
  2015-04-08 16:18 ` [PATCH V2 4/5] KVM: x86/vPMU: Implement vPMU code for AMD CPUs Wei Huang
                   ` (3 subsequent siblings)
  6 siblings, 2 replies; 19+ messages in thread
From: Wei Huang @ 2015-04-08 16:18 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar, gleb, wei

This patch converts the existing Intel-specific vPMU code into a common
vPMU interface with the following steps:

- A large portion of the Intel vPMU code is moved to the pmu.c file. The
  rest is re-arranged and hooked up with the newly defined intel_pmu_ops.
- Create a corresponding pmu_amd.c file with empty functions for AMD SVM.
- The PMU function pointer, kvm_pmu_ops, is initialized by calling
  kvm_x86_ops->get_pmu_ops() (a simplified sketch of the vendor hook-up
  follows this list).
- To reduce code size, the Intel and AMD modules are now built from
  their corresponding arch and PMU files; because of this arrangement,
  several functions are exposed publicly so they can be called from the
  PMU code.
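
A simplified sketch of the expected vendor hook-up (illustrative only;
the function name svm_get_pmu_ops is an assumption, and the actual svm.c
and vmx.c hunks are part of this patch):

static struct kvm_pmu_ops *svm_get_pmu_ops(void)
{
	return &amd_pmu_ops;	/* vmx.c would return &intel_pmu_ops */
}

static struct kvm_x86_ops svm_x86_ops = {
	/* ... existing callbacks ... */
	.get_pmu_ops = svm_get_pmu_ops,
};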

Signed-off-by: Wei Huang <wei@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |   3 +
 arch/x86/kvm/Makefile           |   6 +-
 arch/x86/kvm/cpuid.c            |   3 +-
 arch/x86/kvm/pmu.c              | 325 ++++++++++++++++++++++++++++++
 arch/x86/kvm/pmu.h              |  98 ++++++++++
 arch/x86/kvm/pmu_amd.c          |  97 +++++++++
 arch/x86/kvm/pmu_intel.c        | 423 ++++++++++------------------------------
 arch/x86/kvm/svm.c              |   8 +
 arch/x86/kvm/vmx.c              |   8 +
 arch/x86/kvm/x86.c              |  23 ++-
 10 files changed, 664 insertions(+), 330 deletions(-)
 create mode 100644 arch/x86/kvm/pmu.c
 create mode 100644 arch/x86/kvm/pmu.h
 create mode 100644 arch/x86/kvm/pmu_amd.c

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 6ef6acd..12bc4f3 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -848,6 +848,8 @@ struct kvm_x86_ops {
 	void (*enable_log_dirty_pt_masked)(struct kvm *kvm,
 					   struct kvm_memory_slot *slot,
 					   gfn_t offset, unsigned long mask);
+
+	struct kvm_pmu_ops *(*get_pmu_ops)(void);
 };
 
 struct kvm_arch_async_pf {
@@ -858,6 +860,7 @@ struct kvm_arch_async_pf {
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
+extern struct kvm_pmu_ops *kvm_pmu_ops;
 
 static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
 					   s64 adjustment)
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index ac20dd3..c4aaeb7 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -12,10 +12,10 @@ kvm-y			+= $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
 kvm-$(CONFIG_KVM_ASYNC_PF)	+= $(KVM)/async_pf.o
 
 kvm-y			+= x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
-			   i8254.o ioapic.o irq_comm.o cpuid.o pmu_intel.o
+			   i8254.o ioapic.o irq_comm.o cpuid.o pmu.o
 kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT)	+= assigned-dev.o iommu.o
-kvm-intel-y		+= vmx.o
-kvm-amd-y		+= svm.o
+kvm-intel-y		+= vmx.o pmu_intel.o
+kvm-amd-y		+= svm.o pmu_amd.o
 
 obj-$(CONFIG_KVM)	+= kvm.o
 obj-$(CONFIG_KVM_INTEL)	+= kvm-intel.o
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 8a80737..c5fc3c5 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -22,6 +22,7 @@
 #include "lapic.h"
 #include "mmu.h"
 #include "trace.h"
+#include "pmu.h"
 
 static u32 xstate_required_size(u64 xstate_bv, bool compacted)
 {
@@ -104,7 +105,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
 		((best->eax & 0xff00) >> 8) != 0)
 		return -EINVAL;
 
-	kvm_pmu_cpuid_update(vcpu);
+	kvm_pmu_refresh(vcpu);
 	return 0;
 }
 
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
new file mode 100644
index 0000000..fb560c5
--- /dev/null
+++ b/arch/x86/kvm/pmu.c
@@ -0,0 +1,325 @@
+/*
+ * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ *
+ * Copyright 2014 Red Hat, Inc. and/or its affiliates.
+ *
+ * Authors:
+ *   Avi Kivity   <avi@redhat.com>
+ *   Gleb Natapov <gleb@redhat.com>
+ *   Wei Huang    <wei@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ */
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include <asm/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+#include "pmu.h"
+
+/* Design Note:
+ *   - Each Perf counter is mainly defined as "struct kvm_pmc";
+ *   - There are two types of perf counters: general purpose (gp) and fixed.
+ *     gp-counters are stored in gp_counters[] and fixed-counters are stored
+ *     in fixed_counters[] respectively. Both of them are part of "struct 
+ *     kvm_pmu";
+ *   - pmu.c understands the difference between gp-counters and fixed-counters.
+ *     However AMD doesn't support fixed-counters;
+ *   - There are three types of index to access perf counters (PMC):
+ *       1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
+ *          has MSR_K7_PERFCTRn.
+ *       2. MSR Index (named msr_idx): This normally is used by RDPMC
+ *          instruction. For instance, AMD's RDPMC uses 0000_0003h in ECX
+ *          to access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar
+ *          mechanism, except that it also supports fixed counters.
+ *       3. Global Index (named glb_idx): glb_idx is an index specific to PMU
+ *          code. Each PMC has an index stored in kvm_pmc.idx field. The
+ *          mapping relationship is as the following:
+ *          * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
+ *                   [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
+ *          * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
+ */
+
+struct kvm_pmu_ops *kvm_pmu_ops;
+
+static void pmi_trigger_fn(struct irq_work *irq_work)
+{
+	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
+	struct kvm_vcpu *vcpu = PMU_TO_VCPU(pmu);
+
+	kvm_pmu_deliver_pmi(vcpu);
+}
+
+static void kvm_perf_overflow(struct perf_event *perf_event,
+			      struct perf_sample_data *data,
+			      struct pt_regs *regs)
+{
+	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct kvm_pmu *pmu = PMC_TO_PMU(pmc);
+
+	if (!test_and_set_bit(pmc->idx,
+			      (unsigned long *)&pmu->reprogram_pmi)) {
+		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+	}
+}
+
+static void kvm_perf_overflow_intr(struct perf_event *perf_event,
+				   struct perf_sample_data *data, 
+				   struct pt_regs *regs)
+{
+	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+	struct kvm_pmu *pmu = PMC_TO_PMU(pmc);
+
+	if (!test_and_set_bit(pmc->idx, 
+			      (unsigned long *)&pmu->reprogram_pmi)) {
+		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
+		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+
+		/*
+		 * Inject PMI. If vcpu was in a guest mode during NMI PMI
+		 * can be ejected on a guest mode re-entry. Otherwise we can't
+		 * be sure that vcpu wasn't executing hlt instruction at the
+		 * time of vmexit and is not going to re-enter guest mode until
+		 * woken up. So we should wake it, but this is impossible from
+		 * NMI context. Do it from irq work instead.
+		 */
+		if (!kvm_is_in_guest())
+			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
+		else
+			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
+	}
+}
+
+static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, 
+				  unsigned config, bool exclude_user, 
+				  bool exclude_kernel, bool intr, 
+				  bool in_tx, bool in_tx_cp)
+{
+	struct perf_event *event;
+	struct perf_event_attr attr = {
+		.type = type,
+		.size = sizeof(attr),
+		.pinned = true,
+		.exclude_idle = true,
+		.exclude_host = 1,
+		.exclude_user = exclude_user,
+		.exclude_kernel = exclude_kernel,
+		.config = config,
+	};
+
+	if (in_tx)
+		attr.config |= HSW_IN_TX;
+	if (in_tx_cp)
+		attr.config |= HSW_IN_TX_CHECKPOINTED;
+
+	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
+
+	event = perf_event_create_kernel_counter(&attr, -1, current,
+						 intr ? kvm_perf_overflow_intr:
+						 kvm_perf_overflow, pmc);
+	if (IS_ERR(event)) {
+		printk_once("kvm_pmu: event creation failed %ld\n",
+			    PTR_ERR(event));
+		return;
+	}
+
+	pmc->perf_event = event;
+	clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
+}
+
+void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+{
+	unsigned config, type = PERF_TYPE_RAW;
+	u8 event_select, unit_mask;
+
+	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
+		printk_once("kvm pmu: pin control bit is ignored\n");
+
+	pmc->eventsel = eventsel;
+
+	pmc_stop_counter(pmc);
+
+	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
+		return;
+
+	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
+	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
+
+	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
+			  ARCH_PERFMON_EVENTSEL_INV |
+			  ARCH_PERFMON_EVENTSEL_CMASK |
+			  HSW_IN_TX |
+			  HSW_IN_TX_CHECKPOINTED))) {
+		config = kvm_pmu_ops->find_arch_event(&pmc->vcpu->arch.pmu, 
+						      event_select,
+						      unit_mask);
+		if (config != PERF_COUNT_HW_MAX)
+			type = PERF_TYPE_HARDWARE;
+	}
+
+	if (type == PERF_TYPE_RAW)
+		config = eventsel & X86_RAW_EVENT_MASK;
+
+	pmc_reprogram_counter(pmc, type, config,
+			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
+			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
+			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
+			      (eventsel & HSW_IN_TX),
+			      (eventsel & HSW_IN_TX_CHECKPOINTED));
+}
+EXPORT_SYMBOL_GPL(reprogram_gp_counter);
+
+void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx)
+{
+	unsigned en_field = ctrl & 0x3;
+	bool pmi = ctrl & 0x8;
+
+	pmc_stop_counter(pmc);
+
+	if (!en_field || !pmc_is_enabled(pmc))
+		return;
+
+	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
+			      kvm_pmu_ops->find_fixed_event(fixed_idx),
+			      !(en_field & 0x2), /* exclude user */
+			      !(en_field & 0x1), /* exclude kernel */
+			      pmi, false, false);
+}
+EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
+
+void reprogram_counter(struct kvm_pmu *pmu, int glb_idx)
+{
+	struct kvm_pmc *pmc = kvm_pmu_ops->glb_idx_to_pmc(pmu, glb_idx);
+
+	if (!pmc)
+		return;
+
+	if (pmc_is_gp(pmc))
+		reprogram_gp_counter(pmc, pmc->eventsel);
+	else {
+		int fixed_idx = glb_idx - INTEL_PMC_IDX_FIXED;
+		u8 ctrl = fixed_ctr_ctrl_field(pmu->fixed_ctr_ctrl, fixed_idx);
+
+		reprogram_fixed_counter(pmc, ctrl, fixed_idx);
+	}
+}
+EXPORT_SYMBOL_GPL(reprogram_counter);
+
+
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	u64 bitmask;
+	int bit;
+
+	bitmask = pmu->reprogram_pmi;
+
+	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
+		struct kvm_pmc *pmc = kvm_pmu_ops->glb_idx_to_pmc(pmu, bit);
+
+		if (unlikely(!pmc || !pmc->perf_event)) {
+			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
+			continue;
+		}
+
+		reprogram_counter(pmu, bit);
+	}
+}
+
+/* check if msr_idx is a valid index to access PMU */
+inline int kvm_pmu_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
+{
+	return kvm_pmu_ops->check_msr_idx(vcpu, msr_idx);
+}
+
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned msr_idx, u64 *data)
+{
+	bool fast_mode = msr_idx & (1u << 31);
+	struct kvm_pmc *pmc;
+	u64 ctr_val;
+
+	pmc = kvm_pmu_ops->msr_idx_to_pmc(vcpu, msr_idx);
+	if (!pmc)
+		return 1;
+
+	ctr_val = pmc_read_counter(pmc);
+	if (fast_mode)
+		ctr_val = (u32)ctr_val;
+
+	*data = ctr_val;
+	return 0;
+}
+
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
+{
+	if (vcpu->arch.apic)
+		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
+}
+
+bool kvm_pmu_is_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+	return kvm_pmu_ops->is_pmu_msr(vcpu, msr);
+}
+
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+{
+	return kvm_pmu_ops->get_msr(vcpu, msr, data);
+}
+
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+	return kvm_pmu_ops->set_msr(vcpu, msr_info);
+}
+
+/* Refresh PMU settings. This function is generally called when the
+ * underlying settings change (such as the PMU-related CPUID), which
+ * should rarely happen.
+ */
+void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
+{
+	kvm_pmu_ops->refresh(vcpu);
+}
+
+void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	
+	irq_work_sync(&pmu->irq_work);
+	kvm_pmu_ops->reset(vcpu);
+}
+
+void kvm_pmu_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+
+	memset(pmu, 0, sizeof(*pmu));
+	kvm_pmu_ops->init(vcpu);
+	init_irq_work(&pmu->irq_work, pmi_trigger_fn);
+	kvm_pmu_refresh(vcpu);
+}
+
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
+{
+	kvm_pmu_reset(vcpu);
+}
+
+/* Called before using any PMU functions above */
+int kvm_pmu_arch_init(struct kvm_x86_ops *x86_ops)
+{
+	kvm_pmu_ops = x86_ops->get_pmu_ops();
+
+	if (x86_ops && (kvm_pmu_ops = x86_ops->get_pmu_ops()) != NULL)
+		return 0;
+	else
+		return -EINVAL;
+}
+
+void kvm_pmu_arch_exit(void)
+{
+	kvm_pmu_ops = NULL;
+}
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
new file mode 100644
index 0000000..4a711ec
--- /dev/null
+++ b/arch/x86/kvm/pmu.h
@@ -0,0 +1,98 @@
+#ifndef __KVM_X86_PMU_H
+#define __KVM_X86_PMU_H
+
+#define VCPU_TO_PMU(vcpu) (&(vcpu)->arch.pmu)
+#define PMU_TO_VCPU(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
+#define PMC_TO_PMU(pmc)   (&(pmc)->vcpu->arch.pmu)
+
+/* retrieve the control fields of IA32_FIXED_CTR_CTRL */
+#define fixed_ctr_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
+
+static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
+{
+	struct kvm_pmu *pmu = PMC_TO_PMU(pmc);
+	
+	return pmu->counter_bitmask[pmc->type];
+}
+
+static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
+{
+	u64 counter, enabled, running;
+
+	counter = pmc->counter;
+	if (pmc->perf_event)
+		counter += perf_event_read_value(pmc->perf_event,
+						 &enabled, &running);
+	/* FIXME: Scaling needed? */
+	return counter & pmc_bitmask(pmc);
+}
+
+static inline void pmc_stop_counter(struct kvm_pmc *pmc)
+{
+	if (pmc->perf_event) {
+		pmc->counter = pmc_read_counter(pmc);
+		perf_event_release_kernel(pmc->perf_event);
+		pmc->perf_event = NULL;
+	}
+}
+
+static inline bool pmc_is_gp(struct kvm_pmc *pmc)
+{
+	return pmc->type == KVM_PMC_GP;
+}
+
+static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
+{
+	return pmc->type == KVM_PMC_FIXED;
+}
+
+static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
+{
+	return kvm_pmu_ops->pmc_enabled(pmc);
+}
+
+/* Returns the general-purpose PMC associated with the specified MSR. Note
+ * that it can be used for both PERFCTRn and EVNTSELn; that is why it accepts
+ * base as a parameter to tell them apart.
+ */
+static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
+					 u32 base)
+{
+	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
+		return &pmu->gp_counters[msr - base];
+
+	return NULL;
+}
+
+/* Returns fixed PMC with the specified MSR */
+static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
+{
+	int base = MSR_CORE_PERF_FIXED_CTR0;
+
+	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
+		return &pmu->fixed_counters[msr - base];
+
+	return NULL;
+}
+
+void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
+void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
+void reprogram_counter(struct kvm_pmu *pmu, int glb_idx);
+
+void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
+int kvm_pmu_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx);
+int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
+void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
+bool kvm_pmu_is_msr(struct kvm_vcpu *vcpu, u32 msr);
+int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
+int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
+void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
+void kvm_pmu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
+int kvm_pmu_arch_init(struct kvm_x86_ops *x86_ops);
+void kvm_pmu_arch_exit(void);
+
+extern struct kvm_pmu_ops intel_pmu_ops;
+extern struct kvm_pmu_ops amd_pmu_ops;
+#endif /* __KVM_X86_PMU_H */
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
new file mode 100644
index 0000000..7d81b30
--- /dev/null
+++ b/arch/x86/kvm/pmu_amd.c
@@ -0,0 +1,97 @@
+/*
+ * KVM PMU support for AMD
+ *
+ * Copyright 2015, Red Hat, Inc. and/or its affiliates.
+ *
+ * Author:
+ *   Wei Huang <wei@redhat.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2.  See
+ * the COPYING file in the top-level directory.
+ *
+ * Implementation is based on pmu_intel.c file
+ */
+#include <linux/types.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include "x86.h"
+#include "cpuid.h"
+#include "lapic.h"
+#include "pmu.h"
+
+static unsigned amd_find_arch_event(struct kvm_pmu *pmu, 
+				    u8 event_select,
+				    u8 unit_mask)
+{
+	return PERF_COUNT_HW_MAX;
+}
+
+static unsigned amd_find_fixed_event(int fixed_idx)
+{
+	return PERF_COUNT_HW_MAX;
+}
+
+static bool amd_pmc_enabled(struct kvm_pmc *pmc)
+{
+	return false;
+}
+
+static struct kvm_pmc *amd_glb_idx_to_pmc(struct kvm_pmu *pmu, int glb_idx)
+{
+	return NULL;
+}
+
+/* Returns 0 if msr_idx's corresponding MSR exists; otherwise returns 1. */
+static int amd_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
+{
+	return 1;
+}
+
+static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, 
+					  unsigned msr_idx)
+{
+	return NULL;
+}
+
+static bool amd_is_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+	return false;
+}
+
+static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
+{
+	return 1;
+}
+
+static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+{
+	return 1;
+}
+
+static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
+{
+}
+
+static void amd_pmu_init(struct kvm_vcpu *vcpu)
+{
+}
+
+static void amd_pmu_reset(struct kvm_vcpu *vcpu)
+{
+}
+
+struct kvm_pmu_ops amd_pmu_ops = {
+	.find_arch_event = amd_find_arch_event,
+	.find_fixed_event = amd_find_fixed_event,
+	.pmc_enabled = amd_pmc_enabled,
+	.glb_idx_to_pmc = amd_glb_idx_to_pmc,
+	.check_msr_idx = amd_check_msr_idx,
+	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
+	.is_pmu_msr = amd_is_pmu_msr,
+	.get_msr = amd_pmu_get_msr,
+	.set_msr = amd_pmu_set_msr,
+	.refresh = amd_pmu_refresh,
+	.init = amd_pmu_init,
+	.reset = amd_pmu_reset,
+};
+EXPORT_SYMBOL_GPL(amd_pmu_ops);
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 0bf0d1c..83afa67 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -1,5 +1,5 @@
 /*
- * Kernel-based Virtual Machine -- Performance Monitoring Unit support
+ * KVM PMU support for Intel CPUs
  *
  * Copyright 2011 Red Hat, Inc. and/or its affiliates.
  *
@@ -11,7 +11,6 @@
  * the COPYING file in the top-level directory.
  *
  */
-
 #include <linux/types.h>
 #include <linux/kvm_host.h>
 #include <linux/perf_event.h>
@@ -19,6 +18,7 @@
 #include "x86.h"
 #include "cpuid.h"
 #include "lapic.h"
+#include "pmu.h"
 
 static struct kvm_event_hw_type_mapping intel_arch_events[] = {
 	/* Index must match CPUID 0x0A.EBX bit vector */
@@ -33,167 +33,44 @@ static struct kvm_event_hw_type_mapping intel_arch_events[] = {
 };
 
 /* mapping between fixed pmc index and intel_arch_events array */
-int fixed_pmc_events[] = {1, 0, 7};
-
-static bool pmc_is_gp(struct kvm_pmc *pmc)
-{
-	return pmc->type == KVM_PMC_GP;
-}
-
-static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
-{
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
-
-	return pmu->counter_bitmask[pmc->type];
-}
-
-static inline bool pmc_enabled(struct kvm_pmc *pmc)
-{
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
-	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
-}
-
-static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
-					 u32 base)
-{
-	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
-		return &pmu->gp_counters[msr - base];
-	return NULL;
-}
+static int fixed_pmc_events[] = {1, 0, 7};
 
-static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
-{
-	int base = MSR_CORE_PERF_FIXED_CTR0;
-	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
-		return &pmu->fixed_counters[msr - base];
-	return NULL;
-}
-
-static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
-{
-	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
-}
-
-static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
 {
-	if (idx < INTEL_PMC_IDX_FIXED)
-		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
-	else
-		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
-}
+	int i;
 
-void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
-{
-	if (vcpu->arch.apic)
-		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
-}
+	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
+		u8 new_ctrl = fixed_ctr_ctrl_field(data, i);
+		u8 old_ctrl = fixed_ctr_ctrl_field(pmu->fixed_ctr_ctrl, i);
+		struct kvm_pmc *pmc;
 
-static void trigger_pmi(struct irq_work *irq_work)
-{
-	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
-			irq_work);
-	struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
-			arch.pmu);
+		pmc = get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + i);
 
-	kvm_deliver_pmi(vcpu);
-}
+		if (old_ctrl == new_ctrl)
+			continue;
 
-static void kvm_perf_overflow(struct perf_event *perf_event,
-			      struct perf_sample_data *data,
-			      struct pt_regs *regs)
-{
-	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
-	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
-		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
-		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
+		reprogram_fixed_counter(pmc, new_ctrl, i);
 	}
-}
 
-static void kvm_perf_overflow_intr(struct perf_event *perf_event,
-		struct perf_sample_data *data, struct pt_regs *regs)
-{
-	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
-	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
-	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
-		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
-		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
-		/*
-		 * Inject PMI. If vcpu was in a guest mode during NMI PMI
-		 * can be ejected on a guest mode re-entry. Otherwise we can't
-		 * be sure that vcpu wasn't executing hlt instruction at the
-		 * time of vmexit and is not going to re-enter guest mode until,
-		 * woken up. So we should wake it, but this is impossible from
-		 * NMI context. Do it from irq work instead.
-		 */
-		if (!kvm_is_in_guest())
-			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
-		else
-			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
-	}
+	pmu->fixed_ctr_ctrl = data;
 }
 
-static u64 read_pmc(struct kvm_pmc *pmc)
+/* Function is called when global control register has been updated. */
+static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
 {
-	u64 counter, enabled, running;
-
-	counter = pmc->counter;
-
-	if (pmc->perf_event)
-		counter += perf_event_read_value(pmc->perf_event,
-						 &enabled, &running);
-
-	/* FIXME: Scaling needed? */
-
-	return counter & pmc_bitmask(pmc);
-}
+	int bit;
+	u64 diff = pmu->global_ctrl ^ data;
 
-static void stop_counter(struct kvm_pmc *pmc)
-{
-	if (pmc->perf_event) {
-		pmc->counter = read_pmc(pmc);
-		perf_event_release_kernel(pmc->perf_event);
-		pmc->perf_event = NULL;
-	}
-}
+	pmu->global_ctrl = data;
 
-static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
-		unsigned config, bool exclude_user, bool exclude_kernel,
-		bool intr, bool in_tx, bool in_tx_cp)
-{
-	struct perf_event *event;
-	struct perf_event_attr attr = {
-		.type = type,
-		.size = sizeof(attr),
-		.pinned = true,
-		.exclude_idle = true,
-		.exclude_host = 1,
-		.exclude_user = exclude_user,
-		.exclude_kernel = exclude_kernel,
-		.config = config,
-	};
-	if (in_tx)
-		attr.config |= HSW_IN_TX;
-	if (in_tx_cp)
-		attr.config |= HSW_IN_TX_CHECKPOINTED;
-
-	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);
-
-	event = perf_event_create_kernel_counter(&attr, -1, current,
-						 intr ? kvm_perf_overflow_intr :
-						 kvm_perf_overflow, pmc);
-	if (IS_ERR(event)) {
-		printk_once("kvm: pmu event creation failed %ld\n",
-				PTR_ERR(event));
-		return;
+	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX) {
+		reprogram_counter(pmu, bit);
 	}
-
-	pmc->perf_event = event;
-	clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
 }
 
-static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
-		u8 unit_mask)
+static unsigned intel_find_arch_event(struct kvm_pmu *pmu, 
+				      u8 event_select,
+				      u8 unit_mask)
 {
 	int i;
 
@@ -209,115 +86,66 @@ static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
 	return intel_arch_events[i].event_type;
 }
 
-static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
+static unsigned intel_find_fixed_event(int fixed_idx)
 {
-	unsigned config, type = PERF_TYPE_RAW;
-	u8 event_select, unit_mask;
-
-	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
-		printk_once("kvm pmu: pin control bit is ignored\n");
 
-	pmc->eventsel = eventsel;
-
-	stop_counter(pmc);
-
-	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
-		return;
-
-	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
-	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
-
-	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
-				ARCH_PERFMON_EVENTSEL_INV |
-				ARCH_PERFMON_EVENTSEL_CMASK |
-				HSW_IN_TX |
-				HSW_IN_TX_CHECKPOINTED))) {
-		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
-				unit_mask);
-		if (config != PERF_COUNT_HW_MAX)
-			type = PERF_TYPE_HARDWARE;
-	}
-
-	if (type == PERF_TYPE_RAW)
-		config = eventsel & X86_RAW_EVENT_MASK;
+	if (fixed_idx >= ARRAY_SIZE(fixed_pmc_events))
+		return PERF_COUNT_HW_MAX;
 
-	reprogram_counter(pmc, type, config,
-			!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
-			!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
-			eventsel & ARCH_PERFMON_EVENTSEL_INT,
-			(eventsel & HSW_IN_TX),
-			(eventsel & HSW_IN_TX_CHECKPOINTED));
+	return intel_arch_events[fixed_pmc_events[fixed_idx]].event_type;
 }
 
-static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
+/* Check if a PMC is enabled by comparing it with global_ctrl bits. */
+static bool intel_pmc_enabled(struct kvm_pmc *pmc)
 {
-	unsigned en = en_pmi & 0x3;
-	bool pmi = en_pmi & 0x8;
+	struct kvm_pmu *pmu = PMC_TO_PMU(pmc);
 
-	stop_counter(pmc);
-
-	if (!en || !pmc_enabled(pmc))
-		return;
-
-	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
-			intel_arch_events[fixed_pmc_events[idx]].event_type,
-			!(en & 0x2), /* exclude user */
-			!(en & 0x1), /* exclude kernel */
-			pmi, false, false);
-}
-
-static inline u8 fixed_en_pmi(u64 ctrl, int idx)
-{
-	return (ctrl >> (idx * 4)) & 0xf;
+	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
 }
 
-static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
+static struct kvm_pmc *intel_glb_idx_to_pmc(struct kvm_pmu *pmu, int glb_idx)
 {
-	int i;
-
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-		u8 en_pmi = fixed_en_pmi(data, i);
-		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);
-
-		if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
-			continue;
-
-		reprogram_fixed_counter(pmc, en_pmi, i);
+	if (glb_idx < INTEL_PMC_IDX_FIXED)
+		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + glb_idx,
+				  MSR_P6_EVNTSEL0);
+	else {
+		u32 fidx = glb_idx - INTEL_PMC_IDX_FIXED;
+		return get_fixed_pmc(pmu, fidx + MSR_CORE_PERF_FIXED_CTR0);
 	}
-
-	pmu->fixed_ctr_ctrl = data;
 }
 
-static void reprogram_idx(struct kvm_pmu *pmu, int idx)
+/* Returns 0 if msr_idx's corresponding MSR exists; otherwise returns 1. */
+static int intel_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
 {
-	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	bool fixed = msr_idx & (1u << 30);
 
-	if (!pmc)
-		return;
+	msr_idx &= ~(3u << 30);
 
-	if (pmc_is_gp(pmc))
-		reprogram_gp_counter(pmc, pmc->eventsel);
-	else {
-		int fidx = idx - INTEL_PMC_IDX_FIXED;
-		reprogram_fixed_counter(pmc,
-				fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
-	}
+	return (!fixed && msr_idx >= pmu->nr_arch_gp_counters) ||
+		(fixed && msr_idx >= pmu->nr_arch_fixed_counters);
 }
 
-static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
+static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
+					    unsigned msr_idx)
 {
-	int bit;
-	u64 diff = pmu->global_ctrl ^ data;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	bool fixed = msr_idx & (1u << 30);
+	struct kvm_pmc *counters;
 
-	pmu->global_ctrl = data;
+	msr_idx &= ~(3u << 30);
+	if (!fixed && msr_idx >= pmu->nr_arch_gp_counters)
+		return NULL;
+	if (fixed && msr_idx >= pmu->nr_arch_fixed_counters)
+		return NULL;
+	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
 
-	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
-		reprogram_idx(pmu, bit);
+	return &counters[msr_idx];
 }
 
-bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
+static bool intel_is_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
 	int ret;
 
 	switch (msr) {
@@ -328,20 +156,21 @@ bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
 		ret = pmu->version > 1;
 		break;
 	default:
-		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
-			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
-			|| get_fixed_pmc(pmu, msr);
+		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
+			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
+			get_fixed_pmc(pmu, msr);
 		break;
 	}
+
 	return ret;
 }
 
-int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
+static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
 	struct kvm_pmc *pmc;
 
-	switch (index) {
+	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 		*data = pmu->fixed_ctr_ctrl;
 		return 0;
@@ -355,26 +184,27 @@ int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
 		*data = pmu->global_ovf_ctrl;
 		return 0;
 	default:
-		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
-				(pmc = get_fixed_pmc(pmu, index))) {
-			*data = read_pmc(pmc);
+		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
+		    (pmc = get_fixed_pmc(pmu, msr))) {
+			*data = pmc_read_counter(pmc);
 			return 0;
-		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			*data = pmc->eventsel;
 			return 0;
 		}
 	}
+
 	return 1;
 }
 
-int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
+static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
 	struct kvm_pmc *pmc;
-	u32 index = msr_info->index;
+	u32 msr = msr_info->index;
 	u64 data = msr_info->data;
 
-	switch (index) {
+	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
 		if (pmu->fixed_ctr_ctrl == data)
 			return 0;
@@ -406,13 +236,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	default:
-		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
-				(pmc = get_fixed_pmc(pmu, index))) {
+		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
+		    (pmc = get_fixed_pmc(pmu, msr))) {
 			if (!msr_info->host_initiated)
 				data = (s64)(s32)data;
-			pmc->counter += data - read_pmc(pmc);
+			pmc->counter += data - pmc_read_counter(pmc);
 			return 0;
-		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
+		} else if ((pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0))) {
 			if (data == pmc->eventsel)
 				return 0;
 			if (!(data & pmu->reserved_bits)) {
@@ -421,43 +251,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			}
 		}
 	}
-	return 1;
-}
-
-int kvm_pmu_check_pmc(struct kvm_vcpu *vcpu, unsigned pmc)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	bool fixed = pmc & (1u << 30);
-	pmc &= ~(3u << 30);
-	return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
-		(fixed && pmc >= pmu->nr_arch_fixed_counters);
-}
-
-int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	bool fast_mode = pmc & (1u << 31);
-	bool fixed = pmc & (1u << 30);
-	struct kvm_pmc *counters;
-	u64 ctr;
-
-	pmc &= ~(3u << 30);
-	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
-		return 1;
-	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
-		return 1;
-	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
-	ctr = read_pmc(&counters[pmc]);
-	if (fast_mode)
-		ctr = (u32)ctr;
-	*data = ctr;
 
-	return 0;
+	return 1;
 }
 
-void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
+static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
 	struct kvm_cpuid_entry2 *entry;
 	union cpuid10_eax eax;
 	union cpuid10_edx edx;
@@ -506,66 +306,55 @@ void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
 		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
 }
 
-void kvm_pmu_init(struct kvm_vcpu *vcpu)
+static void intel_pmu_init(struct kvm_vcpu *vcpu)
 {
 	int i;
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
 
-	memset(pmu, 0, sizeof(*pmu));
 	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
 		pmu->gp_counters[i].type = KVM_PMC_GP;
 		pmu->gp_counters[i].vcpu = vcpu;
 		pmu->gp_counters[i].idx = i;
 	}
+
 	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
 		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
 		pmu->fixed_counters[i].vcpu = vcpu;
 		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
 	}
-	init_irq_work(&pmu->irq_work, trigger_pmi);
-	kvm_pmu_cpuid_update(vcpu);
 }
 
-void kvm_pmu_reset(struct kvm_vcpu *vcpu)
+static void intel_pmu_reset(struct kvm_vcpu *vcpu)
 {
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
 	int i;
 
-	irq_work_sync(&pmu->irq_work);
 	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
 		struct kvm_pmc *pmc = &pmu->gp_counters[i];
-		stop_counter(pmc);
+
+		pmc_stop_counter(pmc);
 		pmc->counter = pmc->eventsel = 0;
 	}
 
 	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
-		stop_counter(&pmu->fixed_counters[i]);
+		pmc_stop_counter(&pmu->fixed_counters[i]);
 
 	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
 		pmu->global_ovf_ctrl = 0;
 }
 
-void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
-{
-	kvm_pmu_reset(vcpu);
-}
-
-void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
-{
-	struct kvm_pmu *pmu = &vcpu->arch.pmu;
-	u64 bitmask;
-	int bit;
-
-	bitmask = pmu->reprogram_pmi;
-
-	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
-		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);
-
-		if (unlikely(!pmc || !pmc->perf_event)) {
-			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
-			continue;
-		}
-
-		reprogram_idx(pmu, bit);
-	}
-}
+struct kvm_pmu_ops intel_pmu_ops = {
+	.find_arch_event = intel_find_arch_event,
+	.find_fixed_event = intel_find_fixed_event,
+	.pmc_enabled = intel_pmc_enabled,
+	.glb_idx_to_pmc = intel_glb_idx_to_pmc,
+	.check_msr_idx = intel_check_msr_idx,
+	.msr_idx_to_pmc = intel_msr_idx_to_pmc,
+	.is_pmu_msr = intel_is_pmu_msr,
+	.get_msr = intel_pmu_get_msr,
+	.set_msr = intel_pmu_set_msr,
+	.refresh = intel_pmu_refresh,
+	.init = intel_pmu_init,
+	.reset = intel_pmu_reset,
+};
+EXPORT_SYMBOL_GPL(intel_pmu_ops);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cc618c8..fb422d8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -39,6 +39,7 @@
 
 #include <asm/virtext.h>
 #include "trace.h"
+#include "pmu.h"
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
@@ -4330,6 +4331,11 @@ static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
 }
 
+static struct kvm_pmu_ops *svm_get_pmu_ops(void)
+{
+	return &amd_pmu_ops;
+}
+
 static struct kvm_x86_ops svm_x86_ops = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -4432,6 +4438,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.handle_external_intr = svm_handle_external_intr,
 
 	.sched_in = svm_sched_in,
+
+	.get_pmu_ops = svm_get_pmu_ops,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ae4f6d3..6988127 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -48,6 +48,7 @@
 #include <asm/apic.h>
 
 #include "trace.h"
+#include "pmu.h"
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 #define __ex_clear(x, reg) \
@@ -10127,6 +10128,11 @@ static void vmx_enable_log_dirty_pt_masked(struct kvm *kvm,
 	kvm_mmu_clear_dirty_pt_masked(kvm, memslot, offset, mask);
 }
 
+static struct kvm_pmu_ops *vmx_get_pmu_ops(void)
+{
+	return &intel_pmu_ops;
+}
+
 static struct kvm_x86_ops vmx_x86_ops = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -10240,6 +10246,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.slot_disable_log_dirty = vmx_slot_disable_log_dirty,
 	.flush_log_dirty = vmx_flush_log_dirty,
 	.enable_log_dirty_pt_masked = vmx_enable_log_dirty_pt_masked,
+
+	.get_pmu_ops = vmx_get_pmu_ops,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 32bf19e..4d9e7de 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -28,6 +28,7 @@
 #include "x86.h"
 #include "cpuid.h"
 #include "assigned-dev.h"
+#include "pmu.h"
 
 #include <linux/clocksource.h>
 #include <linux/interrupt.h>
@@ -899,7 +900,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu)
 	u64 data;
 	int err;
 
-	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
+	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
 	if (err)
 		return err;
 	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
@@ -2279,7 +2280,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		pr = true;
 	case MSR_P6_EVNTSEL0:
 	case MSR_P6_EVNTSEL1:
-		if (kvm_pmu_msr(vcpu, msr))
+		if (kvm_pmu_is_msr(vcpu, msr))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 
 		if (pr || data != 0)
@@ -2325,7 +2326,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	default:
 		if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
 			return xen_hvm_config(vcpu, data);
-		if (kvm_pmu_msr(vcpu, msr))
+		if (kvm_pmu_is_msr(vcpu, msr))
 			return kvm_pmu_set_msr(vcpu, msr_info);
 		if (!ignore_msrs) {
 			vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
@@ -2519,7 +2520,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_P6_PERFCTR1:
 	case MSR_P6_EVNTSEL0:
 	case MSR_P6_EVNTSEL1:
-		if (kvm_pmu_msr(vcpu, msr))
+		if (kvm_pmu_is_msr(vcpu, msr))
 			return kvm_pmu_get_msr(vcpu, msr, pdata);
 		data = 0;
 		break;
@@ -2642,7 +2643,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 		data = vcpu->arch.osvw.status;
 		break;
 	default:
-		if (kvm_pmu_msr(vcpu, msr))
+		if (kvm_pmu_is_msr(vcpu, msr))
 			return kvm_pmu_get_msr(vcpu, msr, pdata);
 		if (!ignore_msrs) {
 			vcpu_unimpl(vcpu, "unhandled rdmsr: 0x%x\n", msr);
@@ -4918,13 +4919,13 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
 static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
 			      u32 pmc)
 {
-	return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
+	return kvm_pmu_check_msr_idx(emul_to_vcpu(ctxt), pmc);
 }
 
 static int emulator_read_pmc(struct x86_emulate_ctxt *ctxt,
 			     u32 pmc, u64 *pdata)
 {
-	return kvm_pmu_read_pmc(emul_to_vcpu(ctxt), pmc, pdata);
+	return kvm_pmu_rdpmc(emul_to_vcpu(ctxt), pmc, pdata);
 }
 
 static void emulator_halt(struct x86_emulate_ctxt *ctxt)
@@ -5782,6 +5783,9 @@ int kvm_arch_init(void *opaque)
 
 	kvm_timer_init();
 
+        /* init pointers of PMU operations */
+	kvm_pmu_arch_init(ops);
+
 	perf_register_guest_info_callbacks(&kvm_guest_cbs);
 
 	if (cpu_has_xsave)
@@ -5812,6 +5816,7 @@ void kvm_arch_exit(void)
 	pvclock_gtod_unregister_notifier(&pvclock_gtod_notifier);
 #endif
 	kvm_x86_ops = NULL;
+	kvm_pmu_arch_exit();
 	kvm_mmu_module_exit();
 	free_percpu(shared_msrs);
 }
@@ -6218,9 +6223,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
 			process_nmi(vcpu);
 		if (kvm_check_request(KVM_REQ_PMU, vcpu))
-			kvm_handle_pmu_event(vcpu);
+			kvm_pmu_handle_event(vcpu);
 		if (kvm_check_request(KVM_REQ_PMI, vcpu))
-			kvm_deliver_pmi(vcpu);
+			kvm_pmu_deliver_pmi(vcpu);
 		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
 			vcpu_scan_ioapic(vcpu);
 		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [PATCH V2 4/5] KVM: x86/vPMU: Implement vPMU code AMD CPUs
  2015-04-08 16:18 [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
                   ` (2 preceding siblings ...)
  2015-04-08 16:18 ` [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM Wei Huang
@ 2015-04-08 16:18 ` Wei Huang
  2015-04-08 16:18 ` [PATCH V2 5/5] KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs Wei Huang
                   ` (2 subsequent siblings)
  6 siblings, 0 replies; 19+ messages in thread
From: Wei Huang @ 2015-04-08 16:18 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar, gleb, wei

This patch replaces the empty AMD vPMU functions in pmu_amd.c with a real
implementation. The features of AMD vPMU are now complete.
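
For reference, the event translation this patch adds behaves like the
stand-alone sketch below (not part of the patch): the two table entries and
the PERF_COUNT_HW_MAX fallback mirror the patch, while the local enum values,
struct layout and main() harness are simplified stand-ins for illustration.

#include <stdio.h>

enum {
	PERF_COUNT_HW_CPU_CYCLES,
	PERF_COUNT_HW_INSTRUCTIONS,
	PERF_COUNT_HW_MAX = 10		/* stand-in value */
};

struct event_mapping {
	unsigned char eventsel;
	unsigned char unit_mask;
	unsigned int event_type;
};

static const struct event_mapping amd_event_mapping[] = {
	{ 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },	/* entry [0] from the patch */
	{ 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },	/* entry [1] from the patch */
	/* ... remaining entries as in the patch ... */
};

static unsigned int find_arch_event(unsigned char event_select, unsigned char unit_mask)
{
	size_t i;

	/* Linear search over the (eventsel, unit_mask) pairs. */
	for (i = 0; i < sizeof(amd_event_mapping) / sizeof(amd_event_mapping[0]); i++)
		if (amd_event_mapping[i].eventsel == event_select &&
		    amd_event_mapping[i].unit_mask == unit_mask)
			return amd_event_mapping[i].event_type;

	/* No match: the caller then falls back to a raw perf event. */
	return PERF_COUNT_HW_MAX;
}

int main(void)
{
	printf("%u\n", find_arch_event(0x76, 0x00));	/* 0: PERF_COUNT_HW_CPU_CYCLES */
	printf("%u\n", find_arch_event(0x42, 0x01));	/* 10: PERF_COUNT_HW_MAX (no match) */
	return 0;
}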

Signed-off-by: Wei Huang <wei@redhat.com>
---
 arch/x86/kvm/pmu_amd.c | 113 ++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 107 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 7d81b30..7ece431 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -19,65 +19,166 @@
 #include "lapic.h"
 #include "pmu.h"
 
+/* duplicated from amd_perfmon_event_map; K7 and above should work. */
+static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
+	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+	[2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
+	[3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
+	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+};
+
 static unsigned amd_find_arch_event(struct kvm_pmu *pmu, 
 				    u8 event_select,
 				    u8 unit_mask)
 {
-	return PERF_COUNT_HW_MAX;
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
+		if (amd_event_mapping[i].eventsel == event_select
+		    && amd_event_mapping[i].unit_mask == unit_mask)
+			break;
+
+	if (i == ARRAY_SIZE(amd_event_mapping))
+		return PERF_COUNT_HW_MAX;
+
+	return amd_event_mapping[i].event_type;
 }
 
+/* Return PERF_COUNT_HW_MAX because AMD doesn't have fixed events. */
 static unsigned amd_find_fixed_event(int fixed_idx)
 {
 	return PERF_COUNT_HW_MAX;
 }
 
+/* Check if a PMC is enabled by comparing it against global_ctrl bits. Because
+ * AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled (return TRUE).
+ */
 static bool amd_pmc_enabled(struct kvm_pmc *pmc)
 {
-	return false;
+	return true;
 }
 
 static struct kvm_pmc *amd_glb_idx_to_pmc(struct kvm_pmu *pmu, int glb_idx)
 {
-	return NULL;
+	return get_gp_pmc(pmu, MSR_K7_EVNTSEL0 + glb_idx, MSR_K7_EVNTSEL0);
 }
 
 /* Returns 0 if msr_idx's corresponding MSR exists; otherwise returns 1. */
 static int amd_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
 {
-	return 1;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+
+	msr_idx &= ~(3u << 30);
+
+	return (msr_idx >= pmu->nr_arch_gp_counters);
 }
 
 static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, 
 					  unsigned msr_idx)
 {
-	return NULL;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	struct kvm_pmc *counters;
+
+	msr_idx &= ~(3u << 30);
+	if (msr_idx >= pmu->nr_arch_gp_counters)
+		return NULL;
+	counters = pmu->gp_counters;
+
+	return &counters[msr_idx];
 }
 
 static bool amd_is_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
-	return false;
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	int ret = false;
+
+	ret = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0) ||
+		get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);
+
+	return ret;
 }
 
 static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 {
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	struct kvm_pmc *pmc;
+
+	if ((pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0))) {
+		*data = pmc_read_counter(pmc);
+		return 0;
+	} else if ((pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0))) {
+		*data = pmc->eventsel;
+		return 0;
+	}
+
 	return 1;
 }
 
 static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 {
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	struct kvm_pmc *pmc;
+	u32 msr = msr_info->index;
+	u64 data = msr_info->data;
+
+	if ((pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0))) {
+		if (!msr_info->host_initiated)
+			data = (s64)data;
+		pmc->counter += data - pmc_read_counter(pmc);
+		return 0;
+	} else if ((pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0))) {
+		if (data == pmc->eventsel)
+			return 0;
+		if (!(data & pmu->reserved_bits)) {
+			reprogram_gp_counter(pmc, data);
+			return 0;
+		}
+	}
+
 	return 1;
 }
 
 static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 {
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+
+	pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;
+	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
+	pmu->reserved_bits = 0xffffffff00200000ull;
+	/* Not applicable to AMD; clear them to prevent any fallout */
+	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
+	pmu->nr_arch_fixed_counters = 0;
+	pmu->version = 0;
+	pmu->global_status = 0;
 }
 
 static void amd_pmu_init(struct kvm_vcpu *vcpu)
 {
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	int i;
+
+	for (i = 0; i < AMD64_NUM_COUNTERS ; i++) {
+		pmu->gp_counters[i].type = KVM_PMC_GP;
+		pmu->gp_counters[i].vcpu = vcpu;
+		pmu->gp_counters[i].idx = i;
+	}
 }
 
 static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 {
+	struct kvm_pmu *pmu = VCPU_TO_PMU(vcpu);
+	int i;
+
+	for (i = 0; i < AMD64_NUM_COUNTERS; i++) {
+		struct kvm_pmc *pmc = &pmu->gp_counters[i];
+
+		pmc_stop_counter(pmc);
+		pmc->counter = pmc->eventsel = 0;
+	}
 }
 
 struct kvm_pmu_ops amd_pmu_ops = {
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* [PATCH V2 5/5] KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs
  2015-04-08 16:18 [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
                   ` (3 preceding siblings ...)
  2015-04-08 16:18 ` [PATCH V2 4/5] KVM: x86/vPMU: Implement vPMU code AMD CPUs Wei Huang
@ 2015-04-08 16:18 ` Wei Huang
  2015-04-08 16:22 ` [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
  2015-04-09 19:05 ` Radim Krčmář
  6 siblings, 0 replies; 19+ messages in thread
From: Wei Huang @ 2015-04-08 16:18 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar, gleb, wei

This patch enables PMU register handling (R/W) for the AMD performance
counter MSRs, PERFCTR[0..3] and EVNTSEL[0..3].
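
For context, a guest that exercises these MSRs does roughly the following
(a sketch only, not part of the patch): the MSR names are the ones this
patch now handles, the event-select bit positions (USR=16, OS=17, EN=22)
are the architectural AMD layout, event 0x76 ("CPU clocks not halted")
matches entry [0] of amd_event_mapping from patch 4/5, and the helper name
is made up for illustration.

#include <linux/types.h>
#include <asm/msr.h>

static u64 count_cycles_sketch(void)
{
	u64 cycles;

	/* Event 0x76, count user (bit 16) and kernel (bit 17), enable (bit 22). */
	wrmsrl(MSR_K7_EVNTSEL0, 0x76 | (1ULL << 16) | (1ULL << 17) | (1ULL << 22));

	/* ... run the workload being measured ... */

	rdmsrl(MSR_K7_PERFCTR0, cycles);

	/* Stop the counter again. */
	wrmsrl(MSR_K7_EVNTSEL0, 0);

	return cycles;
}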

Signed-off-by: Wei Huang <wei@redhat.com>
---
 arch/x86/kvm/x86.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4d9e7de..2e0b81b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2257,24 +2257,22 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	 * which we perfectly emulate ;-). Any other value should be at least
 	 * reported, some guests depend on them.
 	 */
+	case MSR_K7_PERFCTR0:
+	case MSR_K7_PERFCTR1:
+	case MSR_K7_PERFCTR2:
+	case MSR_K7_PERFCTR3:
+		pr = true;
 	case MSR_K7_EVNTSEL0:
 	case MSR_K7_EVNTSEL1:
 	case MSR_K7_EVNTSEL2:
 	case MSR_K7_EVNTSEL3:
-		if (data != 0)
-			vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
+		if (kvm_pmu_ops->is_pmu_msr(vcpu, msr))
+			return kvm_pmu_set_msr(vcpu, msr_info);
+
+		if (pr || data != 0)
+			vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
 				    "0x%x data 0x%llx\n", msr, data);
 		break;
-	/* at least RHEL 4 unconditionally writes to the perfctr registers,
-	 * so we ignore writes to make it happy.
-	 */
-	case MSR_K7_PERFCTR0:
-	case MSR_K7_PERFCTR1:
-	case MSR_K7_PERFCTR2:
-	case MSR_K7_PERFCTR3:
-		vcpu_unimpl(vcpu, "unimplemented perfctr wrmsr: "
-			    "0x%x data 0x%llx\n", msr, data);
-		break;
 	case MSR_P6_PERFCTR0:
 	case MSR_P6_PERFCTR1:
 		pr = true;
@@ -2502,6 +2500,12 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_K8_SYSCFG:
 	case MSR_K7_HWCR:
 	case MSR_VM_HSAVE_PA:
+	case MSR_K8_INT_PENDING_MSG:
+	case MSR_AMD64_NB_CFG:
+	case MSR_FAM10H_MMIO_CONF_BASE:
+	case MSR_AMD64_BU_CFG2:
+		data = 0;
+		break;
 	case MSR_K7_EVNTSEL0:
 	case MSR_K7_EVNTSEL1:
 	case MSR_K7_EVNTSEL2:
@@ -2510,10 +2514,8 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_K7_PERFCTR1:
 	case MSR_K7_PERFCTR2:
 	case MSR_K7_PERFCTR3:
-	case MSR_K8_INT_PENDING_MSG:
-	case MSR_AMD64_NB_CFG:
-	case MSR_FAM10H_MMIO_CONF_BASE:
-	case MSR_AMD64_BU_CFG2:
+		if (kvm_pmu_ops->is_pmu_msr(vcpu, msr))
+			return kvm_pmu_get_msr(vcpu, msr, pdata);
 		data = 0;
 		break;
 	case MSR_P6_PERFCTR0:
-- 
1.8.3.1


^ permalink raw reply related	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 0/5] KVM vPMU support for AMD
  2015-04-08 16:18 [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
                   ` (4 preceding siblings ...)
  2015-04-08 16:18 ` [PATCH V2 5/5] KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs Wei Huang
@ 2015-04-08 16:22 ` Wei Huang
  2015-04-09 19:05 ` Radim Krčmář
  6 siblings, 0 replies; 19+ messages in thread
From: Wei Huang @ 2015-04-08 16:22 UTC (permalink / raw)
  To: kvm; +Cc: pbonzini, rkrcmar, gleb

Hi Radim: This version should address most of your comments on V1.
Please help review it.

Hi Paolo: This version has been polished since V1. Could you please
review it?

Thanks,
-Wei

On 04/08/2015 11:18 AM, Wei Huang wrote:
> Currently KVM only supports vPMU for Intel platforms. This patch set 
> enable vPMU support for AMD platform by creating a common PMU
> interface for x86. The PMU calls from guest VMs are dispatched
> to corresponding functions defined in arch specific files.
> 
> V2:
>   * Create a generic pmu.c file which is shared by Intel and AMD CPUs;
>   * pmu.c code becomes part of kvm.ko module. Similarly pmu_intel.c and
>     pmu_amd.c are linked to kvm-intel.ko and kvm-amd.ko respectively;
>   * Re-define kvm_pmu_ops function pointers. Per Radim Krcmar's comments,
>     a large portion of Intel vPMU code are now consolidated and moved to
>     pmu.c;
>   * Polish pmu_amd.c code to comply with new definition of kvm_pmu_ops;
> 
> V1:
>   * Adopt the file layout suggested by Radim Krcmar
>   * Link arch module with its specific PMU file
> 
> RFC:
>   * Initial version for RFC
> 
> Wei Huang (5):
>   KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch
>   KVM: x86/vPMU: Rename pmu.c file to pmu_intel.c
>   KVM: x86/vPMU: Create vPMU interface for VMX and SVM
>   KVM: x86/vPMU: Implement vPMU code AMD CPUs
>   KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs
> 
>  arch/x86/include/asm/kvm_host.h |  38 ++-
>  arch/x86/kvm/Makefile           |   4 +-
>  arch/x86/kvm/cpuid.c            |   3 +-
>  arch/x86/kvm/pmu.c              | 563 +++++++++++-----------------------------
>  arch/x86/kvm/pmu.h              |  98 +++++++
>  arch/x86/kvm/pmu_amd.c          | 198 ++++++++++++++
>  arch/x86/kvm/pmu_intel.c        | 360 +++++++++++++++++++++++++
>  arch/x86/kvm/svm.c              |   8 +
>  arch/x86/kvm/vmx.c              |   8 +
>  arch/x86/kvm/x86.c              |  57 ++--
>  10 files changed, 896 insertions(+), 441 deletions(-)
>  create mode 100644 arch/x86/kvm/pmu.h
>  create mode 100644 arch/x86/kvm/pmu_amd.c
>  create mode 100644 arch/x86/kvm/pmu_intel.c
> 

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 0/5] KVM vPMU support for AMD
  2015-04-08 16:18 [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
                   ` (5 preceding siblings ...)
  2015-04-08 16:22 ` [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
@ 2015-04-09 19:05 ` Radim Krčmář
  2015-04-09 19:19   ` Wei Huang
  6 siblings, 1 reply; 19+ messages in thread
From: Radim Krčmář @ 2015-04-09 19:05 UTC (permalink / raw)
  To: Wei Huang; +Cc: kvm, pbonzini, gleb

2015-04-08 12:18-0400, Wei Huang:
> Currently KVM only supports vPMU for Intel platforms. This patch set 
> enable vPMU support for AMD platform by creating a common PMU
> interface for x86. The PMU calls from guest VMs are dispatched
> to corresponding functions defined in arch specific files.
> 
> V2:
>   * Create a generic pmu.c file which is shared by Intel and AMD CPUs;
>   * pmu.c code becomes part of kvm.ko module. Similarly pmu_intel.c and
>     pmu_amd.c are linked to kvm-intel.ko and kvm-amd.ko respectively;
>   * Re-define kvm_pmu_ops function pointers. Per Radim Krcmar's comments,
>     a large portion of Intel vPMU code are now consolidated and moved to
>     pmu.c;
>   * Polish pmu_amd.c code to comply with new definition of kvm_pmu_ops;

It doesn't apply on top of the current kvm queue; I'll have cosmetic change
requests that would go well with a rebase.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 2/5] KVM: x86/vPMU: Rename pmu.c file to pmu_intel.c
  2015-04-08 16:18 ` [PATCH V2 2/5] KVM: x86/vPMU: Rename pmu.c file to pmu_intel.c Wei Huang
@ 2015-04-09 19:10   ` Radim Krčmář
  2015-04-09 19:23     ` Wei Huang
  0 siblings, 1 reply; 19+ messages in thread
From: Radim Krčmář @ 2015-04-09 19:10 UTC (permalink / raw)
  To: Wei Huang; +Cc: kvm, pbonzini, gleb

2015-04-08 12:18-0400, Wei Huang:
> As a preparation to unify vPMU implementation, this patch renames pmu.c
> to pmu_intel.c. Makefile is changed accordingly.
> 
> Signed-off-by: Wei Huang <wei@redhat.com>
> ---

(Since v2 does move quite a bit of it back to pmu.c, we can also decide
 to keep git history in pmu.c, instead of pmu_intel.c ... I suppose
 there isn't huge advantage in doing it differently, though.)

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 0/5] KVM vPMU support for AMD
  2015-04-09 19:05 ` Radim Krčmář
@ 2015-04-09 19:19   ` Wei Huang
  0 siblings, 0 replies; 19+ messages in thread
From: Wei Huang @ 2015-04-09 19:19 UTC (permalink / raw)
  To: Radim Krčmář; +Cc: kvm, pbonzini, gleb

On 4/9/15 14:05, Radim Krčmář wrote:
> 2015-04-08 12:18-0400, Wei Huang:
>> Currently KVM only supports vPMU for Intel platforms. This patch set
>> enable vPMU support for AMD platform by creating a common PMU
>> interface for x86. The PMU calls from guest VMs are dispatched
>> to corresponding functions defined in arch specific files.
>>
>> V2:
>>    * Create a generic pmu.c file which is shared by Intel and AMD CPUs;
>>    * pmu.c code becomes part of kvm.ko module. Similarly pmu_intel.c and
>>      pmu_amd.c are linked to kvm-intel.ko and kvm-amd.ko respectively;
>>    * Re-define kvm_pmu_ops function pointers. Per Radim Krcmar's comments,
>>      a large portion of Intel vPMU code are now consolidated and moved to
>>      pmu.c;
>>    * Polish pmu_amd.c code to comply with new definition of kvm_pmu_ops;
>
> It doesn't apply on top of current kvm queue, I'll have cosmetic change
> requests that would go well with a rebase.
Sorry about it. It was applied on top of Linux 4.0-rc6 (e42391cd04880).

-Wei
>

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 2/5] KVM: x86/vPMU: Rename pmu.c file to pmu_intel.c
  2015-04-09 19:10   ` Radim Krčmář
@ 2015-04-09 19:23     ` Wei Huang
  0 siblings, 0 replies; 19+ messages in thread
From: Wei Huang @ 2015-04-09 19:23 UTC (permalink / raw)
  To: Radim Krčmář; +Cc: kvm, pbonzini, gleb

On 4/9/15 14:10, Radim Krčmář wrote:
> 2015-04-08 12:18-0400, Wei Huang:
>> As a preparation to unify vPMU implementation, this patch renames pmu.c
>> to pmu_intel.c. Makefile is changed accordingly.
>>
>> Signed-off-by: Wei Huang <wei@redhat.com>
>> ---
>
> (Since v2 does move quite a bit of it back to pmu.c, we can also decide
>   to keep git history in pmu.c, instead of pmu_intel.c ... I suppose
>   there isn't huge advantage in doing it differently, though.)
That should be easy to fix; I will do it in the next spin.

-Wei
>

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  2015-04-08 16:18 ` [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM Wei Huang
@ 2015-04-09 19:43   ` Radim Krčmář
  2015-04-09 20:03     ` Wei Huang
  2015-04-20 18:33     ` Wei Huang
  2015-04-10 12:57   ` Radim Krčmář
  1 sibling, 2 replies; 19+ messages in thread
From: Radim Krčmář @ 2015-04-09 19:43 UTC (permalink / raw)
  To: Wei Huang; +Cc: kvm, pbonzini, gleb

2015-04-08 12:18-0400, Wei Huang:
> This patch converts existing Intel specific vPMU code into a common vPMU
> interface with the following steps:
> 
> - A large portion of Intel vPMU code is now moved to pmu.c file. The rest
>   is re-arranged and hooked up with the newly-defined intel_pmu_ops.
> - Create a corresponding pmu_amd.c file with empty functions for AMD SVM
> - The PMU function pointer, kvm_pmu_ops, is initialized by calling
>   kvm_x86_ops->get_pmu_ops().
> - To reduce the code size, Intel and AMD modules are now generated
>   from their corresponding arch and PMU files; In the meanwhile, due
>   to this arrangement several functions are exposed as public ones
>   to allow calling from PMU code.
> 
> Signed-off-by: Wei Huang <wei@redhat.com>
> ---
> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
| [...]
> +static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, 
> +				  unsigned config, bool exclude_user, 
> +				  bool exclude_kernel, bool intr, 

This patch introduces a lot of trailing whitespaces, please remove them.
(`git am` says 15.)

| [...]
> +EXPORT_SYMBOL_GPL(reprogram_counter);
> +

(double line)

> +
> +void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
| [...]
> +/* check if msr_idx is a valid index to access PMU */
> +inline int kvm_pmu_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)

If we really want it inline, it's better done in header.
(I think GCC would inline this in-module anyway, but other modules still
 have to call it.)

> +{
> +	return kvm_pmu_ops->check_msr_idx(vcpu, msr_idx);
> +}
> +
| [...]
> +bool kvm_pmu_is_msr(struct kvm_vcpu *vcpu, u32 msr)

(Might make sense to inline these trivial wrappers.)

> +{
> +	return kvm_pmu_ops->is_pmu_msr(vcpu, msr);
> +}
> +
> +int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
> +{
> +	return kvm_pmu_ops->get_msr(vcpu, msr, data);
> +}
> +
> +int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> +{
> +	return kvm_pmu_ops->set_msr(vcpu, msr_info);
> +	

(Technically also a trailing newline.)

> +}
| [...]
> +/* Called before using any PMU functions above */
> +int kvm_pmu_arch_init(struct kvm_x86_ops *x86_ops)
> +{
> +	kvm_pmu_ops = x86_ops->get_pmu_ops();

I guess you forgot this line ^^.

> +
> +	if (x86_ops && (kvm_pmu_ops = x86_ops->get_pmu_ops()) != NULL)
> +		return 0;
> +	else
> +		return -EINVAL;
> +}
> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
> @@ -0,0 +1,98 @@
> +#ifndef __KVM_X86_PMU_H
> +#define __KVM_X86_PMU_H
> +
> +#define VCPU_TO_PMU(vcpu) (&(vcpu)->arch.pmu)
> +#define PMU_TO_VCPU(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
> +#define PMC_TO_PMU(pmc)   (&(pmc)->vcpu->arch.pmu)

(We are replacing inline functions with these macros and they wouldn't
 have been in caps, so I would use lower case; looks better too, IMO.)

> +/* retrieve the control fields of IA32_FIXED_CTR_CTRL */
> +#define fixed_ctr_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
| [...]
> diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
> @@ -328,20 +156,21 @@ bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
>  		ret = pmu->version > 1;
>  		break;
>  	default:
> -		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
> -			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
> -			|| get_fixed_pmc(pmu, msr);
> +		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
> +			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
> +			get_fixed_pmc(pmu, msr);
>  		break;
>  	}
> +
>  	return ret;
>  }

(This hunk and the following hunks where you mostly change index -> msr
 could have been in a separate cleanup patch.)

> -int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
> +static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
| [...]
> -int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> +static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
| [...]
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 32bf19e..4d9e7de 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -28,6 +28,7 @@
>  #include "x86.h"
>  #include "cpuid.h"
>  #include "assigned-dev.h"
> +#include "pmu.h"
>  
>  #include <linux/clocksource.h>
>  #include <linux/interrupt.h>
> @@ -899,7 +900,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu)
>  	u64 data;
>  	int err;
>  
> -	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
> +	err = kvm_pmu_rdpmc(vcpu, ecx, &data);

(What was the reason behind the new name?)

>  	if (err)
>  		return err;
>  	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
> @@ -2279,7 +2280,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>  		pr = true;
>  	case MSR_P6_EVNTSEL0:
>  	case MSR_P6_EVNTSEL1:
> -		if (kvm_pmu_msr(vcpu, msr))
> +		if (kvm_pmu_is_msr(vcpu, msr))

(I liked kvm_pmu_msr better.  The 'is' is in a wrong spot ... we know it
 is "a MSR", we want to know if it is "PMU MSR".)

> @@ -4918,13 +4919,13 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
>  static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
>  			      u32 pmc)
>  {
> -	return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
> +	return kvm_pmu_check_msr_idx(emul_to_vcpu(ctxt), pmc);

(Why not pmc?)

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  2015-04-09 19:43   ` Radim Krčmář
@ 2015-04-09 20:03     ` Wei Huang
  2015-04-09 20:54       ` Radim Krčmář
  2015-04-20 18:33     ` Wei Huang
  1 sibling, 1 reply; 19+ messages in thread
From: Wei Huang @ 2015-04-09 20:03 UTC (permalink / raw)
  To: Radim Krčmář; +Cc: kvm, pbonzini, gleb



On 4/9/15 14:43, Radim Krčmář wrote:
> 2015-04-08 12:18-0400, Wei Huang:
>> This patch converts existing Intel specific vPMU code into a common vPMU
>> interface with the following steps:
>>
>> - A large portion of Intel vPMU code is now moved to pmu.c file. The rest
>>    is re-arranged and hooked up with the newly-defined intel_pmu_ops.
>> - Create a corresponding pmu_amd.c file with empty functions for AMD SVM
>> - The PMU function pointer, kvm_pmu_ops, is initialized by calling
>>    kvm_x86_ops->get_pmu_ops().
>> - To reduce the code size, Intel and AMD modules are now generated
>>    from their corresponding arch and PMU files; In the meanwhile, due
>>    to this arrangement several functions are exposed as public ones
>>    to allow calling from PMU code.
>>
>> Signed-off-by: Wei Huang <wei@redhat.com>
>> ---
>> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> | [...]
>> +static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
>> +				  unsigned config, bool exclude_user,
>> +				  bool exclude_kernel, bool intr,
>
> This patch introduces a lot of trailing whitespaces, please remove them.
> (`git am` says 15.)
Some of them were carried from original pmu.c file. I will purge them in V3.

>
> | [...]
>> +EXPORT_SYMBOL_GPL(reprogram_counter);
>> +
>
> (double line)
>
OK

>> +
>> +void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
> | [...]
>> +/* check if msr_idx is a valid index to access PMU */
>> +inline int kvm_pmu_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
>
> If we really want it inline, it's better done in header.
> (I think GCC would inline this in-module anyway, but other modules still
>   have to call it.)
OK

>
>> +{
>> +	return kvm_pmu_ops->check_msr_idx(vcpu, msr_idx);
>> +}
>> +
> | [...]
>> +bool kvm_pmu_is_msr(struct kvm_vcpu *vcpu, u32 msr)
>
> (Might make sense to inline these trivial wrappers.)
>
OK, I will move them to pmu.h header as inline.

>> +{
>> +	return kvm_pmu_ops->is_pmu_msr(vcpu, msr);
>> +}
>> +
>> +int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
>> +{
>> +	return kvm_pmu_ops->get_msr(vcpu, msr, data);
>> +}
>> +
>> +int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>> +{
>> +	return kvm_pmu_ops->set_msr(vcpu, msr_info);
>> +	
>
> (Technically also a trailing newline.)
OK

>
>> +}
> | [...]
>> +/* Called before using any PMU functions above */
>> +int kvm_pmu_arch_init(struct kvm_x86_ops *x86_ops)
>> +{
>> +	kvm_pmu_ops = x86_ops->get_pmu_ops();
>
> I guess you forgot this line ^^.
>
Ahh, will fix it.

>> +
>> +	if (x86_ops && (kvm_pmu_ops = x86_ops->get_pmu_ops()) != NULL)
>> +		return 0;
>> +	else
>> +		return -EINVAL;
>> +}
>> diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
>> @@ -0,0 +1,98 @@
>> +#ifndef __KVM_X86_PMU_H
>> +#define __KVM_X86_PMU_H
>> +
>> +#define VCPU_TO_PMU(vcpu) (&(vcpu)->arch.pmu)
>> +#define PMU_TO_VCPU(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
>> +#define PMC_TO_PMU(pmc)   (&(pmc)->vcpu->arch.pmu)
>
> (We are replacing inline functions with these macros and they wouldn't
>   have been in caps, so I would use lower case; looks better too, IMO.)
>
OK, will make it to lower case.

>> +/* retrieve the control fields of IA32_FIXED_CTR_CTRL */
>> +#define fixed_ctr_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
> | [...]
>> diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
>> @@ -328,20 +156,21 @@ bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
>>   		ret = pmu->version > 1;
>>   		break;
>>   	default:
>> -		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
>> -			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
>> -			|| get_fixed_pmc(pmu, msr);
>> +		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
>> +			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
>> +			get_fixed_pmc(pmu, msr);
>>   		break;
>>   	}
>> +
>>   	return ret;
>>   }
>
> (This hunk and the following hunks where you mostly change index -> msr
>   could have been in a separate cleanup patch.)
>
OK, agreed that such changes can be separated. I will extract them to 
make your life easier.

>> -int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
>> +static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
> | [...]
>> -int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>> +static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
> | [...]
>> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
>> index 32bf19e..4d9e7de 100644
>> --- a/arch/x86/kvm/x86.c
>> +++ b/arch/x86/kvm/x86.c
>> @@ -28,6 +28,7 @@
>>   #include "x86.h"
>>   #include "cpuid.h"
>>   #include "assigned-dev.h"
>> +#include "pmu.h"
>>
>>   #include <linux/clocksource.h>
>>   #include <linux/interrupt.h>
>> @@ -899,7 +900,7 @@ bool kvm_rdpmc(struct kvm_vcpu *vcpu)
>>   	u64 data;
>>   	int err;
>>
>> -	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
>> +	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
>
> (What was the reason behind the new name?)
The original kvm_pmu_read_pmc() was only used for the RDPMC instruction.
This change makes it easier to correlate with RDPMC directly; plus pmc is
a confusing term in the vPMU code (see "Design Note" in the pmu.c file),
which makes me think kvm_pmu_read_pmc() should read from "struct kvm_pmc"
directly.

>
>>   	if (err)
>>   		return err;
>>   	kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
>> @@ -2279,7 +2280,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>>   		pr = true;
>>   	case MSR_P6_EVNTSEL0:
>>   	case MSR_P6_EVNTSEL1:
>> -		if (kvm_pmu_msr(vcpu, msr))
>> +		if (kvm_pmu_is_msr(vcpu, msr))
>
> (I liked kvm_pmu_msr better.  The 'is' is in a wrong spot ... we know it
>   is "a MSR", we want to know if it is "PMU MSR".)
>
OK

>> @@ -4918,13 +4919,13 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
>>   static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
>>   			      u32 pmc)
>>   {
>> -	return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
>> +	return kvm_pmu_check_msr_idx(emul_to_vcpu(ctxt), pmc);
>
> (Why not pmc?)
See "Design Note" in pmu.c for a better explanation. I tried to use msr 
as real x86 MSR; and msr_idx refers to MSR offset.

>

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  2015-04-09 20:03     ` Wei Huang
@ 2015-04-09 20:54       ` Radim Krčmář
  2015-04-09 21:08         ` Wei Huang
  0 siblings, 1 reply; 19+ messages in thread
From: Radim Krčmář @ 2015-04-09 20:54 UTC (permalink / raw)
  To: Wei Huang; +Cc: kvm, pbonzini, gleb

2015-04-09 15:03-0500, Wei Huang:
> On 4/9/15 14:43, Radim Krčmář wrote:
>> This patch introduces a lot of trailing whitespaces, please remove them.
>> (`git am` says 15.)
> Some of them were carried from original pmu.c file. I will purge them in V3.

(Ah, I didn't see them in kvm_intel.c and didn't check further.)

>>>-	err = kvm_pmu_read_pmc(vcpu, ecx, &data);
>>>+	err = kvm_pmu_rdpmc(vcpu, ecx, &data);
>>
>>(What was the reason behind the new name?)
> The original kvm_pmu_read_pmc() was only used for the RDPMC instruction. This
> change makes it easier to correlate with RDPMC directly; plus pmc is a confusing
> term in the vPMU code (see "Design Note" in the pmu.c file), which makes me think
> kvm_pmu_read_pmc() should read from "struct kvm_pmc" directly.

Makes sense,

Thanks.

>>>@@ -4918,13 +4919,13 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
>>>  static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
>>>  			      u32 pmc)
>>>  {
>>>-	return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
>>>+	return kvm_pmu_check_msr_idx(emul_to_vcpu(ctxt), pmc);
>>
>>(Why not pmc?)
> See "Design Note" in pmu.c for a better explanation. I tried to use msr as
> real x86 MSR; and msr_idx refers to MSR offset.

I skipped the comment as I thought it was there before, sorry ...

I wouldn't call it MSR index, MSR is just a related interface for PMC,
and MSR indices don't even have simple mapping to RDPMC ones.
We are indexing PMC without MSR, so index/pmc_idx/pmc seems better.

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  2015-04-09 20:54       ` Radim Krčmář
@ 2015-04-09 21:08         ` Wei Huang
  2015-04-10 12:53           ` Radim Krčmář
  0 siblings, 1 reply; 19+ messages in thread
From: Wei Huang @ 2015-04-09 21:08 UTC (permalink / raw)
  To: Radim Krčmář; +Cc: kvm, pbonzini, gleb

<snip>
>>>> @@ -4918,13 +4919,13 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
>>>>  static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
>>>>  			      u32 pmc)
>>>>  {
>>>> -	return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
>>>> +	return kvm_pmu_check_msr_idx(emul_to_vcpu(ctxt), pmc);
>>>
>>> (Why not pmc?)
>> See "Design Note" in pmu.c for a better explanation. I tried to use msr as
>> real x86 MSR; and msr_idx refers to MSR offset.
> 
> I skipped the comment as I thought it was there before, sorry ...
> 
> I wouldn't call it MSR index, MSR is just a related interface for PMC,
> and MSR indices don't even have simple mapping to RDPMC ones.
> We are indexing PMC without MSR, so index/pmc_idx/pmc seems better.
> 
I can fix the name of this function (maybe back to kvm_pmu_check_pmc(),
let me think about it). In the meantime, do you have any comments on
the following names? They will impact the rest of the code:

* msr: MSR for x86
* msr_idx: offset of MSR registers (used by rdpmc)
* glb_idx: a unified index for both GP and fixed counters (should we
rename it to idx instead?)
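
To make the distinction concrete, here is a minimal user-space sketch of how
the rdpmc-style msr_idx maps onto glb_idx on the Intel side; the helper name
is made up and INTEL_PMC_IDX_FIXED is hard-coded here for illustration, but
the bit handling mirrors intel_check_msr_idx()/intel_msr_idx_to_pmc() from
patch 3/5.

#include <stdbool.h>
#include <stdio.h>

#define INTEL_PMC_IDX_FIXED	32	/* hard-coded here for illustration */

/*
 * msr_idx: the ECX value a guest passes to RDPMC.  Bit 30 selects the
 * fixed-counter space (bit 31 is the "fast mode" flag); the low bits
 * index into the selected space.
 *
 * glb_idx: one flat index space covering both GP and fixed counters.
 */
static int msr_idx_to_glb_idx(unsigned int msr_idx)
{
	bool fixed = msr_idx & (1u << 30);

	msr_idx &= ~(3u << 30);

	return fixed ? INTEL_PMC_IDX_FIXED + (int)msr_idx : (int)msr_idx;
}

int main(void)
{
	printf("%d\n", msr_idx_to_glb_idx(1));			/* GP counter 1 -> 1 */
	printf("%d\n", msr_idx_to_glb_idx((1u << 30) | 2));	/* fixed counter 2 -> 34 */
	return 0;
}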

Are they confusing to you? Maybe I should move the "Design Note" to the
commit message instead of the real code?

Thanks,
-Wei

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  2015-04-09 21:08         ` Wei Huang
@ 2015-04-10 12:53           ` Radim Krčmář
  0 siblings, 0 replies; 19+ messages in thread
From: Radim Krčmář @ 2015-04-10 12:53 UTC (permalink / raw)
  To: Wei Huang; +Cc: kvm, pbonzini, gleb

2015-04-09 16:08-0500, Wei Huang:
> <snip>
> >>>> @@ -4918,13 +4919,13 @@ static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
> >>>>  static int emulator_check_pmc(struct x86_emulate_ctxt *ctxt,
> >>>>  			      u32 pmc)
> >>>>  {
> >>>> -	return kvm_pmu_check_pmc(emul_to_vcpu(ctxt), pmc);
> >>>> +	return kvm_pmu_check_msr_idx(emul_to_vcpu(ctxt), pmc);
> >>>
> >>> (Why not pmc?)
> >> See "Design Note" in pmu.c for a better explanation. I tried to use msr as
> >> real x86 MSR; and msr_idx refers to MSR offset.
> > 
> > I skipped the comment as I thought it was there before, sorry ...
> > 
> > I wouldn't call it MSR index, MSR is just a related interface for PMC,
> > and MSR indices don't even have simple mapping to RDPMC ones.
> > We are indexing PMC without MSR, so index/pmc_idx/pmc seems better.
> > 
> I can fix the name of this function (maybe back to kvm_pmu_check_pmc(),
> let me think about it). In the meanwhile, do you have any comments on
> the following names? They will impact the rest code:
> 
> * msr: MSR for x86

Ok, it's the MSR identifier.

> * msr_idx: offset of MSR registers (used by rdpmc)

Same argument as for the function name.

I haven't thought much about glb_idx before, so msr_idx could be
named rdpmc_idx; well, that name also has problems (too close to the
rdpmc function) and I'm fine with anything that doesn't tie it to MSR.

> * glb_idx: a unified index for both GP and fixed counters (should we
> rename it to idx instead?)

This one is the "real" pmc index, which we then have in pmc->idx, so it
could be pmc_idx ... glb_idx is good, we have to differentiate it from
the index used in rdpmc as well.
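
(Side note for readers of the archive: a rough sketch of the two index
spaces being discussed, using the Intel layout. The helper name is made
up and this is only an illustration of the distinction, not code from
the series.)

    /*
     * Sketch only: an RDPMC-style index encodes the counter group in
     * bit 30 (set for fixed-function counters) and the counter number
     * in the low bits, while the "global" index that ends up in
     * pmc->idx puts GP counters at 0..N-1 and fixed counters at
     * INTEL_PMC_IDX_FIXED and up.
     */
    #define INTEL_PMC_IDX_FIXED	32	/* value used by asm/perf_event.h */

    static unsigned int rdpmc_idx_to_global_idx(unsigned int idx)
    {
    	unsigned int fixed = idx & (1u << 30);
    	unsigned int counter = idx & ~(1u << 30);

    	return fixed ? INTEL_PMC_IDX_FIXED + counter : counter;
    }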

> Are they confusing to you?

Yes, but I'm confused most of the time :)

>                            Maybe I should move "Design Note" to commit
> message instead of real code?

The comment couldn't get outdated that way.
(I'm ambivalent, there might be a benefit in having code comments.)

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  2015-04-08 16:18 ` [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM Wei Huang
  2015-04-09 19:43   ` Radim Krčmář
@ 2015-04-10 12:57   ` Radim Krčmář
  1 sibling, 0 replies; 19+ messages in thread
From: Radim Krčmář @ 2015-04-10 12:57 UTC (permalink / raw)
  To: Wei Huang; +Cc: kvm, pbonzini, gleb

2015-04-08 12:18-0400, Wei Huang:
> +/* Called before using any PMU functions above */
> +int kvm_pmu_arch_init(struct kvm_x86_ops *x86_ops)
> +{
> +	kvm_pmu_ops = x86_ops->get_pmu_ops();
> +
> +	if (x86_ops && (kvm_pmu_ops = x86_ops->get_pmu_ops()) != NULL)
> +		return 0;
> +	else
> +		return -EINVAL;
> +}
> @@ -5782,6 +5783,9 @@ int kvm_arch_init(void *opaque)
>  
>  	kvm_timer_init();
>  
> +        /* init pointers of PMU operations */
> +	kvm_pmu_arch_init(ops);

I missed this yesterday ... the return value isn't used, so we could
check and return here, or save all the complexity in kvm_pmu_arch_init()
when we are going to BUG anyway.

(Also, the comment misalignment is caused by indentation using spaces.)
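
(As an aside, one possible shape of the "check and return" variant --
purely a sketch against the quoted patch, with a hypothetical error
label, not the code that was eventually merged:)

    int kvm_pmu_arch_init(struct kvm_x86_ops *x86_ops)
    {
    	/* Assign once and fail early when no PMU ops are provided. */
    	if (!x86_ops || !(kvm_pmu_ops = x86_ops->get_pmu_ops()))
    		return -EINVAL;

    	return 0;
    }

    	/* ... and in kvm_arch_init(): */
    	r = kvm_pmu_arch_init(ops);
    	if (r)
    		goto out;	/* hypothetical error label */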

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  2015-04-09 19:43   ` Radim Krčmář
  2015-04-09 20:03     ` Wei Huang
@ 2015-04-20 18:33     ` Wei Huang
  2015-04-21  9:33       ` Radim Krčmář
  1 sibling, 1 reply; 19+ messages in thread
From: Wei Huang @ 2015-04-20 18:33 UTC (permalink / raw)
  To: Radim Krčmář; +Cc: kvm, Paolo Bonzini, gleb

<snip>
>> +/* check if msr_idx is a valid index to access PMU */
>> +inline int kvm_pmu_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
>
> If we really want it inline, it's better done in header.
> (I think GCC would inline this in-module anyway, but other modules still
>   have to call it.)
>
>> +{
>> +	return kvm_pmu_ops->check_msr_idx(vcpu, msr_idx);
>> +}
>> +
> | [...]
>> +bool kvm_pmu_is_msr(struct kvm_vcpu *vcpu, u32 msr)
>
> (Might make sense to inline these trivial wrappers.)
Hi Radim,

I forgot to mention that I didn't make these functions inline in 
V3. For an inline to work across source files, I would have to explicitly 
use "extern"; so I decided not to touch it in V3 yet. If you insist, I 
will change it.

Another solution is to replace the functions with 
kvm_pmu_ops->blah_blah(). But this looks less appealing to me.

Thanks,
-Wei

>
>> +{
>> +	return kvm_pmu_ops->is_pmu_msr(vcpu, msr);
>> +}
>> +
>> +int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
>> +{
>> +	return kvm_pmu_ops->get_msr(vcpu, msr, data);
>> +}
>> +
>> +int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
>> +{
>> +	return kvm_pmu_ops->set_msr(vcpu, msr_info);
>> +	
>
<snip>

^ permalink raw reply	[flat|nested] 19+ messages in thread

* Re: [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM
  2015-04-20 18:33     ` Wei Huang
@ 2015-04-21  9:33       ` Radim Krčmář
  0 siblings, 0 replies; 19+ messages in thread
From: Radim Krčmář @ 2015-04-21  9:33 UTC (permalink / raw)
  To: Wei Huang; +Cc: kvm, Paolo Bonzini, gleb

2015-04-20 13:33-0500, Wei Huang:
> <snip>
> >>+/* check if msr_idx is a valid index to access PMU */
> >>+inline int kvm_pmu_check_msr_idx(struct kvm_vcpu *vcpu, unsigned msr_idx)
> >
> >If we really want it inline, it's better done in header.
> >(I think GCC would inline this in-module anyway, but other modules still
> >  have to call it.)
> >
> >>+{
> >>+	return kvm_pmu_ops->check_msr_idx(vcpu, msr_idx);
> >>+}
> >>+
> >| [...]
> >>+bool kvm_pmu_is_msr(struct kvm_vcpu *vcpu, u32 msr)
> >
> >(Might make sense to inline these trivial wrappers.)
> Hi Radim,
> 
> I forgot to mention that I didn't make these functions inline in V3.
> For an inline to work across source files, I would have to explicitly use
> "extern"; so I decided not to touch it in V3 yet.

(No need for the "extern inline" magic -- you can simply define them as
 "static inline" in pmu.h.)

>                                                   If you insist, I will
> change it.

I don't, it has minimal impact.

> Another solution is to replace the functions with kvm_pmu_ops->blah_blah().
> But this looks less appealing to me.

Agreed, wrappers were a good idea.

Will review today, thanks for the explanation.

^ permalink raw reply	[flat|nested] 19+ messages in thread

end of thread, other threads:[~2015-04-21  9:33 UTC | newest]

Thread overview: 19+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2015-04-08 16:18 [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
2015-04-08 16:18 ` [PATCH V2 1/5] KVM: x86/vPMU: Define kvm_pmu_ops to support vPMU function dispatch Wei Huang
2015-04-08 16:18 ` [PATCH V2 2/5] KVM: x86/vPMU: Rename pmu.c file to pmu_intel.c Wei Huang
2015-04-09 19:10   ` Radim Krčmář
2015-04-09 19:23     ` Wei Huang
2015-04-08 16:18 ` [PATCH V2 3/5] KVM: x86/vPMU: Create vPMU interface for VMX and SVM Wei Huang
2015-04-09 19:43   ` Radim Krčmář
2015-04-09 20:03     ` Wei Huang
2015-04-09 20:54       ` Radim Krčmář
2015-04-09 21:08         ` Wei Huang
2015-04-10 12:53           ` Radim Krčmář
2015-04-20 18:33     ` Wei Huang
2015-04-21  9:33       ` Radim Krčmář
2015-04-10 12:57   ` Radim Krčmář
2015-04-08 16:18 ` [PATCH V2 4/5] KVM: x86/vPMU: Implement vPMU code AMD CPUs Wei Huang
2015-04-08 16:18 ` [PATCH V2 5/5] KVM: x86/vPMU: Enable PMU handling for AMD PERFCTRn and EVNTSELn MSRs Wei Huang
2015-04-08 16:22 ` [PATCH V2 0/5] KVM vPMU support for AMD Wei Huang
2015-04-09 19:05 ` Radim Krčmář
2015-04-09 19:19   ` Wei Huang
