From: Jan Dakinevich <jan.dakinevich@virtuozzo.com>
To: linux-kernel@vger.kernel.org
Cc: "Jan Dakinevich" <jan.dakinevich@virtuozzo.com>,
	"Denis V . Lunev" <den@virtuozzo.com>,
	"Roman Kagan" <rkagan@virtuozzo.com>,
	"Peter Zijlstra" <peterz@infradead.org>,
	"Ingo Molnar" <mingo@redhat.com>,
	"Arnaldo Carvalho de Melo" <acme@kernel.org>,
	"Alexander Shishkin" <alexander.shishkin@linux.intel.com>,
	"Jiri Olsa" <jolsa@redhat.com>,
	"Namhyung Kim" <namhyung@kernel.org>,
	"Thomas Gleixner" <tglx@linutronix.de>,
	"H. Peter Anvin" <hpa@zytor.com>,
	x86@kernel.org, "Paolo Bonzini" <pbonzini@redhat.com>,
	"Radim Krčmář" <rkrcmar@redhat.com>,
	"Andi Kleen" <ak@linux.intel.com>,
	"Kan Liang" <kan.liang@intel.com>,
	"Stephane Eranian" <eranian@google.com>,
	"Colin King" <colin.king@canonical.com>,
	"Sebastian Andrzej Siewior" <bigeasy@linutronix.de>,
	"Jin Yao" <yao.jin@linux.intel.com>,
	kvm@vger.kernel.org
Subject: [PATCH RFC 2/2] KVM: x86/vPMU: ignore access to LBR-related MSRs
Date: Wed,  6 Dec 2017 14:43:03 +0300	[thread overview]
Message-ID: <1512560585-27263-3-git-send-email-jan.dakinevich@virtuozzo.com> (raw)
In-Reply-To: <1512560585-27263-1-git-send-email-jan.dakinevich@virtuozzo.com>

Windows Server 2016 Essentials (for a yet unknown reason) attempts to
access MSR_LBR_TOS and other LBR-related registers at startup.  These
MSRs are not currently handled by KVM, so the guest gets a #GP and
crashes.

To prevent that, identify the LBR-related MSRs pertinent to the CPU
model exposed to the guest and give them dummy handling: ignore writes
and return zero on reads.

Signed-off-by: Jan Dakinevich <jan.dakinevich@virtuozzo.com>
---
 arch/x86/include/asm/kvm_host.h |  2 ++
 arch/x86/kvm/pmu_intel.c        | 33 +++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)
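
Note for reviewers: the struct x86_pmu_lbr descriptor and the
intel_pmu_lbr_fill() helper used below are introduced by patch 1/2 and
are not shown here.  As a rough orientation only, the following sketch
shows what such a descriptor might look like for a Skylake-class guest;
the field names are inferred from their use in intel_is_lbr_msr() and
the helper name example_lbr_fill_skl() is made up for illustration.

	/*
	 * Hypothetical sketch, not part of this patch: the real structure
	 * and fill helper come from patch 1/2.  Skylake values shown as an
	 * example only.
	 */
	struct x86_pmu_lbr {
		unsigned int	nr;	/* number of LBR entries */
		unsigned int	tos;	/* MSR_LBR_TOS */
		unsigned int	from;	/* first MSR_LBR_x_FROM_IP */
		unsigned int	to;	/* first MSR_LBR_x_TO_IP */
	};

	/* How the fill helper might describe a Skylake-class guest. */
	static void example_lbr_fill_skl(struct x86_pmu_lbr *lbr)
	{
		lbr->nr   = 32;		/* Skylake exposes 32 LBR entries */
		lbr->tos  = 0x01c9;	/* MSR_LBR_TOS */
		lbr->from = 0x0680;	/* MSR_LBR_NHM_FROM */
		lbr->to   = 0x06c0;	/* MSR_LBR_NHM_TO */
	}

With such a descriptor, intel_is_lbr_msr() below would treat 0x1c9,
0x680..0x69f and 0x6c0..0x6df as LBR MSRs, and guest accesses to them
would be silently ignored.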

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 977de5f..04324dd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -34,6 +34,7 @@
 #include <asm/msr-index.h>
 #include <asm/asm.h>
 #include <asm/kvm_page_track.h>
+#include <asm/perf_event.h>
 
 #define KVM_MAX_VCPUS 288
 #define KVM_SOFT_MAX_VCPUS 240
@@ -416,6 +417,7 @@ struct kvm_pmu {
 	struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
 	struct irq_work irq_work;
 	u64 reprogram_pmi;
+	struct x86_pmu_lbr lbr;
 };
 
 struct kvm_pmu_ops;
diff --git a/arch/x86/kvm/pmu_intel.c b/arch/x86/kvm/pmu_intel.c
index 5ab4a36..7edf191 100644
--- a/arch/x86/kvm/pmu_intel.c
+++ b/arch/x86/kvm/pmu_intel.c
@@ -142,6 +142,24 @@ static struct kvm_pmc *intel_msr_idx_to_pmc(struct kvm_vcpu *vcpu,
 	return &counters[idx];
 }
 
+static bool intel_is_lbr_msr(struct kvm_vcpu *vcpu, u32 msr)
+{
+	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+	struct x86_pmu_lbr *lbr = &pmu->lbr;
+
+	if (!lbr->nr)
+		return false;
+
+	if (msr == lbr->tos)
+		return true;
+	if (msr >= lbr->from && msr < lbr->from + lbr->nr)
+		return true;
+	if (msr >= lbr->to && msr < lbr->to + lbr->nr)
+		return true;
+
+	return false;
+}
+
 static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -155,6 +173,10 @@ static bool intel_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
 		ret = pmu->version > 1;
 		break;
 	default:
+		if (intel_is_lbr_msr(vcpu, msr)) {
+			ret = true;
+			break;
+		}
 		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) ||
 			get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0) ||
 			get_fixed_pmc(pmu, msr);
@@ -183,6 +205,10 @@ static int intel_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
 		*data = pmu->global_ovf_ctrl;
 		return 0;
 	default:
+		if (intel_is_lbr_msr(vcpu, msr)) {
+			*data = 0;
+			return 0;
+		}
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, msr))) {
 			*data = pmc_read_counter(pmc);
@@ -235,6 +261,8 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		break;
 	default:
+		if (intel_is_lbr_msr(vcpu, msr))
+			return 0;
 		if ((pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)) ||
 		    (pmc = get_fixed_pmc(pmu, msr))) {
 			if (!msr_info->host_initiated)
@@ -303,6 +331,11 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
 	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
 		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
+
+	entry = kvm_find_cpuid_entry(vcpu, 1, 0);
+	if (entry)
+		intel_pmu_lbr_fill(&pmu->lbr,
+			x86_family(entry->eax), x86_model(entry->eax));
 }
 
 static void intel_pmu_init(struct kvm_vcpu *vcpu)
-- 
2.1.4
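
As an aside (not part of the series): a minimal guest-side check of the
new behaviour could look like the snippet below, assuming a Linux
guest.  With the dummy handling in place the read succeeds and yields
zero instead of injecting #GP.  The function name is made up for
illustration.

	/*
	 * Hypothetical guest-side check, not part of this series: with
	 * the dummy handling above, reading MSR_LBR_TOS (0x1c9) succeeds
	 * and returns 0 instead of raising #GP in the guest.
	 */
	#include <linux/kernel.h>
	#include <asm/msr.h>

	static void check_lbr_tos(void)
	{
		u64 val;

		if (!rdmsrl_safe(0x01c9, &val))
			pr_info("MSR_LBR_TOS reads as %llu\n", val);
		else
			pr_info("MSR_LBR_TOS access still faults\n");
	}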

Thread overview: 22+ messages

2017-12-06 11:43 [PATCH RFC 0/2] ignore LBR-related MSRs Jan Dakinevich
2017-12-06 11:43 ` [PATCH RFC 1/2] perf/x86/intel: make reusable LBR initialization code Jan Dakinevich
2017-12-06 12:51   ` Peter Zijlstra
2017-12-06 11:43 ` [PATCH RFC 2/2] KVM: x86/vPMU: ignore access to LBR-related MSRs Jan Dakinevich [this message]
2017-12-06 12:52   ` Peter Zijlstra
2017-12-06 15:57   ` Andi Kleen
2017-12-06 17:02     ` Jan Dakinevich
2017-12-06 17:55       ` Andi Kleen
2017-12-06 15:06 ` [PATCH RFC 0/2] ignore LBR-related MSRs Konrad Rzeszutek Wilk
2017-12-06 16:39   ` Jan Dakinevich
2017-12-06 16:45     ` Denis V. Lunev
2017-12-06 16:23 ` Jiri Olsa
2017-12-20 16:26 ` Jan Dakinevich
