From: Eric Hankland <ehankland@google.com>
To: pbonzini@redhat.com, rkrcmar@redhat.com
Cc: linux-kernel@vger.kernel.org, kvm@vger.kernel.org
Subject: [PATCH v1] KVM: x86: PMU Whitelist
Date: Wed, 22 May 2019 15:23:00 -0700	[thread overview]
Message-ID: <CAOyeoRWfPNmaWY6Lifdkdj3KPPM654vzDO+s3oduEMCJP+Asow@mail.gmail.com> (raw)

- Add a VCPU ioctl that can control which events the guest can monitor.

Signed-off-by: Eric Hankland <ehankland@google.com>
---
Some events can provide a guest with information about other guests or the
host (e.g. L3 cache stats); restricting the guest to a whitelisted "safe"
set of events limits the potential for the PMU to be used in side-channel
attacks. This change introduces a new vcpu ioctl that sets an event
whitelist. If the guest attempts to program a counter for any unwhitelisted
event, the kernel counter won't be created, so any RDPMC/RDMSR will report
0 instances of that event.
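
To illustrate the intended use, a userspace VMM could install a whitelist
right after creating a vcpu, roughly as sketched below. This is not part of
the patch: it assumes uapi headers with this change applied, vcpu_fd is a
placeholder for an existing vcpu file descriptor, and the two event codes
(0x3c unhalted core cycles, 0xc0 instructions retired) are only examples.

  #include <linux/kvm.h>
  #include <stdlib.h>
  #include <string.h>
  #include <sys/ioctl.h>

  /* Whitelist two architectural events; all other events will read as 0. */
  static int set_pmu_whitelist(int vcpu_fd)
  {
          const __u64 allowed[] = { 0x003c /* cycles */, 0x00c0 /* insns */ };
          struct kvm_pmu_whitelist *wl;
          int ret;

          wl = calloc(1, sizeof(*wl) + sizeof(allowed));
          if (!wl)
                  return -1;
          /* Compare only the event select (bits 0-7) and unit mask (8-15). */
          wl->event_mask = 0xffffull;
          wl->num_events = 2;
          memcpy(wl->events, allowed, sizeof(allowed));

          ret = ioctl(vcpu_fd, KVM_SET_PMU_WHITELIST, wl);
          free(wl);
          return ret;
  }

Note that, as the patch is written, setting num_events to 0 installs an
empty whitelist and prevents the guest from programming any event at all.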
---
 Documentation/virtual/kvm/api.txt | 16 +++++++++++
 arch/x86/include/asm/kvm_host.h   |  1 +
 arch/x86/include/uapi/asm/kvm.h   |  7 +++++
 arch/x86/kvm/pmu.c                | 44 +++++++++++++++++++++++++++++++
 arch/x86/kvm/pmu.h                |  3 +++
 arch/x86/kvm/pmu_amd.c            | 16 +++++++++++
 arch/x86/kvm/vmx/pmu_intel.c      | 16 +++++++++++
 arch/x86/kvm/x86.c                |  7 +++++
 include/uapi/linux/kvm.h          |  4 +++
 9 files changed, 114 insertions(+)

diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index ba6c42c576dd..79cbe7339145 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4065,6 +4065,22 @@ KVM_ARM_VCPU_FINALIZE call.
 See KVM_ARM_VCPU_INIT for details of vcpu features that require finalization
 using this ioctl.

+4.120 KVM_SET_PMU_WHITELIST
+
+Capability: KVM_CAP_PMU_WHITELIST
+Architectures: x86
+Type: vcpu ioctl
+Parameters: struct kvm_pmu_whitelist (in)
+Returns: 0 on success, -1 on error
+
+struct kvm_pmu_whitelist {
+       __u64 event_mask;
+       __u16 num_events;
+       __u64 events[0];
+};
+
+This ioctl restricts the set of PMU events that the guest can program to the
+whitelisted events. Each event code the guest programs is ANDed with
+event_mask and compared against the num_events entries in events[]; if no
+entry matches, the counter is not created and reads as 0.
+
 5. The kvm_run structure
 ------------------------

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 450d69a1e6fa..942647475999 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -477,6 +477,7 @@ struct kvm_pmu {
        struct kvm_pmc fixed_counters[INTEL_PMC_MAX_FIXED];
        struct irq_work irq_work;
        u64 reprogram_pmi;
+       struct kvm_pmu_whitelist *whitelist;
 };

 struct kvm_pmu_ops;
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 7a0e64ccd6ff..2633b48b75cd 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -421,4 +421,11 @@ struct kvm_nested_state {
        __u8 data[0];
 };

+/* for KVM_SET_PMU_WHITELIST */
+struct kvm_pmu_whitelist {
+       __u64 event_mask;
+       __u16 num_events;
+       __u64 events[0];
+};
+
 #endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
index e39741997893..d0d81cd3626d 100644
--- a/arch/x86/kvm/pmu.c
+++ b/arch/x86/kvm/pmu.c
@@ -101,6 +101,9 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                  bool exclude_kernel, bool intr,
                                  bool in_tx, bool in_tx_cp)
 {
+       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
+       int i;
+       u64 event_config;
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = type,
@@ -127,6 +130,19 @@ static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                attr.config |= HSW_IN_TX_CHECKPOINTED;
        }

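+       /* Do not create a perf event for events that are not whitelisted. */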
+       if (pmu->whitelist) {
+               event_config = attr.config;
+               if (type == PERF_TYPE_HARDWARE)
+                       event_config = kvm_x86_ops->pmu_ops->get_event_code(
+                               attr.config);
+               event_config &= pmu->whitelist->event_mask;
+               for (i = 0; i < pmu->whitelist->num_events; i++)
+                       if (event_config == pmu->whitelist->events[i])
+                               break;
+               if (i == pmu->whitelist->num_events)
+                       return;
+       }
+
        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 intr ? kvm_perf_overflow_intr :
                                                 kvm_perf_overflow, pmc);
@@ -244,6 +260,34 @@ int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
        return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
 }

+int kvm_vcpu_ioctl_set_pmu_whitelist(struct kvm_vcpu *vcpu,
+                                    struct kvm_pmu_whitelist __user *whtlst)
+{
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       struct kvm_pmu_whitelist *old = pmu->whitelist;
+       struct kvm_pmu_whitelist *new = NULL;
+       struct kvm_pmu_whitelist tmp;
+       int r;
+       size_t size;
+
+       r = -EFAULT;
+       if (copy_from_user(&tmp, whtlst, sizeof(struct kvm_pmu_whitelist)))
+               goto err;
+
+       size = sizeof(tmp) + sizeof(tmp.events[0]) * tmp.num_events;
+       new = kvzalloc(size, GFP_KERNEL_ACCOUNT);
+       r = -ENOMEM;
+       if (!new)
+               goto err;
+       new->event_mask = tmp.event_mask;
+       new->num_events = tmp.num_events;
+       /* Copy in the variable-length event list that follows the header. */
+       r = -EFAULT;
+       if (copy_from_user(new->events, whtlst->events,
+                          sizeof(tmp.events[0]) * tmp.num_events))
+               goto err;
+       pmu->whitelist = new;
+
+       kvfree(old);
+       return 0;
+err:
+       kvfree(new);
+       return r;
+}
+
 bool is_vmware_backdoor_pmc(u32 pmc_idx)
 {
        switch (pmc_idx) {
diff --git a/arch/x86/kvm/pmu.h b/arch/x86/kvm/pmu.h
index ba8898e1a854..7d4a37fcb043 100644
--- a/arch/x86/kvm/pmu.h
+++ b/arch/x86/kvm/pmu.h
@@ -23,6 +23,7 @@ struct kvm_pmu_ops {
        unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
                                    u8 unit_mask);
        unsigned (*find_fixed_event)(int idx);
+       u64 (*get_event_code)(unsigned event_type);
        bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
        struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
        struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
@@ -108,6 +109,8 @@ void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

 void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
 void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
+int kvm_vcpu_ioctl_set_pmu_whitelist(struct kvm_vcpu *vcpu,
+                                    struct kvm_pmu_whitelist __user *whtlst);
 int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
 int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
 bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
diff --git a/arch/x86/kvm/pmu_amd.c b/arch/x86/kvm/pmu_amd.c
index 1495a735b38e..1d977b20c863 100644
--- a/arch/x86/kvm/pmu_amd.c
+++ b/arch/x86/kvm/pmu_amd.c
@@ -145,6 +145,21 @@ static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
        return amd_event_mapping[i].event_type;
 }

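+/* Map a generic perf hardware event id back to the raw AMD event code. */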
+static u64 amd_get_event_code(unsigned event_type)
+{
+       int i;
+       u64 event_code = 0;
+
+       for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
+               if (amd_event_mapping[i].event_type == event_type) {
+                       event_code = amd_event_mapping[i].eventsel |
+                               ((u64)amd_event_mapping[i].unit_mask << 8);
+                       break;
+               }
+
+       return event_code;
+}
+
 /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
 static unsigned amd_find_fixed_event(int idx)
 {
@@ -306,6 +321,7 @@ static void amd_pmu_reset(struct kvm_vcpu *vcpu)
 struct kvm_pmu_ops amd_pmu_ops = {
        .find_arch_event = amd_find_arch_event,
        .find_fixed_event = amd_find_fixed_event,
+       .get_event_code = amd_get_event_code,
        .pmc_is_enabled = amd_pmc_is_enabled,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
diff --git a/arch/x86/kvm/vmx/pmu_intel.c b/arch/x86/kvm/vmx/pmu_intel.c
index f8502c376b37..01a5d5a3bf3d 100644
--- a/arch/x86/kvm/vmx/pmu_intel.c
+++ b/arch/x86/kvm/vmx/pmu_intel.c
@@ -67,6 +67,21 @@ static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
                reprogram_counter(pmu, bit);
 }

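+/* Map a generic perf hardware event id back to the raw Intel event code. */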
+static u64 intel_get_event_code(unsigned event_type)
+{
+       int i;
+       u64 event_code = 0;
+
+       for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
+               if (intel_arch_events[i].event_type == event_type) {
+                       event_code = intel_arch_events[i].eventsel |
+                               ((u64) intel_arch_events[i].unit_mask << 8);
+                       break;
+               }
+
+       return event_code;
+}
+
 static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
                                      u8 event_select,
                                      u8 unit_mask)
@@ -350,6 +365,7 @@ static void intel_pmu_reset(struct kvm_vcpu *vcpu)

 struct kvm_pmu_ops intel_pmu_ops = {
        .find_arch_event = intel_find_arch_event,
+       .get_event_code = intel_get_event_code,
        .find_fixed_event = intel_find_fixed_event,
        .pmc_is_enabled = intel_pmc_is_enabled,
        .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 536b78c4af6e..089de23289f4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3089,6 +3089,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
        case KVM_CAP_GET_MSR_FEATURES:
        case KVM_CAP_MSR_PLATFORM_INFO:
        case KVM_CAP_EXCEPTION_PAYLOAD:
+       case KVM_CAP_PMU_WHITELIST:
                r = 1;
                break;
        case KVM_CAP_SYNC_REGS:
@@ -4285,6 +4286,12 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                r = 0;
                break;
        }
+       case KVM_SET_PMU_WHITELIST: {
+               struct kvm_pmu_whitelist __user *whitelist = argp;
+
+               r = kvm_vcpu_ioctl_set_pmu_whitelist(vcpu, whitelist);
+               goto out;
+       }
        default:
                r = -EINVAL;
        }
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 2fe12b40d503..140a6ac52981 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -993,6 +993,7 @@ struct kvm_ppc_resize_hpt {
 #define KVM_CAP_ARM_SVE 170
 #define KVM_CAP_ARM_PTRAUTH_ADDRESS 171
 #define KVM_CAP_ARM_PTRAUTH_GENERIC 172
+#define KVM_CAP_PMU_WHITELIST 173

 #ifdef KVM_CAP_IRQ_ROUTING

@@ -1451,6 +1452,9 @@ struct kvm_enc_region {
 /* Available with KVM_CAP_ARM_SVE */
 #define KVM_ARM_VCPU_FINALIZE    _IOW(KVMIO,  0xc2, int)

+/* Available with KVM_CAP_PMU_WHITELIST */
+#define KVM_SET_PMU_WHITELIST    _IOW(KVMIO,  0xc3, struct kvm_pmu_whitelist)
+
 /* Secure Encrypted Virtualization command */
 enum sev_cmd_id {
        /* Guest initialization commands */

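For reference, the whitelist check added to pmc_reprogram_counter() boils
down to the logic sketched below. This is illustrative only: the helper name
is made up, and for PERF_TYPE_HARDWARE events the kernel first translates
the generic perf event id back to a raw event code via the new
get_event_code() hook before applying the mask.

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdint.h>

  /* Return true if a raw event code passes the whitelist check. */
  static bool event_is_whitelisted(uint64_t event_code, uint64_t event_mask,
                                   const uint64_t *events, size_t num_events)
  {
          size_t i;

          event_code &= event_mask;
          for (i = 0; i < num_events; i++)
                  if (event_code == events[i])
                          return true;
          return false;
  }

  /*
   * Example: with event_mask = 0xffff, a guest EVENTSEL value of 0x5300c0
   * (INST_RETIRED.ANY_P plus the USR/OS/INT/EN control bits) matches a
   * whitelist entry of 0x00c0, because only the event select and unit mask
   * bits survive the mask.
   */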