From: Steve Rutherford
Subject: [RFC PATCH 3/4] KVM: x86: Add EOI exit bitmap inference
Date: Tue, 12 May 2015 18:47:31 -0700
Message-ID: <1431481652-27268-3-git-send-email-srutherford@google.com>
In-Reply-To: <1431481652-27268-1-git-send-email-srutherford@google.com>
References: <1431481652-27268-1-git-send-email-srutherford@google.com>
To: kvm@vger.kernel.org
Cc: ahonig@google.com

In order to support a userspace IOAPIC interacting with an in-kernel
APIC, the EOI exit bitmaps need to be configurable. If the IOAPIC is in
userspace (i.e., the irqchip has been split), the EOI exit bitmaps will
be set whenever the GSI routes are configured. In particular, for the
low 24 MSI routes, the EOI exit bit corresponding to the destination
vector will be set for the destination VCPU.

The intention is for the userspace IOAPIC to use MSI routes [0,23] to
inject interrupts into the guest. This is a slight abuse of the notion
of an MSI route, given that MSIs classically bypass the IOAPIC. It
might be worthwhile to add an additional route type to improve clarity.

Compile-tested on Intel x86.

Signed-off-by: Steve Rutherford
---
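Note (not part of the patch): a rough userspace-side sketch of how a
split-irqchip VMM could program one of the low 24 GSIs as an MSI route so
that kvm_scan_ioapic_routes() below can infer the EOI exit bitmap. The
helper name and the single-entry table are illustrative only;
KVM_SET_GSI_ROUTING replaces the entire routing table, so a real VMM
would submit all of its routes at once. The encoding mirrors what the
kernel side decodes: destination ID in MSI address bits 19:12,
destination mode in bit 2, vector in data bits 7:0.

#include <stdlib.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative helper: route IOAPIC pin 'pin' (GSI 0-23) as an MSI route. */
static int set_ioapic_pin_route(int vm_fd, unsigned int pin,
				unsigned int dest_id, unsigned int dest_mode,
				unsigned int vector)
{
	struct kvm_irq_routing *table;
	struct kvm_irq_routing_entry *e;
	int r;

	/*
	 * Single-entry table for brevity; KVM_SET_GSI_ROUTING replaces the
	 * whole table, so a real VMM would pass every route it uses here.
	 */
	table = calloc(1, sizeof(*table) + sizeof(*e));
	if (!table)
		return -1;
	table->nr = 1;

	e = &table->entries[0];
	e->gsi = pin;
	e->type = KVM_IRQ_ROUTING_MSI;
	/* dest_id in bits 19:12, dest_mode (0 = physical, 1 = logical) in bit 2 */
	e->u.msi.address_lo = 0xfee00000 | (dest_id << 12) | (dest_mode << 2);
	e->u.msi.address_hi = 0;
	e->u.msi.data = vector & 0xff;	/* fixed delivery mode, vector */

	r = ioctl(vm_fd, KVM_SET_GSI_ROUTING, table);
	free(table);
	return r;
}
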
 arch/x86/kvm/ioapic.c    | 11 +++++++++++
 arch/x86/kvm/ioapic.h    |  1 +
 arch/x86/kvm/lapic.c     |  2 ++
 arch/x86/kvm/x86.c       | 13 +++++++++++--
 include/linux/kvm_host.h |  4 ++++
 virt/kvm/irqchip.c       | 32 ++++++++++++++++++++++++++++++++
 6 files changed, 61 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 856f791..3323c86 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -672,3 +672,14 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
 	spin_unlock(&ioapic->lock);
 	return 0;
 }
+
+void kvm_vcpu_request_scan_userspace_ioapic(struct kvm *kvm)
+{
+	struct kvm_ioapic *ioapic = kvm->arch.vioapic;
+
+	if (ioapic)
+		return;
+	if (!lapic_in_kernel(kvm))
+		return;
+	kvm_make_scan_ioapic_request(kvm);
+}
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index ca0b0b4..b7af71b 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -123,4 +123,5 @@ int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
 void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap,
 			u32 *tmr);
+void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
 
 #endif
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 42fada6f..7533b87 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -211,6 +211,8 @@ out:
 
 	if (!irqchip_split(kvm))
 		kvm_vcpu_request_scan_ioapic(kvm);
+	else
+		kvm_vcpu_request_scan_userspace_ioapic(kvm);
 }
 
 static inline void apic_set_spiv(struct kvm_lapic *apic, u32 val)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cc27c35..6127fe7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6335,8 +6335,17 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 				goto out;
 			}
 		}
-		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu))
-			vcpu_scan_ioapic(vcpu);
+		if (kvm_check_request(KVM_REQ_SCAN_IOAPIC, vcpu)) {
+			if (irqchip_split(vcpu->kvm)) {
+				memset(vcpu->arch.eoi_exit_bitmaps, 0, 32);
+				kvm_scan_ioapic_routes(
+					vcpu, vcpu->arch.eoi_exit_bitmaps);
+				kvm_x86_ops->load_eoi_exitmap(
+					vcpu, vcpu->arch.eoi_exit_bitmaps);
+
+			} else
+				vcpu_scan_ioapic(vcpu);
+		}
 		if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
 			kvm_vcpu_reload_apic_access_page(vcpu);
 	}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index cef20ad..678215a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -438,10 +438,14 @@ void vcpu_put(struct kvm_vcpu *vcpu);
 
 #ifdef __KVM_HAVE_IOAPIC
 void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_vcpu_request_scan_userspace_ioapic(struct kvm *kvm);
 #else
 static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
 {
 }
+static inline void kvm_vcpu_request_scan_userspace_ioapic(struct kvm *kvm)
+{
+}
 #endif
 
 #ifdef CONFIG_HAVE_KVM_IRQFD
diff --git a/virt/kvm/irqchip.c b/virt/kvm/irqchip.c
index 8aaceed..8a253aa 100644
--- a/virt/kvm/irqchip.c
+++ b/virt/kvm/irqchip.c
@@ -205,6 +205,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
 
 	synchronize_srcu_expedited(&kvm->irq_srcu);
 
+	kvm_vcpu_request_scan_userspace_ioapic(kvm);
+
 	new = old;
 	r = 0;
 
@@ -212,3 +214,33 @@ out:
 	kfree(new);
 	return r;
 }
+
+void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
+{
+	struct kvm *kvm = vcpu->kvm;
+	struct kvm_kernel_irq_routing_entry *entry;
+	struct kvm_irq_routing_table *table;
+	u32 i, nr_rt_entries;
+
+	mutex_lock(&kvm->irq_lock);
+	table = kvm->irq_routing;
+	nr_rt_entries = min_t(u32, table->nr_rt_entries, IOAPIC_NUM_PINS);
+	for (i = 0; i < nr_rt_entries; ++i) {
+		hlist_for_each_entry(entry, &table->map[i], link) {
+			u32 dest_id, dest_mode;
+
+			if (entry->type != KVM_IRQ_ROUTING_MSI)
+				continue;
+			dest_id = (entry->msi.address_lo >> 12) & 0xff;
+			dest_mode = (entry->msi.address_lo >> 2) & 0x1;
+			if (kvm_apic_match_dest(vcpu, NULL, 0, dest_id,
+						dest_mode)) {
+				u32 vector = entry->msi.data & 0xff;
+
+				__set_bit(vector,
+					  (unsigned long *) eoi_exit_bitmap);
+			}
+		}
+	}
+	mutex_unlock(&kvm->irq_lock);
+}
-- 
2.2.0.rc0.207.ga3a616c