From mboxrd@z Thu Jan 1 00:00:00 1970
From: Paul Durrant
Subject: [PATCH v4 07/17] x86/hvm: add length to mmio check op
Date: Wed, 24 Jun 2015 12:24:39 +0100
Message-ID: <1435145089-21999-8-git-send-email-paul.durrant@citrix.com>
In-Reply-To: <1435145089-21999-1-git-send-email-paul.durrant@citrix.com>
References: <1435145089-21999-1-git-send-email-paul.durrant@citrix.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: xen-devel@lists.xenproject.org
Cc: Andrew Cooper, Paul Durrant, Keir Fraser, Jan Beulich
List-Id: xen-devel@lists.xenproject.org

When memory mapped I/O is range checked by internal handlers, the length
of the access should be taken into account.

Signed-off-by: Paul Durrant
Cc: Keir Fraser
Cc: Jan Beulich
Cc: Andrew Cooper
---
 xen/arch/x86/hvm/hpet.c                   |  7 ++++---
 xen/arch/x86/hvm/intercept.c              |  2 +-
 xen/arch/x86/hvm/vioapic.c                | 17 ++++++++++++++---
 xen/arch/x86/hvm/vlapic.c                 |  8 +++++---
 xen/arch/x86/hvm/vmsi.c                   | 27 ++++++++++++++++++++-------
 xen/drivers/passthrough/amd/iommu_guest.c | 18 +++++++++++++++---
 xen/include/asm-x86/hvm/io.h              |  4 +++-
 7 files changed, 62 insertions(+), 21 deletions(-)

diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
index 8958873..1a1f239 100644
--- a/xen/arch/x86/hvm/hpet.c
+++ b/xen/arch/x86/hvm/hpet.c
@@ -498,10 +498,11 @@ static int hpet_write(
     return X86EMUL_OKAY;
 }
 
-static int hpet_range(struct vcpu *v, unsigned long addr)
+static int hpet_range(struct vcpu *v, unsigned long addr,
+                      unsigned long length)
 {
-    return ( (addr >= HPET_BASE_ADDRESS) &&
-             (addr < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE)) );
+    return (addr >= HPET_BASE_ADDRESS) &&
+           ((addr + length) < (HPET_BASE_ADDRESS + HPET_MMAP_SIZE));
 }
 
 static const struct hvm_mmio_ops hpet_mmio_ops = {
diff --git a/xen/arch/x86/hvm/intercept.c b/xen/arch/x86/hvm/intercept.c
index 4db024e..5e8d8b2 100644
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -38,7 +38,7 @@ static bool_t hvm_mmio_accept(struct hvm_io_handler *handler,
 {
     BUG_ON(handler->type != IOREQ_TYPE_COPY);
 
-    return handler->u.mmio.ops->check(current, addr);
+    return handler->u.mmio.ops->check(current, addr, size);
 }
 
 static int hvm_mmio_read(struct hvm_io_handler *handler,
diff --git a/xen/arch/x86/hvm/vioapic.c b/xen/arch/x86/hvm/vioapic.c
index 9ad909b..4a9b33e 100644
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -242,12 +242,13 @@ static int vioapic_write(
     return X86EMUL_OKAY;
 }
 
-static int vioapic_range(struct vcpu *v, unsigned long addr)
+static int vioapic_range(struct vcpu *v, unsigned long addr,
+                         unsigned long length)
 {
     struct hvm_hw_vioapic *vioapic = domain_vioapic(v->domain);
 
-    return ((addr >= vioapic->base_address &&
-             (addr < vioapic->base_address + VIOAPIC_MEM_LENGTH)));
+    return (addr >= vioapic->base_address) &&
+           ((addr + length) <= (vioapic->base_address + VIOAPIC_MEM_LENGTH));
 }
 
 static const struct hvm_mmio_ops vioapic_mmio_ops = {
@@ -466,3 +467,13 @@ void vioapic_deinit(struct domain *d)
     xfree(d->arch.hvm_domain.vioapic);
     d->arch.hvm_domain.vioapic = NULL;
 }
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/arch/x86/hvm/vlapic.c b/xen/arch/x86/hvm/vlapic.c
index f2052cf..7421fc5 100644
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -986,14 +986,16 @@ int hvm_x2apic_msr_write(struct vcpu *v, unsigned int msr, uint64_t msr_content)
     return vlapic_reg_write(v, offset, (uint32_t)msr_content);
 }
 
-static int vlapic_range(struct vcpu *v, unsigned long addr)
+static int vlapic_range(struct vcpu *v, unsigned long address,
+                        unsigned long len)
 {
     struct vlapic *vlapic = vcpu_vlapic(v);
-    unsigned long offset = addr - vlapic_base_address(vlapic);
+    unsigned long offset = address - vlapic_base_address(vlapic);
 
     return !vlapic_hw_disabled(vlapic) &&
            !vlapic_x2apic_mode(vlapic) &&
-           (offset < PAGE_SIZE);
+           (address >= vlapic_base_address(vlapic)) &&
+           ((offset + len) <= PAGE_SIZE);
 }
 
 static const struct hvm_mmio_ops vlapic_mmio_ops = {
diff --git a/xen/arch/x86/hvm/vmsi.c b/xen/arch/x86/hvm/vmsi.c
index 09ea301..61fe391 100644
--- a/xen/arch/x86/hvm/vmsi.c
+++ b/xen/arch/x86/hvm/vmsi.c
@@ -168,14 +168,14 @@ struct msixtbl_entry
 static DEFINE_RCU_READ_LOCK(msixtbl_rcu_lock);
 
 static struct msixtbl_entry *msixtbl_find_entry(
-    struct vcpu *v, unsigned long addr)
+    struct vcpu *v, unsigned long address, unsigned long len)
 {
     struct msixtbl_entry *entry;
     struct domain *d = v->domain;
 
     list_for_each_entry( entry, &d->arch.hvm_domain.msixtbl_list, list )
-        if ( addr >= entry->gtable &&
-             addr < entry->gtable + entry->table_len )
+        if ( (address >= entry->gtable) &&
+             ((address + len) <= (entry->gtable + entry->table_len)) )
             return entry;
 
     return NULL;
@@ -214,7 +214,7 @@ static int msixtbl_read(
 
     rcu_read_lock(&msixtbl_rcu_lock);
 
-    entry = msixtbl_find_entry(v, address);
+    entry = msixtbl_find_entry(v, address, len);
     if ( !entry )
         goto out;
     offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
@@ -273,7 +273,7 @@ static int msixtbl_write(struct vcpu *v, unsigned long address,
 
     rcu_read_lock(&msixtbl_rcu_lock);
 
-    entry = msixtbl_find_entry(v, address);
+    entry = msixtbl_find_entry(v, address, len);
     if ( !entry )
         goto out;
     nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;
@@ -333,12 +333,15 @@ out:
     return r;
 }
 
-static int msixtbl_range(struct vcpu *v, unsigned long addr)
+static int msixtbl_range(struct vcpu *v, unsigned long address,
+                         unsigned long len)
 {
+    struct msixtbl_entry *entry;
     const struct msi_desc *desc;
 
     rcu_read_lock(&msixtbl_rcu_lock);
-    desc = msixtbl_addr_to_desc(msixtbl_find_entry(v, addr), addr);
+    entry = msixtbl_find_entry(v, address, len);
+    desc = msixtbl_addr_to_desc(entry, address);
     rcu_read_unlock(&msixtbl_rcu_lock);
 
     return !!desc;
@@ -514,3 +517,13 @@ void msix_write_completion(struct vcpu *v)
     if ( msixtbl_write(v, ctrl_address, 4, 0) != X86EMUL_OKAY )
         gdprintk(XENLOG_WARNING, "MSI-X write completion failure\n");
 }
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/drivers/passthrough/amd/iommu_guest.c b/xen/drivers/passthrough/amd/iommu_guest.c
index 9c3c488..dd84281 100644
--- a/xen/drivers/passthrough/amd/iommu_guest.c
+++ b/xen/drivers/passthrough/amd/iommu_guest.c
@@ -868,12 +868,14 @@ static void guest_iommu_reg_init(struct guest_iommu *iommu)
     iommu->reg_ext_feature.hi = upper;
 }
 
-static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr)
+static int guest_iommu_mmio_range(struct vcpu *v, unsigned long addr,
+                                  unsigned long length)
 {
     struct guest_iommu *iommu = vcpu_iommu(v);
 
-    return iommu && addr >= iommu->mmio_base &&
-           addr < iommu->mmio_base + IOMMU_MMIO_SIZE;
+    return iommu &&
+           (addr >= iommu->mmio_base) &&
+           ((addr + length) <= (iommu->mmio_base + IOMMU_MMIO_SIZE));
 }
 
 static const struct hvm_mmio_ops iommu_mmio_ops = {
@@ -926,3 +928,13 @@ void guest_iommu_destroy(struct domain *d)
 
     domain_hvm_iommu(d)->arch.g_iommu = NULL;
 }
+
+/*
+ * Local variables:
+ * mode: C
+ * c-file-style: "BSD"
+ * c-basic-offset: 4
+ * tab-width: 4
+ * indent-tabs-mode: nil
+ * End:
+ */
diff --git a/xen/include/asm-x86/hvm/io.h b/xen/include/asm-x86/hvm/io.h
index fecd02d..b4596fc 100644
--- a/xen/include/asm-x86/hvm/io.h
+++ b/xen/include/asm-x86/hvm/io.h
@@ -35,7 +35,9 @@ typedef int (*hvm_mmio_write_t)(struct vcpu *v,
                                 unsigned long addr,
                                 unsigned long length,
                                 unsigned long val);
-typedef int (*hvm_mmio_check_t)(struct vcpu *v, unsigned long addr);
+typedef int (*hvm_mmio_check_t)(struct vcpu *v,
+                                unsigned long addr,
+                                unsigned long length);
 
 struct hvm_mmio_ops {
     hvm_mmio_check_t check;
-- 
1.7.10.4
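
Illustrative sketch, not part of the posted patch: MMIO_BASE, MMIO_SIZE and both helper functions below are made-up names, chosen only to show why the access length matters. An access that starts inside an MMIO region but runs past its end is accepted by an address-only check and rejected by a length-aware one, which is the pattern the series switches the handlers to.

    #include <stdio.h>

    #define MMIO_BASE 0xfed00000UL /* hypothetical region start */
    #define MMIO_SIZE 0x400UL      /* hypothetical region length */

    /* Old-style check: only the first byte of the access is considered. */
    static int range_addr_only(unsigned long addr)
    {
        return (addr >= MMIO_BASE) && (addr < MMIO_BASE + MMIO_SIZE);
    }

    /* New-style check: the whole access must fit inside the region. */
    static int range_with_length(unsigned long addr, unsigned long length)
    {
        return (addr >= MMIO_BASE) &&
               ((addr + length) <= (MMIO_BASE + MMIO_SIZE));
    }

    int main(void)
    {
        /* A 4-byte access whose last two bytes fall outside the region. */
        unsigned long addr = MMIO_BASE + MMIO_SIZE - 2;

        printf("addr-only check accepts:    %d\n", range_addr_only(addr));
        printf("length-aware check accepts: %d\n", range_with_length(addr, 4));
        return 0;
    }

Compiled and run, the sketch prints 1 for the address-only check and 0 for the length-aware one, i.e. only the latter refuses the straddling access.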