From mboxrd@z Thu Jan 1 00:00:00 1970
From: "Michael S. Tsirkin"
Subject: Re: [PATCH v3 6/7] iommu/virtio: Add probe request
Date: Fri, 12 Oct 2018 12:42:04 -0400
Message-ID: <20181012123633-mutt-send-email-mst@kernel.org>
References: <20181012145917.6840-1-jean-philippe.brucker@arm.com>
 <20181012145917.6840-7-jean-philippe.brucker@arm.com>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Content-Disposition: inline
In-Reply-To: <20181012145917.6840-7-jean-philippe.brucker@arm.com>
Sender: virtualization-bounces@lists.linux-foundation.org
Errors-To: virtualization-bounces@lists.linux-foundation.org
To: Jean-Philippe Brucker
Cc: mark.rutland@arm.com, peter.maydell@linaro.org, lorenzo.pieralisi@arm.com,
 tnowicki@caviumnetworks.com, devicetree@vger.kernel.org, linux-pci@vger.kernel.org,
 joro@8bytes.org, will.deacon@arm.com, virtualization@lists.linux-foundation.org,
 eric.auger@redhat.com, iommu@lists.linux-foundation.org, robh+dt@kernel.org,
 marc.zyngier@arm.com, robin.murphy@arm.com, kvmarm@lists.cs.columbia.edu
List-Id: devicetree@vger.kernel.org

On Fri, Oct 12, 2018 at 03:59:16PM +0100, Jean-Philippe Brucker wrote:
> When the device offers the probe feature, send a probe request for each
> device managed by the IOMMU. Extract RESV_MEM information. When we
> encounter a MSI doorbell region, set it up as a IOMMU_RESV_MSI region.
> This will tell other subsystems that there is no need to map the MSI
> doorbell in the virtio-iommu, because MSIs bypass it.
>
> Signed-off-by: Jean-Philippe Brucker
> ---
>  drivers/iommu/virtio-iommu.c      | 147 ++++++++++++++++++++++++++++--
>  include/uapi/linux/virtio_iommu.h |  39 ++++++++
>  2 files changed, 180 insertions(+), 6 deletions(-)
>
> diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c
> index 9fb38cd3b727..8eaf66770469 100644
> --- a/drivers/iommu/virtio-iommu.c
> +++ b/drivers/iommu/virtio-iommu.c
> @@ -56,6 +56,7 @@ struct viommu_dev {
>  	struct iommu_domain_geometry geometry;
>  	u64 pgsize_bitmap;
>  	u8 domain_bits;
> +	u32 probe_size;
>  };
>
>  struct viommu_mapping {
> @@ -77,8 +78,10 @@ struct viommu_domain {
>  };
>
>  struct viommu_endpoint {
> +	struct device *dev;
>  	struct viommu_dev *viommu;
>  	struct viommu_domain *vdomain;
> +	struct list_head resv_regions;
>  };
>
>  struct viommu_request {
> @@ -129,6 +132,9 @@ static off_t viommu_get_req_offset(struct viommu_dev *viommu,
>  {
>  	size_t tail_size = sizeof(struct virtio_iommu_req_tail);
>
> +	if (req->type == VIRTIO_IOMMU_T_PROBE)
> +		return len - viommu->probe_size - tail_size;
> +
>  	return len - tail_size;
>  }
>
> @@ -414,6 +420,101 @@ static int viommu_replay_mappings(struct viommu_domain *vdomain)
>  	return ret;
>  }
>
> +static int viommu_add_resv_mem(struct viommu_endpoint *vdev,
> +			       struct virtio_iommu_probe_resv_mem *mem,
> +			       size_t len)
> +{
> +	struct iommu_resv_region *region = NULL;
> +	unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
> +
> +	u64 start = le64_to_cpu(mem->start);
> +	u64 end = le64_to_cpu(mem->end);
> +	size_t size = end - start + 1;
> +
> +	if (len < sizeof(*mem))
> +		return -EINVAL;

Maybe validate that size, end and start all make sense and e.g. fit
within phys_addr_t and size_t?
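Something along these lines, maybe (completely untested, and the error
values are just what I would guess):

	u64 size = end - start + 1;

	if (start > end || !size)
		return -EINVAL;

	/* reject regions that phys_addr_t / size_t cannot represent */
	if (start != (phys_addr_t)start || size != (size_t)size)
		return -EOVERFLOW;

i.e. compute the size as u64 first, so that a truncation to size_t is
caught instead of silently wrapping.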
> +
> +	switch (mem->subtype) {
> +	default:
> +		dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n",
> +			 mem->subtype);
> +		/* Fall-through */
> +	case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
> +		region = iommu_alloc_resv_region(start, size, 0,
> +						 IOMMU_RESV_RESERVED);
> +		break;
> +	case VIRTIO_IOMMU_RESV_MEM_T_MSI:
> +		region = iommu_alloc_resv_region(start, size, prot,
> +						 IOMMU_RESV_MSI);
> +		break;
> +	}
> +
> +	list_add(&vdev->resv_regions, &region->list);
> +	return 0;
> +}
> +
> +static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev)
> +{
> +	int ret;
> +	u16 type, len;
> +	size_t cur = 0;
> +	size_t probe_len;
> +	struct virtio_iommu_req_probe *probe;
> +	struct virtio_iommu_probe_property *prop;
> +	struct iommu_fwspec *fwspec = dev->iommu_fwspec;
> +	struct viommu_endpoint *vdev = fwspec->iommu_priv;
> +
> +	if (!fwspec->num_ids)
> +		return -EINVAL;
> +
> +	probe_len = sizeof(*probe) + viommu->probe_size +
> +		    sizeof(struct virtio_iommu_req_tail);
> +	probe = kzalloc(probe_len, GFP_KERNEL);
> +	if (!probe)
> +		return -ENOMEM;
> +
> +	probe->head.type = VIRTIO_IOMMU_T_PROBE;
> +	/*
> +	 * For now, assume that properties of an endpoint that outputs multiple
> +	 * IDs are consistent. Only probe the first one.
> +	 */
> +	probe->endpoint = cpu_to_le32(fwspec->ids[0]);
> +
> +	ret = viommu_send_req_sync(viommu, probe, probe_len);
> +	if (ret)
> +		goto out_free;
> +
> +	prop = (void *)probe->properties;
> +	type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
> +
> +	while (type != VIRTIO_IOMMU_PROBE_T_NONE &&
> +	       cur < viommu->probe_size) {
> +		len = le16_to_cpu(prop->length) + sizeof(*prop);
> +
> +		switch (type) {
> +		case VIRTIO_IOMMU_PROBE_T_RESV_MEM:
> +			ret = viommu_add_resv_mem(vdev, (void *)prop, len);
> +			break;
> +		default:
> +			dev_err(dev, "unknown viommu prop 0x%x\n", type);
> +		}
> +
> +		if (ret)
> +			dev_err(dev, "failed to parse viommu prop 0x%x\n", type);
> +
> +		cur += len;
> +		if (cur >= viommu->probe_size)
> +			break;
> +
> +		prop = (void *)probe->properties + cur;
> +		type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK;
> +	}
> +
> +out_free:
> +	kfree(probe);
> +	return ret;
> +}
> +
>  /* IOMMU API */
>
>  static struct iommu_domain *viommu_domain_alloc(unsigned type)
> @@ -636,15 +737,33 @@ static void viommu_iotlb_sync(struct iommu_domain *domain)
>
>  static void viommu_get_resv_regions(struct device *dev, struct list_head *head)
>  {
> -	struct iommu_resv_region *region;
> +	struct iommu_resv_region *entry, *new_entry, *msi = NULL;
> +	struct viommu_endpoint *vdev = dev->iommu_fwspec->iommu_priv;
>  	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
>
> -	region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, prot,
> -					 IOMMU_RESV_SW_MSI);
> -	if (!region)
> -		return;
> +	list_for_each_entry(entry, &vdev->resv_regions, list) {
> +		/*
> +		 * If the device registered a bypass MSI windows, use it.
> +		 * Otherwise add a software-mapped region
> +		 */
> +		if (entry->type == IOMMU_RESV_MSI)
> +			msi = entry;
> +
> +		new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL);
> +		if (!new_entry)
> +			return;
> +		list_add_tail(&new_entry->list, head);
> +	}
> +
> +	if (!msi) {
> +		msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH,
> +					      prot, IOMMU_RESV_SW_MSI);
> +		if (!msi)
> +			return;
> +
> +		list_add_tail(&msi->list, head);
> +	}
>
> -	list_add_tail(&region->list, head);
>  	iommu_dma_get_resv_regions(dev, head);
>  }
>
> @@ -692,9 +811,18 @@ static int viommu_add_device(struct device *dev)
>  	if (!vdev)
>  		return -ENOMEM;
>
> +	vdev->dev = dev;
>  	vdev->viommu = viommu;
> +	INIT_LIST_HEAD(&vdev->resv_regions);
>  	fwspec->iommu_priv = vdev;
>
> +	if (viommu->probe_size) {
> +		/* Get additional information for this endpoint */
> +		ret = viommu_probe_endpoint(viommu, dev);
> +		if (ret)
> +			return ret;
> +	}
> +
>  	ret = iommu_device_link(&viommu->iommu, dev);
>  	if (ret)
>  		goto err_free_dev;
> @@ -717,6 +845,7 @@ static int viommu_add_device(struct device *dev)
>  	iommu_device_unlink(&viommu->iommu, dev);
>
>  err_free_dev:
> +	viommu_put_resv_regions(dev, &vdev->resv_regions);
>  	kfree(vdev);
>
>  	return ret;
> @@ -734,6 +863,7 @@ static void viommu_remove_device(struct device *dev)
>
>  	iommu_group_remove_device(dev);
>  	iommu_device_unlink(&vdev->viommu->iommu, dev);
> +	viommu_put_resv_regions(dev, &vdev->resv_regions);
>  	kfree(vdev);
>  }
>
> @@ -832,6 +962,10 @@ static int viommu_probe(struct virtio_device *vdev)
>  			     struct virtio_iommu_config, domain_bits,
>  			     &viommu->domain_bits);
>
> +	virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE,
> +			     struct virtio_iommu_config, probe_size,
> +			     &viommu->probe_size);
> +
>  	viommu->geometry = (struct iommu_domain_geometry) {
>  		.aperture_start = input_start,
>  		.aperture_end = input_end,
> @@ -913,6 +1047,7 @@ static unsigned int features[] = {
>  	VIRTIO_IOMMU_F_MAP_UNMAP,
>  	VIRTIO_IOMMU_F_DOMAIN_BITS,
>  	VIRTIO_IOMMU_F_INPUT_RANGE,
> +	VIRTIO_IOMMU_F_PROBE,
>  };
>
>  static struct virtio_device_id id_table[] = {
> diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h
> index e808fc7fbe82..feed74586bb0 100644
> --- a/include/uapi/linux/virtio_iommu.h
> +++ b/include/uapi/linux/virtio_iommu.h
> @@ -14,6 +14,7 @@
>  #define VIRTIO_IOMMU_F_DOMAIN_BITS 1
>  #define VIRTIO_IOMMU_F_MAP_UNMAP 2
>  #define VIRTIO_IOMMU_F_BYPASS 3
> +#define VIRTIO_IOMMU_F_PROBE 4
>
>  struct virtio_iommu_config {
>  	/* Supported page sizes */
> @@ -25,6 +26,9 @@ struct virtio_iommu_config {
>  	} input_range;
>  	/* Max domain ID size */
>  	__u8 domain_bits;
> +	__u8 padding[3];
> +	/* Probe buffer size */

Oh so you do add the padding in this patch, it's just a question of
moving this line to the earlier patch then.
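I.e. the earlier patch would already end up with (just restating the
layout from this patch, no new fields):

	/* Max domain ID size */
	__u8 domain_bits;
	__u8 padding[3];

and this patch would only append the probe_size field after it.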
> +	__u32 probe_size;
>  };
>
>  /* Request types */
> @@ -32,6 +36,7 @@ struct virtio_iommu_config {
>  #define VIRTIO_IOMMU_T_DETACH 0x02
>  #define VIRTIO_IOMMU_T_MAP 0x03
>  #define VIRTIO_IOMMU_T_UNMAP 0x04
> +#define VIRTIO_IOMMU_T_PROBE 0x05
>
>  /* Status types */
>  #define VIRTIO_IOMMU_S_OK 0x00
> @@ -98,4 +103,38 @@ struct virtio_iommu_req_unmap {
>  	struct virtio_iommu_req_tail tail;
>  };
>
> +#define VIRTIO_IOMMU_PROBE_T_NONE 0
> +#define VIRTIO_IOMMU_PROBE_T_RESV_MEM 1
> +
> +#define VIRTIO_IOMMU_PROBE_T_MASK 0xfff
> +
> +struct virtio_iommu_probe_property {
> +	__le16 type;
> +	__le16 length;
> +};
> +
> +#define VIRTIO_IOMMU_RESV_MEM_T_RESERVED 0
> +#define VIRTIO_IOMMU_RESV_MEM_T_MSI 1
> +
> +struct virtio_iommu_probe_resv_mem {
> +	struct virtio_iommu_probe_property head;
> +	__u8 subtype;
> +	__u8 reserved[3];
> +	__le64 start;
> +	__le64 end;
> +};
> +
> +struct virtio_iommu_req_probe {
> +	struct virtio_iommu_req_head head;
> +	__le32 endpoint;
> +	__u8 reserved[64];
> +
> +	__u8 properties[];
> +
> +	/*
> +	 * Tail follows the variable-length properties array. No padding,
> +	 * property lengths are all aligned on 8 bytes.
> +	 */
> +};
> +
>  #endif
> --
> 2.19.1
Tsirkin" To: Jean-Philippe Brucker Cc: iommu@lists.linux-foundation.org, virtualization@lists.linux-foundation.org, devicetree@vger.kernel.org, linux-pci@vger.kernel.org, kvmarm@lists.cs.columbia.edu, peter.maydell@linaro.org, joro@8bytes.org, jasowang@redhat.com, robh+dt@kernel.org, mark.rutland@arm.com, eric.auger@redhat.com, tnowicki@caviumnetworks.com, kevin.tian@intel.com, marc.zyngier@arm.com, robin.murphy@arm.com, will.deacon@arm.com, lorenzo.pieralisi@arm.com Subject: Re: [PATCH v3 6/7] iommu/virtio: Add probe request Message-ID: <20181012123633-mutt-send-email-mst@kernel.org> References: <20181012145917.6840-1-jean-philippe.brucker@arm.com> <20181012145917.6840-7-jean-philippe.brucker@arm.com> MIME-Version: 1.0 Content-Type: text/plain; charset=us-ascii Content-Disposition: inline In-Reply-To: <20181012145917.6840-7-jean-philippe.brucker@arm.com> X-Scanned-By: MIMEDefang 2.79 on 10.5.11.13 X-Greylist: Sender IP whitelisted, not delayed by milter-greylist-4.5.16 (mx1.redhat.com [10.5.110.43]); Fri, 12 Oct 2018 16:42:10 +0000 (UTC) Sender: linux-pci-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-pci@vger.kernel.org On Fri, Oct 12, 2018 at 03:59:16PM +0100, Jean-Philippe Brucker wrote: > When the device offers the probe feature, send a probe request for each > device managed by the IOMMU. Extract RESV_MEM information. When we > encounter a MSI doorbell region, set it up as a IOMMU_RESV_MSI region. > This will tell other subsystems that there is no need to map the MSI > doorbell in the virtio-iommu, because MSIs bypass it. > > Signed-off-by: Jean-Philippe Brucker > --- > drivers/iommu/virtio-iommu.c | 147 ++++++++++++++++++++++++++++-- > include/uapi/linux/virtio_iommu.h | 39 ++++++++ > 2 files changed, 180 insertions(+), 6 deletions(-) > > diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c > index 9fb38cd3b727..8eaf66770469 100644 > --- a/drivers/iommu/virtio-iommu.c > +++ b/drivers/iommu/virtio-iommu.c > @@ -56,6 +56,7 @@ struct viommu_dev { > struct iommu_domain_geometry geometry; > u64 pgsize_bitmap; > u8 domain_bits; > + u32 probe_size; > }; > > struct viommu_mapping { > @@ -77,8 +78,10 @@ struct viommu_domain { > }; > > struct viommu_endpoint { > + struct device *dev; > struct viommu_dev *viommu; > struct viommu_domain *vdomain; > + struct list_head resv_regions; > }; > > struct viommu_request { > @@ -129,6 +132,9 @@ static off_t viommu_get_req_offset(struct viommu_dev *viommu, > { > size_t tail_size = sizeof(struct virtio_iommu_req_tail); > > + if (req->type == VIRTIO_IOMMU_T_PROBE) > + return len - viommu->probe_size - tail_size; > + > return len - tail_size; > } > > @@ -414,6 +420,101 @@ static int viommu_replay_mappings(struct viommu_domain *vdomain) > return ret; > } > > +static int viommu_add_resv_mem(struct viommu_endpoint *vdev, > + struct virtio_iommu_probe_resv_mem *mem, > + size_t len) > +{ > + struct iommu_resv_region *region = NULL; > + unsigned long prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; > + > + u64 start = le64_to_cpu(mem->start); > + u64 end = le64_to_cpu(mem->end); > + size_t size = end - start + 1; > + > + if (len < sizeof(*mem)) > + return -EINVAL; Maybe validate that size, end and start all make sense and e.g. fit within phys_addr_t and size_t? 
> + > + switch (mem->subtype) { > + default: > + dev_warn(vdev->dev, "unknown resv mem subtype 0x%x\n", > + mem->subtype); > + /* Fall-through */ > + case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: > + region = iommu_alloc_resv_region(start, size, 0, > + IOMMU_RESV_RESERVED); > + break; > + case VIRTIO_IOMMU_RESV_MEM_T_MSI: > + region = iommu_alloc_resv_region(start, size, prot, > + IOMMU_RESV_MSI); > + break; > + } > + > + list_add(&vdev->resv_regions, ®ion->list); > + return 0; > +} > + > +static int viommu_probe_endpoint(struct viommu_dev *viommu, struct device *dev) > +{ > + int ret; > + u16 type, len; > + size_t cur = 0; > + size_t probe_len; > + struct virtio_iommu_req_probe *probe; > + struct virtio_iommu_probe_property *prop; > + struct iommu_fwspec *fwspec = dev->iommu_fwspec; > + struct viommu_endpoint *vdev = fwspec->iommu_priv; > + > + if (!fwspec->num_ids) > + return -EINVAL; > + > + probe_len = sizeof(*probe) + viommu->probe_size + > + sizeof(struct virtio_iommu_req_tail); > + probe = kzalloc(probe_len, GFP_KERNEL); > + if (!probe) > + return -ENOMEM; > + > + probe->head.type = VIRTIO_IOMMU_T_PROBE; > + /* > + * For now, assume that properties of an endpoint that outputs multiple > + * IDs are consistent. Only probe the first one. > + */ > + probe->endpoint = cpu_to_le32(fwspec->ids[0]); > + > + ret = viommu_send_req_sync(viommu, probe, probe_len); > + if (ret) > + goto out_free; > + > + prop = (void *)probe->properties; > + type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; > + > + while (type != VIRTIO_IOMMU_PROBE_T_NONE && > + cur < viommu->probe_size) { > + len = le16_to_cpu(prop->length) + sizeof(*prop); > + > + switch (type) { > + case VIRTIO_IOMMU_PROBE_T_RESV_MEM: > + ret = viommu_add_resv_mem(vdev, (void *)prop, len); > + break; > + default: > + dev_err(dev, "unknown viommu prop 0x%x\n", type); > + } > + > + if (ret) > + dev_err(dev, "failed to parse viommu prop 0x%x\n", type); > + > + cur += len; > + if (cur >= viommu->probe_size) > + break; > + > + prop = (void *)probe->properties + cur; > + type = le16_to_cpu(prop->type) & VIRTIO_IOMMU_PROBE_T_MASK; > + } > + > +out_free: > + kfree(probe); > + return ret; > +} > + > /* IOMMU API */ > > static struct iommu_domain *viommu_domain_alloc(unsigned type) > @@ -636,15 +737,33 @@ static void viommu_iotlb_sync(struct iommu_domain *domain) > > static void viommu_get_resv_regions(struct device *dev, struct list_head *head) > { > - struct iommu_resv_region *region; > + struct iommu_resv_region *entry, *new_entry, *msi = NULL; > + struct viommu_endpoint *vdev = dev->iommu_fwspec->iommu_priv; > int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; > > - region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, prot, > - IOMMU_RESV_SW_MSI); > - if (!region) > - return; > + list_for_each_entry(entry, &vdev->resv_regions, list) { > + /* > + * If the device registered a bypass MSI windows, use it. 
> + * Otherwise add a software-mapped region > + */ > + if (entry->type == IOMMU_RESV_MSI) > + msi = entry; > + > + new_entry = kmemdup(entry, sizeof(*entry), GFP_KERNEL); > + if (!new_entry) > + return; > + list_add_tail(&new_entry->list, head); > + } > + > + if (!msi) { > + msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, > + prot, IOMMU_RESV_SW_MSI); > + if (!msi) > + return; > + > + list_add_tail(&msi->list, head); > + } > > - list_add_tail(®ion->list, head); > iommu_dma_get_resv_regions(dev, head); > } > > @@ -692,9 +811,18 @@ static int viommu_add_device(struct device *dev) > if (!vdev) > return -ENOMEM; > > + vdev->dev = dev; > vdev->viommu = viommu; > + INIT_LIST_HEAD(&vdev->resv_regions); > fwspec->iommu_priv = vdev; > > + if (viommu->probe_size) { > + /* Get additional information for this endpoint */ > + ret = viommu_probe_endpoint(viommu, dev); > + if (ret) > + return ret; > + } > + > ret = iommu_device_link(&viommu->iommu, dev); > if (ret) > goto err_free_dev; > @@ -717,6 +845,7 @@ static int viommu_add_device(struct device *dev) > iommu_device_unlink(&viommu->iommu, dev); > > err_free_dev: > + viommu_put_resv_regions(dev, &vdev->resv_regions); > kfree(vdev); > > return ret; > @@ -734,6 +863,7 @@ static void viommu_remove_device(struct device *dev) > > iommu_group_remove_device(dev); > iommu_device_unlink(&vdev->viommu->iommu, dev); > + viommu_put_resv_regions(dev, &vdev->resv_regions); > kfree(vdev); > } > > @@ -832,6 +962,10 @@ static int viommu_probe(struct virtio_device *vdev) > struct virtio_iommu_config, domain_bits, > &viommu->domain_bits); > > + virtio_cread_feature(vdev, VIRTIO_IOMMU_F_PROBE, > + struct virtio_iommu_config, probe_size, > + &viommu->probe_size); > + > viommu->geometry = (struct iommu_domain_geometry) { > .aperture_start = input_start, > .aperture_end = input_end, > @@ -913,6 +1047,7 @@ static unsigned int features[] = { > VIRTIO_IOMMU_F_MAP_UNMAP, > VIRTIO_IOMMU_F_DOMAIN_BITS, > VIRTIO_IOMMU_F_INPUT_RANGE, > + VIRTIO_IOMMU_F_PROBE, > }; > > static struct virtio_device_id id_table[] = { > diff --git a/include/uapi/linux/virtio_iommu.h b/include/uapi/linux/virtio_iommu.h > index e808fc7fbe82..feed74586bb0 100644 > --- a/include/uapi/linux/virtio_iommu.h > +++ b/include/uapi/linux/virtio_iommu.h > @@ -14,6 +14,7 @@ > #define VIRTIO_IOMMU_F_DOMAIN_BITS 1 > #define VIRTIO_IOMMU_F_MAP_UNMAP 2 > #define VIRTIO_IOMMU_F_BYPASS 3 > +#define VIRTIO_IOMMU_F_PROBE 4 > > struct virtio_iommu_config { > /* Supported page sizes */ > @@ -25,6 +26,9 @@ struct virtio_iommu_config { > } input_range; > /* Max domain ID size */ > __u8 domain_bits; > + __u8 padding[3]; > + /* Probe buffer size */ Oh so you do add the padding in this patch, it's just a question of moving this line to the earlier patch then. 
> + __u32 probe_size; > }; > > /* Request types */ > @@ -32,6 +36,7 @@ struct virtio_iommu_config { > #define VIRTIO_IOMMU_T_DETACH 0x02 > #define VIRTIO_IOMMU_T_MAP 0x03 > #define VIRTIO_IOMMU_T_UNMAP 0x04 > +#define VIRTIO_IOMMU_T_PROBE 0x05 > > /* Status types */ > #define VIRTIO_IOMMU_S_OK 0x00 > @@ -98,4 +103,38 @@ struct virtio_iommu_req_unmap { > struct virtio_iommu_req_tail tail; > }; > > +#define VIRTIO_IOMMU_PROBE_T_NONE 0 > +#define VIRTIO_IOMMU_PROBE_T_RESV_MEM 1 > + > +#define VIRTIO_IOMMU_PROBE_T_MASK 0xfff > + > +struct virtio_iommu_probe_property { > + __le16 type; > + __le16 length; > +}; > + > +#define VIRTIO_IOMMU_RESV_MEM_T_RESERVED 0 > +#define VIRTIO_IOMMU_RESV_MEM_T_MSI 1 > + > +struct virtio_iommu_probe_resv_mem { > + struct virtio_iommu_probe_property head; > + __u8 subtype; > + __u8 reserved[3]; > + __le64 start; > + __le64 end; > +}; > + > +struct virtio_iommu_req_probe { > + struct virtio_iommu_req_head head; > + __le32 endpoint; > + __u8 reserved[64]; > + > + __u8 properties[]; > + > + /* > + * Tail follows the variable-length properties array. No padding, > + * property lengths are all aligned on 8 bytes. > + */ > +}; > + > #endif > -- > 2.19.1