From: Eric Auger <eric.auger@linaro.org>
To: eric.auger@st.com, eric.auger@linaro.org,
	alex.williamson@redhat.com, will.deacon@arm.com,
	christoffer.dall@linaro.org, marc.zyngier@arm.com,
	linux-arm-kernel@lists.infradead.org,
	kvmarm@lists.cs.columbia.edu, kvm@vger.kernel.org
Cc: Bharat.Bhushan@freescale.com, pranav.sawargaonkar@gmail.com,
	p.fedin@samsung.com, suravee.suthikulpanit@amd.com,
	linux-kernel@vger.kernel.org, patches@linaro.org,
	iommu@lists.linux-foundation.org
Subject: [PATCH 10/10] vfio: allow the user to register reserved iova range for MSI mapping
Date: Tue, 26 Jan 2016 13:12:48 +0000
Message-ID: <1453813968-2024-11-git-send-email-eric.auger@linaro.org>
In-Reply-To: <1453813968-2024-1-git-send-email-eric.auger@linaro.org>

The user is allowed to register a reserved IOVA range by using the
DMA MAP API and setting the new flag, VFIO_DMA_MAP_FLAG_MSI_RESERVED_IOVA,
along with the base address and the size of the region. The region is
stored in the vfio_dma RB tree. At that point the IOVA range is not
mapped to any target address yet; the host kernel will use those IOVAs
when needed, typically when the VFIO-PCI device allocates its MSIs.
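
A minimal userspace sketch of the intended flow (illustration only, not
part of this patch): the container fd, base address, and size below are
made-up values, and VFIO_IOMMU_INFO_REQUIRE_MSI_MAP is introduced by
patch 02/10 of this series.

#include <string.h>
#include <sys/ioctl.h>
#include <linux/vfio.h>

static int register_msi_iova(int container)
{
	struct vfio_iommu_type1_info info = { .argsz = sizeof(info) };
	struct vfio_iommu_type1_dma_map map;

	/* check whether this platform requires an MSI IOVA mapping */
	if (ioctl(container, VFIO_IOMMU_GET_INFO, &info))
		return -1;
	if (!(info.flags & VFIO_IOMMU_INFO_REQUIRE_MSI_MAP))
		return 0;	/* nothing to provision */

	/* register a single reserved IOVA region for MSI mapping */
	memset(&map, 0, sizeof(map));
	map.argsz = sizeof(map);
	map.flags = VFIO_DMA_MAP_FLAG_MSI_RESERVED_IOVA;
	map.iova = 0x8000000;	/* example base, IOMMU-page aligned */
	map.size = 0x10000;	/* example size */
	/* vaddr and the READ/WRITE flags are ignored with this flag */

	return ioctl(container, VFIO_IOMMU_MAP_DMA, &map);
}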

This patch also handles the destruction of the reserved binding RB tree
and of each domain's reserved iova_domain.

Signed-off-by: Eric Auger <eric.auger@linaro.org>
Signed-off-by: Bharat Bhushan <Bharat.Bhushan@freescale.com>

---

- Currently the user is not yet informed about the number of pages to
  provide

RFC v1 -> RFC v2:
- take into account Alex's comments on
  [RFC PATCH 1/6] vfio: Add interface for add/del reserved iova region:
- use the existing DMA map/unmap ioctl interface with a flag to register
  a reserved IOVA range. A single reserved IOVA region is allowed.
---
 drivers/vfio/vfio_iommu_type1.c | 98 ++++++++++++++++++++++++++++++++++++++++-
 include/uapi/linux/vfio.h       |  9 ++++
 2 files changed, 106 insertions(+), 1 deletion(-)

diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2f085d3..37c7d78 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -538,10 +538,40 @@ static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
 	vfio_lock_acct(-unlocked);
 }
 
+/* vfio_unmap_reserved: unmap and free all reserved binding nodes
+ * for all domains and destroy their iova_domain
+ *
+ * @iommu: iommu handle
+ */
+static void vfio_unmap_reserved(struct vfio_iommu *iommu)
+{
+	struct vfio_domain *d;
+
+	list_for_each_entry(d, &iommu->domain_list, next) {
+		struct rb_node *node;
+
+		while ((node = rb_first(&d->reserved_binding_list))) {
+			struct vfio_reserved_binding *b =
+				rb_entry(node,
+					 struct vfio_reserved_binding, node);
+
+			while (!kref_put(&b->kref,
+				vfio_reserved_binding_release)) {
+			}
+		}
+		d->reserved_binding_list = RB_ROOT;
+
+		put_iova_domain(d->reserved_iova_domain);
+		kfree(d->reserved_iova_domain);
+	}
+}
+
 static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
 {
 	if (likely(dma->type != VFIO_IOVA_RESERVED))
 		vfio_unmap_unpin(iommu, dma);
+	else
+		vfio_unmap_reserved(iommu);
 	vfio_unlink_dma(iommu, dma);
 	kfree(dma);
 }
@@ -785,6 +815,68 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu,
 	return ret;
 }
 
+static int vfio_register_reserved_iova_range(struct vfio_iommu *iommu,
+			   struct vfio_iommu_type1_dma_map *map)
+{
+	dma_addr_t iova = map->iova;
+	size_t size = map->size;
+	uint64_t mask;
+	struct vfio_dma *dma;
+	int ret = 0;
+	struct vfio_domain *d;
+	unsigned long order;
+
+	/* Verify that none of our __u64 fields overflow */
+	if (map->size != size || map->iova != iova)
+		return -EINVAL;
+
+	order = __ffs(vfio_pgsize_bitmap(iommu));
+	mask = ((uint64_t)1 << order) - 1;
+
+	WARN_ON(mask & PAGE_MASK);
+
+	/* we currently only support MSI_RESERVED_IOVA */
+	if (!(map->flags & VFIO_DMA_MAP_FLAG_MSI_RESERVED_IOVA))
+		return -EINVAL;
+
+	if (!size || (size | iova) & mask)
+		return -EINVAL;
+
+	/* Don't allow IOVA address wrap */
+	if (iova + size - 1 < iova)
+		return -EINVAL;
+
+	mutex_lock(&iommu->lock);
+
+	/* check whether the iova domain has already been instantiated */
+	d = list_first_entry(&iommu->domain_list,
+				  struct vfio_domain, next);
+
+	if (d->reserved_iova_domain || vfio_find_dma(iommu, iova, size)) {
+		ret = -EEXIST;
+		goto out;
+	}
+
+	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
+	if (!dma) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	dma->iova = iova;
+	dma->size = size;
+	dma->type = VFIO_IOVA_RESERVED;
+
+	vfio_link_dma(iommu, dma);
+
+	list_for_each_entry(d, &iommu->domain_list, next)
+		alloc_reserved_iova_domain(d, iova, size, order);
+
+out:
+	mutex_unlock(&iommu->lock);
+	return ret;
+}
+
 static int vfio_bus_type(struct device *dev, void *data)
 {
 	struct bus_type **bus = data;
@@ -1297,7 +1389,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
 		struct vfio_iommu_type1_dma_map map;
 		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
-				VFIO_DMA_MAP_FLAG_WRITE;
+				VFIO_DMA_MAP_FLAG_WRITE |
+				VFIO_DMA_MAP_FLAG_MSI_RESERVED_IOVA;
 
 		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);
 
@@ -1307,6 +1400,9 @@ static long vfio_iommu_type1_ioctl(void *iommu_data,
 		if (map.argsz < minsz || map.flags & ~mask)
 			return -EINVAL;
 
+		if (map.flags & VFIO_DMA_MAP_FLAG_MSI_RESERVED_IOVA)
+			return vfio_register_reserved_iova_range(iommu, &map);
+
 		return vfio_dma_do_map(iommu, &map);
 
 	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 43e183b..982e326 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -411,12 +411,21 @@ struct vfio_iommu_type1_info {
  *
  * Map process virtual addresses to IO virtual addresses using the
  * provided struct vfio_dma_map. Caller sets argsz. READ &/ WRITE required.
+ *
+ * If MSI_RESERVED_IOVA is set, the API only registers an IOVA region
+ * which will be used on some platforms to map the host MSI frame.
+ * In that specific case, vaddr and prot are ignored. Whether such an
+ * IOVA range must be provisioned can be checked by calling
+ * VFIO_IOMMU_GET_INFO with the VFIO_IOMMU_INFO_REQUIRE_MSI_MAP
+ * attribute. A single MSI_RESERVED_IOVA region can be registered.
  */
 struct vfio_iommu_type1_dma_map {
 	__u32	argsz;
 	__u32	flags;
 #define VFIO_DMA_MAP_FLAG_READ (1 << 0)		/* readable from device */
 #define VFIO_DMA_MAP_FLAG_WRITE (1 << 1)	/* writable from device */
+/* reserved iova for MSI vectors */
+#define VFIO_DMA_MAP_FLAG_MSI_RESERVED_IOVA (1 << 2)
 	__u64	vaddr;				/* Process virtual address */
 	__u64	iova;				/* IO virtual address */
 	__u64	size;				/* Size of mapping (bytes) */
-- 
1.9.1
