All of lore.kernel.org
 help / color / mirror / Atom feed
From: Joao Martins <joao.m.martins@oracle.com>
To: iommu@lists.linux-foundation.org
Cc: Joao Martins <joao.m.martins@oracle.com>,
	Joerg Roedel <joro@8bytes.org>,
	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
	Will Deacon <will@kernel.org>,
	Robin Murphy <robin.murphy@arm.com>,
	Jean-Philippe Brucker <jean-philippe@linaro.org>,
	Keqian Zhu <zhukeqian1@huawei.com>,
	Shameerali Kolothum Thodi  <shameerali.kolothum.thodi@huawei.com>,
	David Woodhouse <dwmw2@infradead.org>,
	Lu Baolu <baolu.lu@linux.intel.com>,
	Jason Gunthorpe <jgg@nvidia.com>,
	Nicolin Chen <nicolinc@nvidia.com>,
	Yishai Hadas <yishaih@nvidia.com>,
	Kevin Tian <kevin.tian@intel.com>,
	Eric Auger <eric.auger@redhat.com>, Yi Liu <yi.l.liu@intel.com>,
	Alex Williamson <alex.williamson@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	kvm@vger.kernel.org
Subject: [PATCH RFC 07/19] iommufd/vfio-compat: Dirty tracking IOCTLs compatibility
Date: Thu, 28 Apr 2022 22:09:21 +0100	[thread overview]
Message-ID: <20220428210933.3583-8-joao.m.martins@oracle.com> (raw)
In-Reply-To: <20220428210933.3583-1-joao.m.martins@oracle.com>

Add the corresponding APIs for performing VFIO dirty tracking,
particularly VFIO_IOMMU_DIRTY_PAGES ioctl subcmds:
* VFIO_IOMMU_DIRTY_PAGES_FLAG_START: Start dirty tracking and allocates
				     the area @dirty_bitmap
* VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP: Stop dirty tracking and frees
				    the area @dirty_bitmap
* VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP: Fetch dirty bitmap while dirty
tracking is active.

Advertise the VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION capability,
reporting the domain's configured page size as
iopt::iova_alignment and the maximum dirty bitmap size the same
as VFIO. Compared to the VFIO type1 iommu, perpetual dirtying is
not implemented and userspace gets -EOPNOTSUPP, which is handled by
today's userspace.

Move iommufd_get_pagesizes() definition prior to unmap for
iommufd_vfio_unmap_dma() dirty support to validate the user bitmap page
size against IOPT pagesize.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 drivers/iommu/iommufd/vfio_compat.c | 221 ++++++++++++++++++++++++++--
 1 file changed, 209 insertions(+), 12 deletions(-)

diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c
index dbe39404a105..2802f49cc10d 100644
--- a/drivers/iommu/iommufd/vfio_compat.c
+++ b/drivers/iommu/iommufd/vfio_compat.c
@@ -56,6 +56,16 @@ create_compat_ioas(struct iommufd_ctx *ictx)
 	return ioas;
 }
 
+static u64 iommufd_get_pagesizes(struct iommufd_ioas *ioas)
+{
+	/* FIXME: See vfio_update_pgsize_bitmap(), for compat this should return
+	 * the high bits too, and we need to decide if we should report that
+	 * iommufd supports less than PAGE_SIZE alignment or stick to strict
+	 * compatibility. qemu only cares about the first set bit.
+	 */
+	return ioas->iopt.iova_alignment;
+}
+
 int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)
 {
 	struct iommu_vfio_ioas *cmd = ucmd->cmd;
@@ -130,9 +140,14 @@ static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd,
 				  void __user *arg)
 {
 	size_t minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
-	u32 supported_flags = VFIO_DMA_UNMAP_FLAG_ALL;
+	u32 supported_flags = VFIO_DMA_UNMAP_FLAG_ALL |
+		VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
+	struct iommufd_dirty_data dirty, *dirtyp = NULL;
 	struct vfio_iommu_type1_dma_unmap unmap;
+	struct vfio_bitmap bitmap;
 	struct iommufd_ioas *ioas;
+	unsigned long pgshift;
+	size_t pgsize;
 	int rc;
 
 	if (copy_from_user(&unmap, arg, minsz))
@@ -141,14 +156,53 @@ static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd,
 	if (unmap.argsz < minsz || unmap.flags & ~supported_flags)
 		return -EINVAL;
 
+	if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+		unsigned long npages;
+
+		if (copy_from_user(&bitmap,
+				   (void __user *)(arg + minsz),
+				   sizeof(bitmap)))
+			return -EFAULT;
+
+		if (!access_ok((void __user *)bitmap.data, bitmap.size))
+			return -EINVAL;
+
+		pgshift = __ffs(bitmap.pgsize);
+		npages = unmap.size >> pgshift;
+
+		if (!npages || !bitmap.size ||
+		    (bitmap.size > DIRTY_BITMAP_SIZE_MAX) ||
+		    (bitmap.size < dirty_bitmap_bytes(npages)))
+			return -EINVAL;
+
+		dirty.iova = unmap.iova;
+		dirty.length = unmap.size;
+		dirty.data = bitmap.data;
+		dirty.page_size = 1 << pgshift;
+		dirtyp = &dirty;
+	}
+
 	ioas = get_compat_ioas(ictx);
 	if (IS_ERR(ioas))
 		return PTR_ERR(ioas);
 
+	pgshift = __ffs(iommufd_get_pagesizes(ioas)),
+	pgsize = (size_t)1 << pgshift;
+
+	/* When dirty tracking is enabled, allow only min supported pgsize */
+	if ((unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
+	    (bitmap.pgsize != pgsize)) {
+		rc = -EINVAL;
+		goto out_put;
+	}
+
 	if (unmap.flags & VFIO_DMA_UNMAP_FLAG_ALL)
 		rc = iopt_unmap_all(&ioas->iopt);
 	else
-		rc = iopt_unmap_iova(&ioas->iopt, unmap.iova, unmap.size, NULL);
+		rc = iopt_unmap_iova(&ioas->iopt, unmap.iova, unmap.size,
+				     dirtyp);
+
+out_put:
 	iommufd_put_object(&ioas->obj);
 	return rc;
 }
@@ -222,16 +276,6 @@ static int iommufd_vfio_set_iommu(struct iommufd_ctx *ictx, unsigned long type)
 	return 0;
 }
 
-static u64 iommufd_get_pagesizes(struct iommufd_ioas *ioas)
-{
-	/* FIXME: See vfio_update_pgsize_bitmap(), for compat this should return
-	 * the high bits too, and we need to decide if we should report that
-	 * iommufd supports less than PAGE_SIZE alignment or stick to strict
-	 * compatibility. qemu only cares about the first set bit.
-	 */
-	return ioas->iopt.iova_alignment;
-}
-
 static int iommufd_fill_cap_iova(struct iommufd_ioas *ioas,
 				 struct vfio_info_cap_header __user *cur,
 				 size_t avail)
@@ -289,6 +333,26 @@ static int iommufd_fill_cap_dma_avail(struct iommufd_ioas *ioas,
 	return sizeof(cap_dma);
 }
 
+static int iommufd_fill_cap_migration(struct iommufd_ioas *ioas,
+				      struct vfio_info_cap_header __user *cur,
+				      size_t avail)
+{
+	struct vfio_iommu_type1_info_cap_migration cap_mig = {
+		.header = {
+			.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION,
+			.version = 1,
+		},
+		.flags = 0,
+		.pgsize_bitmap = (size_t) 1 << __ffs(iommufd_get_pagesizes(ioas)),
+		.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX,
+	};
+
+	if (avail >= sizeof(cap_mig) &&
+	    copy_to_user(cur, &cap_mig, sizeof(cap_mig)))
+		return -EFAULT;
+	return sizeof(cap_mig);
+}
+
 static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
 				       void __user *arg)
 {
@@ -298,6 +362,7 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
 	static const fill_cap_fn fill_fns[] = {
 		iommufd_fill_cap_iova,
 		iommufd_fill_cap_dma_avail,
+		iommufd_fill_cap_migration,
 	};
 	size_t minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
 	struct vfio_info_cap_header __user *last_cap = NULL;
@@ -364,6 +429,137 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
 	return rc;
 }
 
+static int iommufd_vfio_dirty_pages_start(struct iommufd_ctx *ictx,
+				struct vfio_iommu_type1_dirty_bitmap *dirty)
+{
+	struct iommufd_ioas *ioas;
+	int ret = -EINVAL;
+
+	ioas = get_compat_ioas(ictx);
+	if (IS_ERR(ioas))
+		return PTR_ERR(ioas);
+
+	ret = iopt_set_dirty_tracking(&ioas->iopt, NULL, true);
+
+	iommufd_put_object(&ioas->obj);
+
+	return ret;
+}
+
+static int iommufd_vfio_dirty_pages_stop(struct iommufd_ctx *ictx,
+				struct vfio_iommu_type1_dirty_bitmap *dirty)
+{
+	struct iommufd_ioas *ioas;
+	int ret;
+
+	ioas = get_compat_ioas(ictx);
+	if (IS_ERR(ioas))
+		return PTR_ERR(ioas);
+
+	ret = iopt_set_dirty_tracking(&ioas->iopt, NULL, false);
+
+	iommufd_put_object(&ioas->obj);
+
+	return ret;
+}
+
+static int iommufd_vfio_dirty_pages_get_bitmap(struct iommufd_ctx *ictx,
+				struct vfio_iommu_type1_dirty_bitmap_get *range)
+{
+	struct iommufd_dirty_data bitmap;
+	uint64_t npages, bitmap_size;
+	struct iommufd_ioas *ioas;
+	unsigned long pgshift;
+	size_t iommu_pgsize;
+	int ret = -EINVAL;
+
+	ioas = get_compat_ioas(ictx);
+	if (IS_ERR(ioas))
+		return PTR_ERR(ioas);
+
+	down_read(&ioas->iopt.iova_rwsem);
+	pgshift = __ffs(range->bitmap.pgsize);
+	npages = range->size >> pgshift;
+	bitmap_size = range->bitmap.size;
+
+	if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) ||
+	    (bitmap_size < dirty_bitmap_bytes(npages)))
+		goto out_put;
+
+	iommu_pgsize = 1 << __ffs(iommufd_get_pagesizes(ioas));
+
+	/* allow only smallest supported pgsize */
+	if (range->bitmap.pgsize != iommu_pgsize)
+		goto out_put;
+
+	if (range->iova & (iommu_pgsize - 1))
+		goto out_put;
+
+	if (!range->size || range->size & (iommu_pgsize - 1))
+		goto out_put;
+
+	bitmap.iova = range->iova;
+	bitmap.length = range->size;
+	bitmap.data = range->bitmap.data;
+	bitmap.page_size = 1 << pgshift;
+
+	ret = iopt_read_and_clear_dirty_data(&ioas->iopt, NULL, &bitmap);
+
+out_put:
+	up_read(&ioas->iopt.iova_rwsem);
+	iommufd_put_object(&ioas->obj);
+	return ret;
+}
+
+static int iommufd_vfio_dirty_pages(struct iommufd_ctx *ictx, unsigned int cmd,
+				    void __user *arg)
+{
+	size_t minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);
+	struct vfio_iommu_type1_dirty_bitmap dirty;
+	u32 supported_flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+			VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
+			VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+	int ret = 0;
+
+	if (copy_from_user(&dirty, (void __user *)arg, minsz))
+		return -EFAULT;
+
+	if (dirty.argsz < minsz || dirty.flags & ~supported_flags)
+		return -EINVAL;
+
+	/* only one flag should be set at a time */
+	if (__ffs(dirty.flags) != __fls(dirty.flags))
+		return -EINVAL;
+
+	if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
+		ret = iommufd_vfio_dirty_pages_start(ictx, &dirty);
+	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
+		ret = iommufd_vfio_dirty_pages_stop(ictx, &dirty);
+	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
+		struct vfio_iommu_type1_dirty_bitmap_get range;
+		size_t data_size = dirty.argsz - minsz;
+
+		if (!data_size || data_size < sizeof(range))
+			return -EINVAL;
+
+		if (copy_from_user(&range, (void __user *)(arg + minsz),
+				   sizeof(range)))
+			return -EFAULT;
+
+		if (range.iova + range.size < range.iova)
+			return -EINVAL;
+
+		if (!access_ok((void __user *)range.bitmap.data,
+			       range.bitmap.size))
+			return -EINVAL;
+
+		ret = iommufd_vfio_dirty_pages_get_bitmap(ictx, &range);
+	}
+
+	return ret;
+}
+
+
 /* FIXME TODO:
 PowerPC SPAPR only:
 #define VFIO_IOMMU_ENABLE	_IO(VFIO_TYPE, VFIO_BASE + 15)
@@ -394,6 +590,7 @@ int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd,
 	case VFIO_IOMMU_UNMAP_DMA:
 		return iommufd_vfio_unmap_dma(ictx, cmd, uarg);
 	case VFIO_IOMMU_DIRTY_PAGES:
+		return iommufd_vfio_dirty_pages(ictx, cmd, uarg);
 	default:
 		return -ENOIOCTLCMD;
 	}
-- 
2.17.2


WARNING: multiple messages have this Message-ID (diff)
From: Joao Martins <joao.m.martins@oracle.com>
To: iommu@lists.linux-foundation.org
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>,
	Kevin Tian <kevin.tian@intel.com>,
	Yishai Hadas <yishaih@nvidia.com>,
	Jason Gunthorpe <jgg@nvidia.com>,
	kvm@vger.kernel.org, Will Deacon <will@kernel.org>,
	Cornelia Huck <cohuck@redhat.com>,
	Alex Williamson <alex.williamson@redhat.com>,
	Joao Martins <joao.m.martins@oracle.com>,
	David Woodhouse <dwmw2@infradead.org>,
	Robin Murphy <robin.murphy@arm.com>
Subject: [PATCH RFC 07/19] iommufd/vfio-compat: Dirty tracking IOCTLs compatibility
Date: Thu, 28 Apr 2022 22:09:21 +0100	[thread overview]
Message-ID: <20220428210933.3583-8-joao.m.martins@oracle.com> (raw)
In-Reply-To: <20220428210933.3583-1-joao.m.martins@oracle.com>

Add the corresponding APIs for performing VFIO dirty tracking,
particularly VFIO_IOMMU_DIRTY_PAGES ioctl subcmds:
* VFIO_IOMMU_DIRTY_PAGES_FLAG_START: Start dirty tracking and allocates
				     the area @dirty_bitmap
* VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP: Stop dirty tracking and frees
				    the area @dirty_bitmap
* VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP: Fetch dirty bitmap while dirty
tracking is active.

Advertise the VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION capability,
reporting the domain's configured page size as
iopt::iova_alignment and the maximum dirty bitmap size the same
as VFIO. Compared to the VFIO type1 iommu, perpetual dirtying is
not implemented and userspace gets -EOPNOTSUPP, which is handled by
today's userspace.

Move iommufd_get_pagesizes() definition prior to unmap for
iommufd_vfio_unmap_dma() dirty support to validate the user bitmap page
size against IOPT pagesize.

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 drivers/iommu/iommufd/vfio_compat.c | 221 ++++++++++++++++++++++++++--
 1 file changed, 209 insertions(+), 12 deletions(-)

diff --git a/drivers/iommu/iommufd/vfio_compat.c b/drivers/iommu/iommufd/vfio_compat.c
index dbe39404a105..2802f49cc10d 100644
--- a/drivers/iommu/iommufd/vfio_compat.c
+++ b/drivers/iommu/iommufd/vfio_compat.c
@@ -56,6 +56,16 @@ create_compat_ioas(struct iommufd_ctx *ictx)
 	return ioas;
 }
 
+static u64 iommufd_get_pagesizes(struct iommufd_ioas *ioas)
+{
+	/* FIXME: See vfio_update_pgsize_bitmap(), for compat this should return
+	 * the high bits too, and we need to decide if we should report that
+	 * iommufd supports less than PAGE_SIZE alignment or stick to strict
+	 * compatibility. qemu only cares about the first set bit.
+	 */
+	return ioas->iopt.iova_alignment;
+}
+
 int iommufd_vfio_ioas(struct iommufd_ucmd *ucmd)
 {
 	struct iommu_vfio_ioas *cmd = ucmd->cmd;
@@ -130,9 +140,14 @@ static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd,
 				  void __user *arg)
 {
 	size_t minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);
-	u32 supported_flags = VFIO_DMA_UNMAP_FLAG_ALL;
+	u32 supported_flags = VFIO_DMA_UNMAP_FLAG_ALL |
+		VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP;
+	struct iommufd_dirty_data dirty, *dirtyp = NULL;
 	struct vfio_iommu_type1_dma_unmap unmap;
+	struct vfio_bitmap bitmap;
 	struct iommufd_ioas *ioas;
+	unsigned long pgshift;
+	size_t pgsize;
 	int rc;
 
 	if (copy_from_user(&unmap, arg, minsz))
@@ -141,14 +156,53 @@ static int iommufd_vfio_unmap_dma(struct iommufd_ctx *ictx, unsigned int cmd,
 	if (unmap.argsz < minsz || unmap.flags & ~supported_flags)
 		return -EINVAL;
 
+	if (unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) {
+		unsigned long npages;
+
+		if (copy_from_user(&bitmap,
+				   (void __user *)(arg + minsz),
+				   sizeof(bitmap)))
+			return -EFAULT;
+
+		if (!access_ok((void __user *)bitmap.data, bitmap.size))
+			return -EINVAL;
+
+		pgshift = __ffs(bitmap.pgsize);
+		npages = unmap.size >> pgshift;
+
+		if (!npages || !bitmap.size ||
+		    (bitmap.size > DIRTY_BITMAP_SIZE_MAX) ||
+		    (bitmap.size < dirty_bitmap_bytes(npages)))
+			return -EINVAL;
+
+		dirty.iova = unmap.iova;
+		dirty.length = unmap.size;
+		dirty.data = bitmap.data;
+		dirty.page_size = 1 << pgshift;
+		dirtyp = &dirty;
+	}
+
 	ioas = get_compat_ioas(ictx);
 	if (IS_ERR(ioas))
 		return PTR_ERR(ioas);
 
+	pgshift = __ffs(iommufd_get_pagesizes(ioas)),
+	pgsize = (size_t)1 << pgshift;
+
+	/* When dirty tracking is enabled, allow only min supported pgsize */
+	if ((unmap.flags & VFIO_DMA_UNMAP_FLAG_GET_DIRTY_BITMAP) &&
+	    (bitmap.pgsize != pgsize)) {
+		rc = -EINVAL;
+		goto out_put;
+	}
+
 	if (unmap.flags & VFIO_DMA_UNMAP_FLAG_ALL)
 		rc = iopt_unmap_all(&ioas->iopt);
 	else
-		rc = iopt_unmap_iova(&ioas->iopt, unmap.iova, unmap.size, NULL);
+		rc = iopt_unmap_iova(&ioas->iopt, unmap.iova, unmap.size,
+				     dirtyp);
+
+out_put:
 	iommufd_put_object(&ioas->obj);
 	return rc;
 }
@@ -222,16 +276,6 @@ static int iommufd_vfio_set_iommu(struct iommufd_ctx *ictx, unsigned long type)
 	return 0;
 }
 
-static u64 iommufd_get_pagesizes(struct iommufd_ioas *ioas)
-{
-	/* FIXME: See vfio_update_pgsize_bitmap(), for compat this should return
-	 * the high bits too, and we need to decide if we should report that
-	 * iommufd supports less than PAGE_SIZE alignment or stick to strict
-	 * compatibility. qemu only cares about the first set bit.
-	 */
-	return ioas->iopt.iova_alignment;
-}
-
 static int iommufd_fill_cap_iova(struct iommufd_ioas *ioas,
 				 struct vfio_info_cap_header __user *cur,
 				 size_t avail)
@@ -289,6 +333,26 @@ static int iommufd_fill_cap_dma_avail(struct iommufd_ioas *ioas,
 	return sizeof(cap_dma);
 }
 
+static int iommufd_fill_cap_migration(struct iommufd_ioas *ioas,
+				      struct vfio_info_cap_header __user *cur,
+				      size_t avail)
+{
+	struct vfio_iommu_type1_info_cap_migration cap_mig = {
+		.header = {
+			.id = VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION,
+			.version = 1,
+		},
+		.flags = 0,
+		.pgsize_bitmap = (size_t) 1 << __ffs(iommufd_get_pagesizes(ioas)),
+		.max_dirty_bitmap_size = DIRTY_BITMAP_SIZE_MAX,
+	};
+
+	if (avail >= sizeof(cap_mig) &&
+	    copy_to_user(cur, &cap_mig, sizeof(cap_mig)))
+		return -EFAULT;
+	return sizeof(cap_mig);
+}
+
 static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
 				       void __user *arg)
 {
@@ -298,6 +362,7 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
 	static const fill_cap_fn fill_fns[] = {
 		iommufd_fill_cap_iova,
 		iommufd_fill_cap_dma_avail,
+		iommufd_fill_cap_migration,
 	};
 	size_t minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);
 	struct vfio_info_cap_header __user *last_cap = NULL;
@@ -364,6 +429,137 @@ static int iommufd_vfio_iommu_get_info(struct iommufd_ctx *ictx,
 	return rc;
 }
 
+static int iommufd_vfio_dirty_pages_start(struct iommufd_ctx *ictx,
+				struct vfio_iommu_type1_dirty_bitmap *dirty)
+{
+	struct iommufd_ioas *ioas;
+	int ret = -EINVAL;
+
+	ioas = get_compat_ioas(ictx);
+	if (IS_ERR(ioas))
+		return PTR_ERR(ioas);
+
+	ret = iopt_set_dirty_tracking(&ioas->iopt, NULL, true);
+
+	iommufd_put_object(&ioas->obj);
+
+	return ret;
+}
+
+static int iommufd_vfio_dirty_pages_stop(struct iommufd_ctx *ictx,
+				struct vfio_iommu_type1_dirty_bitmap *dirty)
+{
+	struct iommufd_ioas *ioas;
+	int ret;
+
+	ioas = get_compat_ioas(ictx);
+	if (IS_ERR(ioas))
+		return PTR_ERR(ioas);
+
+	ret = iopt_set_dirty_tracking(&ioas->iopt, NULL, false);
+
+	iommufd_put_object(&ioas->obj);
+
+	return ret;
+}
+
+static int iommufd_vfio_dirty_pages_get_bitmap(struct iommufd_ctx *ictx,
+				struct vfio_iommu_type1_dirty_bitmap_get *range)
+{
+	struct iommufd_dirty_data bitmap;
+	uint64_t npages, bitmap_size;
+	struct iommufd_ioas *ioas;
+	unsigned long pgshift;
+	size_t iommu_pgsize;
+	int ret = -EINVAL;
+
+	ioas = get_compat_ioas(ictx);
+	if (IS_ERR(ioas))
+		return PTR_ERR(ioas);
+
+	down_read(&ioas->iopt.iova_rwsem);
+	pgshift = __ffs(range->bitmap.pgsize);
+	npages = range->size >> pgshift;
+	bitmap_size = range->bitmap.size;
+
+	if (!npages || !bitmap_size || (bitmap_size > DIRTY_BITMAP_SIZE_MAX) ||
+	    (bitmap_size < dirty_bitmap_bytes(npages)))
+		goto out_put;
+
+	iommu_pgsize = 1 << __ffs(iommufd_get_pagesizes(ioas));
+
+	/* allow only smallest supported pgsize */
+	if (range->bitmap.pgsize != iommu_pgsize)
+		goto out_put;
+
+	if (range->iova & (iommu_pgsize - 1))
+		goto out_put;
+
+	if (!range->size || range->size & (iommu_pgsize - 1))
+		goto out_put;
+
+	bitmap.iova = range->iova;
+	bitmap.length = range->size;
+	bitmap.data = range->bitmap.data;
+	bitmap.page_size = 1 << pgshift;
+
+	ret = iopt_read_and_clear_dirty_data(&ioas->iopt, NULL, &bitmap);
+
+out_put:
+	up_read(&ioas->iopt.iova_rwsem);
+	iommufd_put_object(&ioas->obj);
+	return ret;
+}
+
+static int iommufd_vfio_dirty_pages(struct iommufd_ctx *ictx, unsigned int cmd,
+				    void __user *arg)
+{
+	size_t minsz = offsetofend(struct vfio_iommu_type1_dirty_bitmap, flags);
+	struct vfio_iommu_type1_dirty_bitmap dirty;
+	u32 supported_flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START |
+			VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP |
+			VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
+	int ret = 0;
+
+	if (copy_from_user(&dirty, (void __user *)arg, minsz))
+		return -EFAULT;
+
+	if (dirty.argsz < minsz || dirty.flags & ~supported_flags)
+		return -EINVAL;
+
+	/* only one flag should be set at a time */
+	if (__ffs(dirty.flags) != __fls(dirty.flags))
+		return -EINVAL;
+
+	if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_START) {
+		ret = iommufd_vfio_dirty_pages_start(ictx, &dirty);
+	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_STOP) {
+		ret = iommufd_vfio_dirty_pages_stop(ictx, &dirty);
+	} else if (dirty.flags & VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP) {
+		struct vfio_iommu_type1_dirty_bitmap_get range;
+		size_t data_size = dirty.argsz - minsz;
+
+		if (!data_size || data_size < sizeof(range))
+			return -EINVAL;
+
+		if (copy_from_user(&range, (void __user *)(arg + minsz),
+				   sizeof(range)))
+			return -EFAULT;
+
+		if (range.iova + range.size < range.iova)
+			return -EINVAL;
+
+		if (!access_ok((void __user *)range.bitmap.data,
+			       range.bitmap.size))
+			return -EINVAL;
+
+		ret = iommufd_vfio_dirty_pages_get_bitmap(ictx, &range);
+	}
+
+	return ret;
+}
+
+
 /* FIXME TODO:
 PowerPC SPAPR only:
 #define VFIO_IOMMU_ENABLE	_IO(VFIO_TYPE, VFIO_BASE + 15)
@@ -394,6 +590,7 @@ int iommufd_vfio_ioctl(struct iommufd_ctx *ictx, unsigned int cmd,
 	case VFIO_IOMMU_UNMAP_DMA:
 		return iommufd_vfio_unmap_dma(ictx, cmd, uarg);
 	case VFIO_IOMMU_DIRTY_PAGES:
+		return iommufd_vfio_dirty_pages(ictx, cmd, uarg);
 	default:
 		return -ENOIOCTLCMD;
 	}
-- 
2.17.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

  parent reply	other threads:[~2022-04-28 21:11 UTC|newest]

Thread overview: 209+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-04-28 21:09 [PATCH RFC 00/19] IOMMUFD Dirty Tracking Joao Martins
2022-04-28 21:09 ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 01/19] iommu: Add iommu_domain ops for dirty tracking Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  7:54   ` Tian, Kevin
2022-04-29  7:54     ` Tian, Kevin
2022-04-29 10:44     ` Joao Martins
2022-04-29 10:44       ` Joao Martins
2022-04-29 12:08   ` Jason Gunthorpe
2022-04-29 12:08     ` Jason Gunthorpe via iommu
2022-04-29 14:26     ` Joao Martins
2022-04-29 14:26       ` Joao Martins
2022-04-29 14:35       ` Jason Gunthorpe
2022-04-29 14:35         ` Jason Gunthorpe via iommu
2022-04-29 13:40   ` Baolu Lu
2022-04-29 13:40     ` Baolu Lu
2022-04-29 15:27     ` Joao Martins
2022-04-29 15:27       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 02/19] iommufd: Dirty tracking for io_pagetable Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  8:07   ` Tian, Kevin
2022-04-29  8:07     ` Tian, Kevin
2022-04-29 10:48     ` Joao Martins
2022-04-29 10:48       ` Joao Martins
2022-04-29 11:56     ` Jason Gunthorpe
2022-04-29 11:56       ` Jason Gunthorpe via iommu
2022-04-29 14:28       ` Joao Martins
2022-04-29 14:28         ` Joao Martins
2022-04-29 23:51   ` Baolu Lu
2022-04-29 23:51     ` Baolu Lu
2022-05-02 11:57     ` Joao Martins
2022-05-02 11:57       ` Joao Martins
2022-08-29 10:01   ` Shameerali Kolothum Thodi
2022-04-28 21:09 ` [PATCH RFC 03/19] iommufd: Dirty tracking data support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  8:12   ` Tian, Kevin
2022-04-29  8:12     ` Tian, Kevin
2022-04-29 10:54     ` Joao Martins
2022-04-29 10:54       ` Joao Martins
2022-04-29 12:09       ` Jason Gunthorpe
2022-04-29 12:09         ` Jason Gunthorpe via iommu
2022-04-29 14:33         ` Joao Martins
2022-04-29 14:33           ` Joao Martins
2022-04-30  4:11   ` Baolu Lu
2022-04-30  4:11     ` Baolu Lu
2022-05-02 12:06     ` Joao Martins
2022-05-02 12:06       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 04/19] iommu: Add an unmap API that returns dirtied IOPTEs Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-30  5:12   ` Baolu Lu
2022-04-30  5:12     ` Baolu Lu
2022-05-02 12:22     ` Joao Martins
2022-05-02 12:22       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 05/19] iommufd: Add a dirty bitmap to iopt_unmap_iova() Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29 12:14   ` Jason Gunthorpe
2022-04-29 12:14     ` Jason Gunthorpe via iommu
2022-04-29 14:36     ` Joao Martins
2022-04-29 14:36       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 06/19] iommufd: Dirty tracking IOCTLs for the hw_pagetable Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-28 21:09 ` Joao Martins [this message]
2022-04-28 21:09   ` [PATCH RFC 07/19] iommufd/vfio-compat: Dirty tracking IOCTLs compatibility Joao Martins
2022-04-29 12:19   ` Jason Gunthorpe
2022-04-29 12:19     ` Jason Gunthorpe via iommu
2022-04-29 14:27     ` Joao Martins
2022-04-29 14:27       ` Joao Martins
2022-04-29 14:36       ` Jason Gunthorpe via iommu
2022-04-29 14:36         ` Jason Gunthorpe
2022-04-29 14:52         ` Joao Martins
2022-04-29 14:52           ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 08/19] iommufd: Add a test for dirty tracking ioctls Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 09/19] iommu/amd: Access/Dirty bit support in IOPTEs Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-05-31 11:34   ` Suravee Suthikulpanit via iommu
2022-05-31 11:34     ` Suravee Suthikulpanit
2022-05-31 12:15     ` Baolu Lu
2022-05-31 12:15       ` Baolu Lu
2022-05-31 15:22     ` Joao Martins
2022-05-31 15:22       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 10/19] iommu/amd: Add unmap_read_dirty() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-05-31 12:39   ` Suravee Suthikulpanit
2022-05-31 12:39     ` Suravee Suthikulpanit via iommu
2022-05-31 15:51     ` Joao Martins
2022-05-31 15:51       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 11/19] iommu/amd: Print access/dirty bits if supported Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 12/19] iommu/arm-smmu-v3: Add feature detection for HTTU Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 13/19] iommu/arm-smmu-v3: Add feature detection for BBML Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29 11:11   ` Robin Murphy
2022-04-29 11:11     ` Robin Murphy
2022-04-29 11:54     ` Joao Martins
2022-04-29 11:54       ` Joao Martins
2022-04-29 12:26       ` Robin Murphy
2022-04-29 12:26         ` Robin Murphy
2022-04-29 14:34         ` Joao Martins
2022-04-29 14:34           ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 14/19] iommu/arm-smmu-v3: Add read_and_clear_dirty() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-08-29  9:59   ` Shameerali Kolothum Thodi
2022-04-28 21:09 ` [PATCH RFC 15/19] iommu/arm-smmu-v3: Add set_dirty_tracking_range() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  8:28   ` Tian, Kevin
2022-04-29  8:28     ` Tian, Kevin
2022-04-29 11:05     ` Joao Martins
2022-04-29 11:05       ` Joao Martins
2022-04-29 11:19       ` Robin Murphy
2022-04-29 11:19         ` Robin Murphy
2022-04-29 12:06         ` Joao Martins
2022-04-29 12:06           ` Joao Martins
2022-04-29 12:23           ` Jason Gunthorpe
2022-04-29 12:23             ` Jason Gunthorpe via iommu
2022-04-29 14:45             ` Joao Martins
2022-04-29 14:45               ` Joao Martins
2022-04-29 16:11               ` Jason Gunthorpe
2022-04-29 16:11                 ` Jason Gunthorpe via iommu
2022-04-29 16:40                 ` Joao Martins
2022-04-29 16:40                   ` Joao Martins
2022-04-29 16:46                   ` Jason Gunthorpe
2022-04-29 16:46                     ` Jason Gunthorpe via iommu
2022-04-29 19:20                   ` Robin Murphy
2022-04-29 19:20                     ` Robin Murphy
2022-05-02 11:52                     ` Joao Martins
2022-05-02 11:52                       ` Joao Martins
2022-05-02 11:57                       ` Joao Martins
2022-05-02 11:57                         ` Joao Martins
2022-05-05  7:25       ` Shameerali Kolothum Thodi
2022-05-05  7:25         ` Shameerali Kolothum Thodi via iommu
2022-05-05  9:52         ` Joao Martins
2022-05-05  9:52           ` Joao Martins
2022-08-29  9:59           ` Shameerali Kolothum Thodi
2022-08-29 10:00   ` Shameerali Kolothum Thodi
2022-04-28 21:09 ` [PATCH RFC 16/19] iommu/arm-smmu-v3: Enable HTTU for stage1 with io-pgtable mapping Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29 11:35   ` Robin Murphy
2022-04-29 11:35     ` Robin Murphy
2022-04-29 12:10     ` Joao Martins
2022-04-29 12:10       ` Joao Martins
2022-04-29 12:46       ` Robin Murphy
2022-04-29 12:46         ` Robin Murphy
2022-08-29 10:00   ` Shameerali Kolothum Thodi
2022-04-28 21:09 ` [PATCH RFC 17/19] iommu/arm-smmu-v3: Add unmap_read_dirty() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29 11:53   ` Robin Murphy
2022-04-29 11:53     ` Robin Murphy
2022-04-28 21:09 ` [PATCH RFC 18/19] iommu/intel: Access/Dirty bit support for SL domains Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  9:03   ` Tian, Kevin
2022-04-29  9:03     ` Tian, Kevin
2022-04-29 11:20     ` Joao Martins
2022-04-29 11:20       ` Joao Martins
2022-04-30  6:12   ` Baolu Lu
2022-04-30  6:12     ` Baolu Lu
2022-05-02 12:24     ` Joao Martins
2022-05-02 12:24       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 19/19] iommu/intel: Add unmap_read_dirty() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  5:45 ` [PATCH RFC 00/19] IOMMUFD Dirty Tracking Tian, Kevin
2022-04-29  5:45   ` Tian, Kevin
2022-04-29 10:27   ` Joao Martins
2022-04-29 10:27     ` Joao Martins
2022-04-29 12:38     ` Jason Gunthorpe
2022-04-29 12:38       ` Jason Gunthorpe via iommu
2022-04-29 15:20       ` Joao Martins
2022-04-29 15:20         ` Joao Martins
2022-05-05  7:40       ` Tian, Kevin
2022-05-05  7:40         ` Tian, Kevin
2022-05-05 14:07         ` Jason Gunthorpe
2022-05-05 14:07           ` Jason Gunthorpe via iommu
2022-05-06  3:51           ` Tian, Kevin
2022-05-06  3:51             ` Tian, Kevin
2022-05-06 11:46             ` Jason Gunthorpe
2022-05-06 11:46               ` Jason Gunthorpe via iommu
2022-05-10  1:38               ` Tian, Kevin
2022-05-10  1:38                 ` Tian, Kevin
2022-05-10 11:50                 ` Joao Martins
2022-05-10 11:50                   ` Joao Martins
2022-05-11  1:17                   ` Tian, Kevin
2022-05-11  1:17                     ` Tian, Kevin
2022-05-10 13:46                 ` Jason Gunthorpe via iommu
2022-05-10 13:46                   ` Jason Gunthorpe
2022-05-11  1:10                   ` Tian, Kevin
2022-05-11  1:10                     ` Tian, Kevin
2022-07-12 18:34                     ` Joao Martins
2022-07-21 14:24                       ` Jason Gunthorpe
2022-05-02 18:11   ` Alex Williamson
2022-05-02 18:11     ` Alex Williamson
2022-05-02 18:52     ` Jason Gunthorpe
2022-05-02 18:52       ` Jason Gunthorpe via iommu
2022-05-03 10:48       ` Joao Martins
2022-05-03 10:48         ` Joao Martins
2022-05-05  7:42       ` Tian, Kevin
2022-05-05  7:42         ` Tian, Kevin
2022-05-05 10:06         ` Joao Martins
2022-05-05 10:06           ` Joao Martins
2022-05-05 11:03           ` Tian, Kevin
2022-05-05 11:03             ` Tian, Kevin
2022-05-05 11:50             ` Joao Martins
2022-05-05 11:50               ` Joao Martins
2022-05-06  3:14               ` Tian, Kevin
2022-05-06  3:14                 ` Tian, Kevin
2022-05-05 13:55             ` Jason Gunthorpe
2022-05-05 13:55               ` Jason Gunthorpe via iommu
2022-05-06  3:17               ` Tian, Kevin
2022-05-06  3:17                 ` Tian, Kevin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220428210933.3583-8-joao.m.martins@oracle.com \
    --to=joao.m.martins@oracle.com \
    --cc=alex.williamson@redhat.com \
    --cc=baolu.lu@linux.intel.com \
    --cc=cohuck@redhat.com \
    --cc=dwmw2@infradead.org \
    --cc=eric.auger@redhat.com \
    --cc=iommu@lists.linux-foundation.org \
    --cc=jean-philippe@linaro.org \
    --cc=jgg@nvidia.com \
    --cc=joro@8bytes.org \
    --cc=kevin.tian@intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=nicolinc@nvidia.com \
    --cc=robin.murphy@arm.com \
    --cc=shameerali.kolothum.thodi@huawei.com \
    --cc=suravee.suthikulpanit@amd.com \
    --cc=will@kernel.org \
    --cc=yi.l.liu@intel.com \
    --cc=yishaih@nvidia.com \
    --cc=zhukeqian1@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes,
see mirroring instructions on how to clone and mirror
all data and code used by this external index.