All of lore.kernel.org
 help / color / mirror / Atom feed
From: Joao Martins <joao.m.martins@oracle.com>
To: iommu@lists.linux-foundation.org
Cc: Joao Martins <joao.m.martins@oracle.com>,
	Joerg Roedel <joro@8bytes.org>,
	Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>,
	Will Deacon <will@kernel.org>,
	Robin Murphy <robin.murphy@arm.com>,
	Jean-Philippe Brucker <jean-philippe@linaro.org>,
	Keqian Zhu <zhukeqian1@huawei.com>,
	Shameerali Kolothum Thodi  <shameerali.kolothum.thodi@huawei.com>,
	David Woodhouse <dwmw2@infradead.org>,
	Lu Baolu <baolu.lu@linux.intel.com>,
	Jason Gunthorpe <jgg@nvidia.com>,
	Nicolin Chen <nicolinc@nvidia.com>,
	Yishai Hadas <yishaih@nvidia.com>,
	Kevin Tian <kevin.tian@intel.com>,
	Eric Auger <eric.auger@redhat.com>, Yi Liu <yi.l.liu@intel.com>,
	Alex Williamson <alex.williamson@redhat.com>,
	Cornelia Huck <cohuck@redhat.com>,
	kvm@vger.kernel.org
Subject: [PATCH RFC 08/19] iommufd: Add a test for dirty tracking ioctls
Date: Thu, 28 Apr 2022 22:09:22 +0100	[thread overview]
Message-ID: <20220428210933.3583-9-joao.m.martins@oracle.com> (raw)
In-Reply-To: <20220428210933.3583-1-joao.m.martins@oracle.com>

Add a new test ioctl for simulating dirty IOVAs
in the mock domain, and implement the mock iommu domain ops
that support dirty tracking.

The selftest exercises the usual main workflow of:

1) Setting/Clearing dirty tracking from the iommu domain
2) Read and clear dirty IOPTEs
3) Unmap and read dirty back

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 drivers/iommu/iommufd/iommufd_test.h    |   9 ++
 drivers/iommu/iommufd/selftest.c        | 137 +++++++++++++++++++++++-
 tools/testing/selftests/iommu/Makefile  |   1 +
 tools/testing/selftests/iommu/iommufd.c | 135 +++++++++++++++++++++++
 4 files changed, 279 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index d22ef484af1a..90dafa513078 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -14,6 +14,7 @@ enum {
 	IOMMU_TEST_OP_MD_CHECK_REFS,
 	IOMMU_TEST_OP_ACCESS_PAGES,
 	IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
+	IOMMU_TEST_OP_DIRTY,
 };
 
 enum {
@@ -57,6 +58,14 @@ struct iommu_test_cmd {
 		struct {
 			__u32 limit;
 		} memory_limit;
+		struct {
+			__u32 flags;
+			__aligned_u64 iova;
+			__aligned_u64 length;
+			__aligned_u64 page_size;
+			__aligned_u64 uptr;
+			__aligned_u64 out_nr_dirty;
+		} dirty;
 	};
 	__u32 last;
 };
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index a665719b493e..b02309722436 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -13,6 +13,7 @@
 size_t iommufd_test_memory_limit = 65536;
 
 enum {
+	MOCK_DIRTY_TRACK = 1,
 	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
 
 	/*
@@ -25,9 +26,11 @@ enum {
 	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
 	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
 	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
+	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
 };
 
 struct mock_iommu_domain {
+	unsigned long flags;
 	struct iommu_domain domain;
 	struct xarray pfns;
 };
@@ -133,7 +136,7 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
 
 		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
 			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
-			WARN_ON(!ent);
+
 			/*
 			 * iommufd generates unmaps that must be a strict
 			 * superset of the map's performend So every starting
@@ -143,12 +146,12 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
 			 * passed to map_pages
 			 */
 			if (first) {
-				WARN_ON(!(xa_to_value(ent) &
+				WARN_ON(ent && !(xa_to_value(ent) &
 					  MOCK_PFN_START_IOVA));
 				first = false;
 			}
 			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
-				WARN_ON(!(xa_to_value(ent) &
+				WARN_ON(ent && !(xa_to_value(ent) &
 					  MOCK_PFN_LAST_IOVA));
 
 			iova += MOCK_IO_PAGE_SIZE;
@@ -171,6 +174,75 @@ static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
 	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
 }
 
+static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
+					  bool enable)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	unsigned long flags = mock->flags;
+
+	/* No change? */
+	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
+		return -EINVAL;
+
+	flags = (enable ?
+		 flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
+
+	mock->flags = flags;
+	return 0;
+}
+
+static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
+					    unsigned long iova, size_t size,
+					    struct iommu_dirty_bitmap *dirty)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
+	void *ent, *old;
+
+	if (!(mock->flags & MOCK_DIRTY_TRACK))
+		return -EINVAL;
+
+	for (i = 0; i < max; i++) {
+		unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+
+		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+		if (ent &&
+		    (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
+			unsigned long val;
+
+			/* Clear dirty */
+			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
+				       xa_mk_value(val), GFP_KERNEL);
+			WARN_ON_ONCE(ent != old);
+			iommu_dirty_bitmap_record(dirty, cur, MOCK_IO_PAGE_SIZE);
+		}
+	}
+
+	return 0;
+}
+
+static size_t mock_domain_unmap_read_dirty(struct iommu_domain *domain,
+					   unsigned long iova, size_t page_size,
+					   struct iommu_iotlb_gather *gather,
+					   struct iommu_dirty_bitmap *dirty)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	void *ent;
+
+	WARN_ON(page_size != MOCK_IO_PAGE_SIZE);
+
+	ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+	if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA) &&
+	    (mock->flags & MOCK_DIRTY_TRACK))
+		iommu_dirty_bitmap_record(dirty, iova, page_size);
+
+	return ent ? page_size : 0;
+}
+
 static const struct iommu_ops mock_ops = {
 	.owner = THIS_MODULE,
 	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
@@ -181,6 +253,9 @@ static const struct iommu_ops mock_ops = {
 			.map_pages = mock_domain_map_pages,
 			.unmap_pages = mock_domain_unmap_pages,
 			.iova_to_phys = mock_domain_iova_to_phys,
+			.set_dirty_tracking = mock_domain_set_dirty_tracking,
+			.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
+			.unmap_read_dirty = mock_domain_unmap_read_dirty,
 		},
 };
 
@@ -442,6 +517,56 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
 	return rc;
 }
 
+static int iommufd_test_dirty(struct iommufd_ucmd *ucmd,
+			      unsigned int mockpt_id, unsigned long iova,
+			      size_t length, unsigned long page_size,
+			      void __user *uptr, u32 flags)
+{
+	unsigned long i, max = length / page_size;
+	struct iommu_test_cmd *cmd = ucmd->cmd;
+	struct iommufd_hw_pagetable *hwpt;
+	struct mock_iommu_domain *mock;
+	int rc, count = 0;
+
+	if (iova % page_size || length % page_size ||
+	    (uintptr_t)uptr % page_size)
+		return -EINVAL;
+
+	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
+	if (IS_ERR(hwpt))
+		return PTR_ERR(hwpt);
+
+	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
+		rc = -EINVAL;
+		goto out_put;
+	}
+
+	for (i = 0; i < max; i++) {
+		unsigned long cur = iova + i * page_size;
+		void *ent, *old;
+
+		if (!test_bit(i, (unsigned long *) uptr))
+			continue;
+
+		ent = xa_load(&mock->pfns, cur / page_size);
+		if (ent) {
+			unsigned long val;
+
+			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
+			old = xa_store(&mock->pfns, cur / page_size,
+				       xa_mk_value(val), GFP_KERNEL);
+			WARN_ON_ONCE(ent != old);
+			count++;
+		}
+	}
+
+	cmd->dirty.out_nr_dirty = count;
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+out_put:
+	iommufd_put_object(&hwpt->obj);
+	return rc;
+}
+
 void iommufd_selftest_destroy(struct iommufd_object *obj)
 {
 	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);
@@ -486,6 +611,12 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
 			cmd->access_pages.length,
 			u64_to_user_ptr(cmd->access_pages.uptr),
 			cmd->access_pages.flags);
+	case IOMMU_TEST_OP_DIRTY:
+		return iommufd_test_dirty(
+			ucmd, cmd->id, cmd->dirty.iova,
+			cmd->dirty.length, cmd->dirty.page_size,
+			u64_to_user_ptr(cmd->dirty.uptr),
+			cmd->dirty.flags);
 	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
 		iommufd_test_memory_limit = cmd->memory_limit.limit;
 		return 0;
diff --git a/tools/testing/selftests/iommu/Makefile b/tools/testing/selftests/iommu/Makefile
index 7bc38b3beaeb..48d4dcf11506 100644
--- a/tools/testing/selftests/iommu/Makefile
+++ b/tools/testing/selftests/iommu/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 CFLAGS += -Wall -O2 -Wno-unused-function
+CFLAGS += -I../../../../tools/include/
 CFLAGS += -I../../../../include/uapi/
 CFLAGS += -I../../../../include/
 
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index 5c47d706ed94..3a494f7958f4 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -13,13 +13,18 @@
 #define __EXPORTED_HEADERS__
 #include <linux/iommufd.h>
 #include <linux/vfio.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
 #include "../../../../drivers/iommu/iommufd/iommufd_test.h"
+#define BITS_PER_BYTE 8
 
 static void *buffer;
+static void *bitmap;
 
 static unsigned long PAGE_SIZE;
 static unsigned long HUGEPAGE_SIZE;
 static unsigned long BUFFER_SIZE;
+static unsigned long BITMAP_SIZE;
 
 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
 
@@ -52,6 +57,10 @@ static __attribute__((constructor)) void setup_sizes(void)
 	BUFFER_SIZE = PAGE_SIZE * 16;
 	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
 	assert(rc || buffer || (uintptr_t)buffer % HUGEPAGE_SIZE == 0);
+
+	BITMAP_SIZE = BUFFER_SIZE / MOCK_PAGE_SIZE / BITS_PER_BYTE;
+	rc = posix_memalign(&bitmap, PAGE_SIZE, BUFFER_SIZE);
+	assert(rc || buffer || (uintptr_t)buffer % PAGE_SIZE == 0);
 }
 
 /*
@@ -546,6 +555,132 @@ TEST_F(iommufd_ioas, iova_ranges)
 	EXPECT_EQ(0, cmd->out_valid_iovas[1].last);
 }
 
+TEST_F(iommufd_ioas, dirty)
+{
+	struct iommu_ioas_map map_cmd = {
+		.size = sizeof(map_cmd),
+		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
+		.ioas_id = self->ioas_id,
+		.user_va = (uintptr_t)buffer,
+		.length = BUFFER_SIZE,
+		.iova = MOCK_APERTURE_START,
+	};
+	struct iommu_test_cmd mock_cmd = {
+		.size = sizeof(mock_cmd),
+		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
+		.id = self->ioas_id,
+	};
+	struct iommu_hwpt_set_dirty set_dirty_cmd = {
+		.size = sizeof(set_dirty_cmd),
+		.flags = IOMMU_DIRTY_TRACKING_ENABLED,
+		.hwpt_id = self->ioas_id,
+	};
+	struct iommu_test_cmd dirty_cmd = {
+		.size = sizeof(dirty_cmd),
+		.op = IOMMU_TEST_OP_DIRTY,
+		.id = self->ioas_id,
+		.dirty = { .iova = MOCK_APERTURE_START,
+			   .length = BUFFER_SIZE,
+			   .page_size = MOCK_PAGE_SIZE,
+			   .uptr = (uintptr_t)bitmap },
+	};
+	struct iommu_hwpt_get_dirty_iova get_dirty_cmd = {
+		.size = sizeof(get_dirty_cmd),
+		.hwpt_id = self->ioas_id,
+		.bitmap = {
+			.iova = MOCK_APERTURE_START,
+			.length = BUFFER_SIZE,
+			.page_size = MOCK_PAGE_SIZE,
+			.data = (__u64 *)bitmap,
+		}
+	};
+	struct iommu_ioas_unmap_dirty unmap_dirty_cmd = {
+		.size = sizeof(unmap_dirty_cmd),
+		.ioas_id = self->ioas_id,
+		.bitmap = {
+			.iova = MOCK_APERTURE_START,
+			.length = BUFFER_SIZE,
+			.page_size = MOCK_PAGE_SIZE,
+			.data = (__u64 *)bitmap,
+		},
+	};
+	struct iommu_destroy destroy_cmd = { .size = sizeof(destroy_cmd) };
+	unsigned long i, count, nbits = BITMAP_SIZE * BITS_PER_BYTE;
+
+	/* Toggle dirty with a domain and a single map */
+	ASSERT_EQ(0, ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MOCK_DOMAIN),
+			   &mock_cmd));
+	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_MAP, &map_cmd));
+
+	set_dirty_cmd.hwpt_id = mock_cmd.id;
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+	EXPECT_ERRNO(EINVAL,
+		  ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+
+	/* Mark all even bits as dirty in the mock domain */
+	for (count = 0, i = 0; i < nbits; count += !(i%2), i++)
+		if (!(i % 2))
+			set_bit(i, (unsigned long *) bitmap);
+	ASSERT_EQ(count, BITMAP_SIZE * BITS_PER_BYTE / 2);
+
+	dirty_cmd.id = mock_cmd.id;
+	ASSERT_EQ(0,
+		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY),
+			&dirty_cmd));
+	ASSERT_EQ(BITMAP_SIZE * BITS_PER_BYTE / 2,
+		  dirty_cmd.dirty.out_nr_dirty);
+
+	get_dirty_cmd.hwpt_id = mock_cmd.id;
+	memset(bitmap, 0, BITMAP_SIZE);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_HWPT_GET_DIRTY_IOVA, &get_dirty_cmd));
+
+	/* All even bits should be dirty */
+	for (count = 0, i = 0; i < nbits; count += !(i%2), i++)
+		ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *) bitmap));
+	ASSERT_EQ(count, dirty_cmd.dirty.out_nr_dirty);
+
+	memset(bitmap, 0, BITMAP_SIZE);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_HWPT_GET_DIRTY_IOVA, &get_dirty_cmd));
+
+	/* Should be all zeroes */
+	for (i = 0; i < nbits; i++)
+		ASSERT_EQ(0, test_bit(i, (unsigned long *) bitmap));
+
+	/* Mark all even bits as dirty in the mock domain */
+	for (count = 0, i = 0; i < nbits; count += !(i%2), i++)
+		if (!(i % 2))
+			set_bit(i, (unsigned long *) bitmap);
+	ASSERT_EQ(count, BITMAP_SIZE * BITS_PER_BYTE / 2);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY),
+			&dirty_cmd));
+	ASSERT_EQ(BITMAP_SIZE * BITS_PER_BYTE / 2,
+		  dirty_cmd.dirty.out_nr_dirty);
+
+	memset(bitmap, 0, BITMAP_SIZE);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_IOAS_UNMAP_DIRTY, &unmap_dirty_cmd));
+
+	/* All even bits should be dirty */
+	for (count = 0, i = 0; i < nbits; count += !(i%2), i++)
+		ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *) bitmap));
+	ASSERT_EQ(count, dirty_cmd.dirty.out_nr_dirty);
+
+	set_dirty_cmd.flags = IOMMU_DIRTY_TRACKING_DISABLED;
+	ASSERT_EQ(0,
+		     ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+	EXPECT_ERRNO(EINVAL,
+		     ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+
+	destroy_cmd.id = mock_cmd.mock_domain.device_id;
+	ASSERT_EQ(0, ioctl(self->fd, IOMMU_DESTROY, &destroy_cmd));
+	destroy_cmd.id = mock_cmd.id;
+	ASSERT_EQ(0, ioctl(self->fd, IOMMU_DESTROY, &destroy_cmd));
+}
+
 TEST_F(iommufd_ioas, access)
 {
 	struct iommu_ioas_map map_cmd = {
-- 
2.17.2


WARNING: multiple messages have this Message-ID (diff)
From: Joao Martins <joao.m.martins@oracle.com>
To: iommu@lists.linux-foundation.org
Cc: Jean-Philippe Brucker <jean-philippe@linaro.org>,
	Kevin Tian <kevin.tian@intel.com>,
	Yishai Hadas <yishaih@nvidia.com>,
	Jason Gunthorpe <jgg@nvidia.com>,
	kvm@vger.kernel.org, Will Deacon <will@kernel.org>,
	Cornelia Huck <cohuck@redhat.com>,
	Alex Williamson <alex.williamson@redhat.com>,
	Joao Martins <joao.m.martins@oracle.com>,
	David Woodhouse <dwmw2@infradead.org>,
	Robin Murphy <robin.murphy@arm.com>
Subject: [PATCH RFC 08/19] iommufd: Add a test for dirty tracking ioctls
Date: Thu, 28 Apr 2022 22:09:22 +0100	[thread overview]
Message-ID: <20220428210933.3583-9-joao.m.martins@oracle.com> (raw)
In-Reply-To: <20220428210933.3583-1-joao.m.martins@oracle.com>

Add a new test ioctl for simulating dirty IOVAs
in the mock domain, and implement the mock iommu domain ops
that support dirty tracking.

The selftest exercises the usual main workflow of:

1) Setting/Clearing dirty tracking from the iommu domain
2) Read and clear dirty IOPTEs
3) Unmap and read dirty back

Signed-off-by: Joao Martins <joao.m.martins@oracle.com>
---
 drivers/iommu/iommufd/iommufd_test.h    |   9 ++
 drivers/iommu/iommufd/selftest.c        | 137 +++++++++++++++++++++++-
 tools/testing/selftests/iommu/Makefile  |   1 +
 tools/testing/selftests/iommu/iommufd.c | 135 +++++++++++++++++++++++
 4 files changed, 279 insertions(+), 3 deletions(-)

diff --git a/drivers/iommu/iommufd/iommufd_test.h b/drivers/iommu/iommufd/iommufd_test.h
index d22ef484af1a..90dafa513078 100644
--- a/drivers/iommu/iommufd/iommufd_test.h
+++ b/drivers/iommu/iommufd/iommufd_test.h
@@ -14,6 +14,7 @@ enum {
 	IOMMU_TEST_OP_MD_CHECK_REFS,
 	IOMMU_TEST_OP_ACCESS_PAGES,
 	IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT,
+	IOMMU_TEST_OP_DIRTY,
 };
 
 enum {
@@ -57,6 +58,14 @@ struct iommu_test_cmd {
 		struct {
 			__u32 limit;
 		} memory_limit;
+		struct {
+			__u32 flags;
+			__aligned_u64 iova;
+			__aligned_u64 length;
+			__aligned_u64 page_size;
+			__aligned_u64 uptr;
+			__aligned_u64 out_nr_dirty;
+		} dirty;
 	};
 	__u32 last;
 };
diff --git a/drivers/iommu/iommufd/selftest.c b/drivers/iommu/iommufd/selftest.c
index a665719b493e..b02309722436 100644
--- a/drivers/iommu/iommufd/selftest.c
+++ b/drivers/iommu/iommufd/selftest.c
@@ -13,6 +13,7 @@
 size_t iommufd_test_memory_limit = 65536;
 
 enum {
+	MOCK_DIRTY_TRACK = 1,
 	MOCK_IO_PAGE_SIZE = PAGE_SIZE / 2,
 
 	/*
@@ -25,9 +26,11 @@ enum {
 	_MOCK_PFN_START = MOCK_PFN_MASK + 1,
 	MOCK_PFN_START_IOVA = _MOCK_PFN_START,
 	MOCK_PFN_LAST_IOVA = _MOCK_PFN_START,
+	MOCK_PFN_DIRTY_IOVA = _MOCK_PFN_START << 1,
 };
 
 struct mock_iommu_domain {
+	unsigned long flags;
 	struct iommu_domain domain;
 	struct xarray pfns;
 };
@@ -133,7 +136,7 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
 
 		for (cur = 0; cur != pgsize; cur += MOCK_IO_PAGE_SIZE) {
 			ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
-			WARN_ON(!ent);
+
 			/*
 			 * iommufd generates unmaps that must be a strict
 			 * superset of the map's performend So every starting
@@ -143,12 +146,12 @@ static size_t mock_domain_unmap_pages(struct iommu_domain *domain,
 			 * passed to map_pages
 			 */
 			if (first) {
-				WARN_ON(!(xa_to_value(ent) &
+				WARN_ON(ent && !(xa_to_value(ent) &
 					  MOCK_PFN_START_IOVA));
 				first = false;
 			}
 			if (pgcount == 1 && cur + MOCK_IO_PAGE_SIZE == pgsize)
-				WARN_ON(!(xa_to_value(ent) &
+				WARN_ON(ent && !(xa_to_value(ent) &
 					  MOCK_PFN_LAST_IOVA));
 
 			iova += MOCK_IO_PAGE_SIZE;
@@ -171,6 +174,75 @@ static phys_addr_t mock_domain_iova_to_phys(struct iommu_domain *domain,
 	return (xa_to_value(ent) & MOCK_PFN_MASK) * MOCK_IO_PAGE_SIZE;
 }
 
+static int mock_domain_set_dirty_tracking(struct iommu_domain *domain,
+					  bool enable)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	unsigned long flags = mock->flags;
+
+	/* No change? */
+	if (!(enable ^ !!(flags & MOCK_DIRTY_TRACK)))
+		return -EINVAL;
+
+	flags = (enable ?
+		 flags | MOCK_DIRTY_TRACK : flags & ~MOCK_DIRTY_TRACK);
+
+	mock->flags = flags;
+	return 0;
+}
+
+static int mock_domain_read_and_clear_dirty(struct iommu_domain *domain,
+					    unsigned long iova, size_t size,
+					    struct iommu_dirty_bitmap *dirty)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	unsigned long i, max = size / MOCK_IO_PAGE_SIZE;
+	void *ent, *old;
+
+	if (!(mock->flags & MOCK_DIRTY_TRACK))
+		return -EINVAL;
+
+	for (i = 0; i < max; i++) {
+		unsigned long cur = iova + i * MOCK_IO_PAGE_SIZE;
+
+		ent = xa_load(&mock->pfns, cur / MOCK_IO_PAGE_SIZE);
+		if (ent &&
+		    (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA)) {
+			unsigned long val;
+
+			/* Clear dirty */
+			val = xa_to_value(ent) & ~MOCK_PFN_DIRTY_IOVA;
+			old = xa_store(&mock->pfns, cur / MOCK_IO_PAGE_SIZE,
+				       xa_mk_value(val), GFP_KERNEL);
+			WARN_ON_ONCE(ent != old);
+			iommu_dirty_bitmap_record(dirty, cur, MOCK_IO_PAGE_SIZE);
+		}
+	}
+
+	return 0;
+}
+
+static size_t mock_domain_unmap_read_dirty(struct iommu_domain *domain,
+					   unsigned long iova, size_t page_size,
+					   struct iommu_iotlb_gather *gather,
+					   struct iommu_dirty_bitmap *dirty)
+{
+	struct mock_iommu_domain *mock =
+		container_of(domain, struct mock_iommu_domain, domain);
+	void *ent;
+
+	WARN_ON(page_size != MOCK_IO_PAGE_SIZE);
+
+	ent = xa_erase(&mock->pfns, iova / MOCK_IO_PAGE_SIZE);
+	if (ent && (xa_to_value(ent) & MOCK_PFN_DIRTY_IOVA) &&
+	    (mock->flags & MOCK_DIRTY_TRACK))
+		iommu_dirty_bitmap_record(dirty, iova, page_size);
+
+	return ent ? page_size : 0;
+}
+
 static const struct iommu_ops mock_ops = {
 	.owner = THIS_MODULE,
 	.pgsize_bitmap = MOCK_IO_PAGE_SIZE,
@@ -181,6 +253,9 @@ static const struct iommu_ops mock_ops = {
 			.map_pages = mock_domain_map_pages,
 			.unmap_pages = mock_domain_unmap_pages,
 			.iova_to_phys = mock_domain_iova_to_phys,
+			.set_dirty_tracking = mock_domain_set_dirty_tracking,
+			.read_and_clear_dirty = mock_domain_read_and_clear_dirty,
+			.unmap_read_dirty = mock_domain_unmap_read_dirty,
 		},
 };
 
@@ -442,6 +517,56 @@ static int iommufd_test_access_pages(struct iommufd_ucmd *ucmd,
 	return rc;
 }
 
+static int iommufd_test_dirty(struct iommufd_ucmd *ucmd,
+			      unsigned int mockpt_id, unsigned long iova,
+			      size_t length, unsigned long page_size,
+			      void __user *uptr, u32 flags)
+{
+	unsigned long i, max = length / page_size;
+	struct iommu_test_cmd *cmd = ucmd->cmd;
+	struct iommufd_hw_pagetable *hwpt;
+	struct mock_iommu_domain *mock;
+	int rc, count = 0;
+
+	if (iova % page_size || length % page_size ||
+	    (uintptr_t)uptr % page_size)
+		return -EINVAL;
+
+	hwpt = get_md_pagetable(ucmd, mockpt_id, &mock);
+	if (IS_ERR(hwpt))
+		return PTR_ERR(hwpt);
+
+	if (!(mock->flags & MOCK_DIRTY_TRACK)) {
+		rc = -EINVAL;
+		goto out_put;
+	}
+
+	for (i = 0; i < max; i++) {
+		unsigned long cur = iova + i * page_size;
+		void *ent, *old;
+
+		if (!test_bit(i, (unsigned long *) uptr))
+			continue;
+
+		ent = xa_load(&mock->pfns, cur / page_size);
+		if (ent) {
+			unsigned long val;
+
+			val = xa_to_value(ent) | MOCK_PFN_DIRTY_IOVA;
+			old = xa_store(&mock->pfns, cur / page_size,
+				       xa_mk_value(val), GFP_KERNEL);
+			WARN_ON_ONCE(ent != old);
+			count++;
+		}
+	}
+
+	cmd->dirty.out_nr_dirty = count;
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+out_put:
+	iommufd_put_object(&hwpt->obj);
+	return rc;
+}
+
 void iommufd_selftest_destroy(struct iommufd_object *obj)
 {
 	struct selftest_obj *sobj = container_of(obj, struct selftest_obj, obj);
@@ -486,6 +611,12 @@ int iommufd_test(struct iommufd_ucmd *ucmd)
 			cmd->access_pages.length,
 			u64_to_user_ptr(cmd->access_pages.uptr),
 			cmd->access_pages.flags);
+	case IOMMU_TEST_OP_DIRTY:
+		return iommufd_test_dirty(
+			ucmd, cmd->id, cmd->dirty.iova,
+			cmd->dirty.length, cmd->dirty.page_size,
+			u64_to_user_ptr(cmd->dirty.uptr),
+			cmd->dirty.flags);
 	case IOMMU_TEST_OP_SET_TEMP_MEMORY_LIMIT:
 		iommufd_test_memory_limit = cmd->memory_limit.limit;
 		return 0;
diff --git a/tools/testing/selftests/iommu/Makefile b/tools/testing/selftests/iommu/Makefile
index 7bc38b3beaeb..48d4dcf11506 100644
--- a/tools/testing/selftests/iommu/Makefile
+++ b/tools/testing/selftests/iommu/Makefile
@@ -1,5 +1,6 @@
 # SPDX-License-Identifier: GPL-2.0-only
 CFLAGS += -Wall -O2 -Wno-unused-function
+CFLAGS += -I../../../../tools/include/
 CFLAGS += -I../../../../include/uapi/
 CFLAGS += -I../../../../include/
 
diff --git a/tools/testing/selftests/iommu/iommufd.c b/tools/testing/selftests/iommu/iommufd.c
index 5c47d706ed94..3a494f7958f4 100644
--- a/tools/testing/selftests/iommu/iommufd.c
+++ b/tools/testing/selftests/iommu/iommufd.c
@@ -13,13 +13,18 @@
 #define __EXPORTED_HEADERS__
 #include <linux/iommufd.h>
 #include <linux/vfio.h>
+#include <linux/bitmap.h>
+#include <linux/bitops.h>
 #include "../../../../drivers/iommu/iommufd/iommufd_test.h"
+#define BITS_PER_BYTE 8
 
 static void *buffer;
+static void *bitmap;
 
 static unsigned long PAGE_SIZE;
 static unsigned long HUGEPAGE_SIZE;
 static unsigned long BUFFER_SIZE;
+static unsigned long BITMAP_SIZE;
 
 #define MOCK_PAGE_SIZE (PAGE_SIZE / 2)
 
@@ -52,6 +57,10 @@ static __attribute__((constructor)) void setup_sizes(void)
 	BUFFER_SIZE = PAGE_SIZE * 16;
 	rc = posix_memalign(&buffer, HUGEPAGE_SIZE, BUFFER_SIZE);
 	assert(rc || buffer || (uintptr_t)buffer % HUGEPAGE_SIZE == 0);
+
+	BITMAP_SIZE = BUFFER_SIZE / MOCK_PAGE_SIZE / BITS_PER_BYTE;
+	rc = posix_memalign(&bitmap, PAGE_SIZE, BUFFER_SIZE);
+	assert(rc || buffer || (uintptr_t)buffer % PAGE_SIZE == 0);
 }
 
 /*
@@ -546,6 +555,132 @@ TEST_F(iommufd_ioas, iova_ranges)
 	EXPECT_EQ(0, cmd->out_valid_iovas[1].last);
 }
 
+TEST_F(iommufd_ioas, dirty)
+{
+	struct iommu_ioas_map map_cmd = {
+		.size = sizeof(map_cmd),
+		.flags = IOMMU_IOAS_MAP_FIXED_IOVA,
+		.ioas_id = self->ioas_id,
+		.user_va = (uintptr_t)buffer,
+		.length = BUFFER_SIZE,
+		.iova = MOCK_APERTURE_START,
+	};
+	struct iommu_test_cmd mock_cmd = {
+		.size = sizeof(mock_cmd),
+		.op = IOMMU_TEST_OP_MOCK_DOMAIN,
+		.id = self->ioas_id,
+	};
+	struct iommu_hwpt_set_dirty set_dirty_cmd = {
+		.size = sizeof(set_dirty_cmd),
+		.flags = IOMMU_DIRTY_TRACKING_ENABLED,
+		.hwpt_id = self->ioas_id,
+	};
+	struct iommu_test_cmd dirty_cmd = {
+		.size = sizeof(dirty_cmd),
+		.op = IOMMU_TEST_OP_DIRTY,
+		.id = self->ioas_id,
+		.dirty = { .iova = MOCK_APERTURE_START,
+			   .length = BUFFER_SIZE,
+			   .page_size = MOCK_PAGE_SIZE,
+			   .uptr = (uintptr_t)bitmap },
+	};
+	struct iommu_hwpt_get_dirty_iova get_dirty_cmd = {
+		.size = sizeof(get_dirty_cmd),
+		.hwpt_id = self->ioas_id,
+		.bitmap = {
+			.iova = MOCK_APERTURE_START,
+			.length = BUFFER_SIZE,
+			.page_size = MOCK_PAGE_SIZE,
+			.data = (__u64 *)bitmap,
+		}
+	};
+	struct iommu_ioas_unmap_dirty unmap_dirty_cmd = {
+		.size = sizeof(unmap_dirty_cmd),
+		.ioas_id = self->ioas_id,
+		.bitmap = {
+			.iova = MOCK_APERTURE_START,
+			.length = BUFFER_SIZE,
+			.page_size = MOCK_PAGE_SIZE,
+			.data = (__u64 *)bitmap,
+		},
+	};
+	struct iommu_destroy destroy_cmd = { .size = sizeof(destroy_cmd) };
+	unsigned long i, count, nbits = BITMAP_SIZE * BITS_PER_BYTE;
+
+	/* Toggle dirty with a domain and a single map */
+	ASSERT_EQ(0, ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_MOCK_DOMAIN),
+			   &mock_cmd));
+	ASSERT_EQ(0, ioctl(self->fd, IOMMU_IOAS_MAP, &map_cmd));
+
+	set_dirty_cmd.hwpt_id = mock_cmd.id;
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+	EXPECT_ERRNO(EINVAL,
+		  ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+
+	/* Mark all even bits as dirty in the mock domain */
+	for (count = 0, i = 0; i < nbits; count += !(i%2), i++)
+		if (!(i % 2))
+			set_bit(i, (unsigned long *) bitmap);
+	ASSERT_EQ(count, BITMAP_SIZE * BITS_PER_BYTE / 2);
+
+	dirty_cmd.id = mock_cmd.id;
+	ASSERT_EQ(0,
+		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY),
+			&dirty_cmd));
+	ASSERT_EQ(BITMAP_SIZE * BITS_PER_BYTE / 2,
+		  dirty_cmd.dirty.out_nr_dirty);
+
+	get_dirty_cmd.hwpt_id = mock_cmd.id;
+	memset(bitmap, 0, BITMAP_SIZE);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_HWPT_GET_DIRTY_IOVA, &get_dirty_cmd));
+
+	/* All even bits should be dirty */
+	for (count = 0, i = 0; i < nbits; count += !(i%2), i++)
+		ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *) bitmap));
+	ASSERT_EQ(count, dirty_cmd.dirty.out_nr_dirty);
+
+	memset(bitmap, 0, BITMAP_SIZE);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_HWPT_GET_DIRTY_IOVA, &get_dirty_cmd));
+
+	/* Should be all zeroes */
+	for (i = 0; i < nbits; i++)
+		ASSERT_EQ(0, test_bit(i, (unsigned long *) bitmap));
+
+	/* Mark all even bits as dirty in the mock domain */
+	for (count = 0, i = 0; i < nbits; count += !(i%2), i++)
+		if (!(i % 2))
+			set_bit(i, (unsigned long *) bitmap);
+	ASSERT_EQ(count, BITMAP_SIZE * BITS_PER_BYTE / 2);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, _IOMMU_TEST_CMD(IOMMU_TEST_OP_DIRTY),
+			&dirty_cmd));
+	ASSERT_EQ(BITMAP_SIZE * BITS_PER_BYTE / 2,
+		  dirty_cmd.dirty.out_nr_dirty);
+
+	memset(bitmap, 0, BITMAP_SIZE);
+	ASSERT_EQ(0,
+		  ioctl(self->fd, IOMMU_IOAS_UNMAP_DIRTY, &unmap_dirty_cmd));
+
+	/* All even bits should be dirty */
+	for (count = 0, i = 0; i < nbits; count += !(i%2), i++)
+		ASSERT_EQ(!(i % 2), test_bit(i, (unsigned long *) bitmap));
+	ASSERT_EQ(count, dirty_cmd.dirty.out_nr_dirty);
+
+	set_dirty_cmd.flags = IOMMU_DIRTY_TRACKING_DISABLED;
+	ASSERT_EQ(0,
+		     ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+	EXPECT_ERRNO(EINVAL,
+		     ioctl(self->fd, IOMMU_HWPT_SET_DIRTY, &set_dirty_cmd));
+
+	destroy_cmd.id = mock_cmd.mock_domain.device_id;
+	ASSERT_EQ(0, ioctl(self->fd, IOMMU_DESTROY, &destroy_cmd));
+	destroy_cmd.id = mock_cmd.id;
+	ASSERT_EQ(0, ioctl(self->fd, IOMMU_DESTROY, &destroy_cmd));
+}
+
 TEST_F(iommufd_ioas, access)
 {
 	struct iommu_ioas_map map_cmd = {
-- 
2.17.2

_______________________________________________
iommu mailing list
iommu@lists.linux-foundation.org
https://lists.linuxfoundation.org/mailman/listinfo/iommu

  parent reply	other threads:[~2022-04-28 21:11 UTC|newest]

Thread overview: 209+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-04-28 21:09 [PATCH RFC 00/19] IOMMUFD Dirty Tracking Joao Martins
2022-04-28 21:09 ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 01/19] iommu: Add iommu_domain ops for dirty tracking Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  7:54   ` Tian, Kevin
2022-04-29  7:54     ` Tian, Kevin
2022-04-29 10:44     ` Joao Martins
2022-04-29 10:44       ` Joao Martins
2022-04-29 12:08   ` Jason Gunthorpe
2022-04-29 12:08     ` Jason Gunthorpe via iommu
2022-04-29 14:26     ` Joao Martins
2022-04-29 14:26       ` Joao Martins
2022-04-29 14:35       ` Jason Gunthorpe
2022-04-29 14:35         ` Jason Gunthorpe via iommu
2022-04-29 13:40   ` Baolu Lu
2022-04-29 13:40     ` Baolu Lu
2022-04-29 15:27     ` Joao Martins
2022-04-29 15:27       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 02/19] iommufd: Dirty tracking for io_pagetable Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  8:07   ` Tian, Kevin
2022-04-29  8:07     ` Tian, Kevin
2022-04-29 10:48     ` Joao Martins
2022-04-29 10:48       ` Joao Martins
2022-04-29 11:56     ` Jason Gunthorpe
2022-04-29 11:56       ` Jason Gunthorpe via iommu
2022-04-29 14:28       ` Joao Martins
2022-04-29 14:28         ` Joao Martins
2022-04-29 23:51   ` Baolu Lu
2022-04-29 23:51     ` Baolu Lu
2022-05-02 11:57     ` Joao Martins
2022-05-02 11:57       ` Joao Martins
2022-08-29 10:01   ` Shameerali Kolothum Thodi
2022-04-28 21:09 ` [PATCH RFC 03/19] iommufd: Dirty tracking data support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  8:12   ` Tian, Kevin
2022-04-29  8:12     ` Tian, Kevin
2022-04-29 10:54     ` Joao Martins
2022-04-29 10:54       ` Joao Martins
2022-04-29 12:09       ` Jason Gunthorpe
2022-04-29 12:09         ` Jason Gunthorpe via iommu
2022-04-29 14:33         ` Joao Martins
2022-04-29 14:33           ` Joao Martins
2022-04-30  4:11   ` Baolu Lu
2022-04-30  4:11     ` Baolu Lu
2022-05-02 12:06     ` Joao Martins
2022-05-02 12:06       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 04/19] iommu: Add an unmap API that returns dirtied IOPTEs Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-30  5:12   ` Baolu Lu
2022-04-30  5:12     ` Baolu Lu
2022-05-02 12:22     ` Joao Martins
2022-05-02 12:22       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 05/19] iommufd: Add a dirty bitmap to iopt_unmap_iova() Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29 12:14   ` Jason Gunthorpe
2022-04-29 12:14     ` Jason Gunthorpe via iommu
2022-04-29 14:36     ` Joao Martins
2022-04-29 14:36       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 06/19] iommufd: Dirty tracking IOCTLs for the hw_pagetable Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 07/19] iommufd/vfio-compat: Dirty tracking IOCTLs compatibility Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29 12:19   ` Jason Gunthorpe
2022-04-29 12:19     ` Jason Gunthorpe via iommu
2022-04-29 14:27     ` Joao Martins
2022-04-29 14:27       ` Joao Martins
2022-04-29 14:36       ` Jason Gunthorpe via iommu
2022-04-29 14:36         ` Jason Gunthorpe
2022-04-29 14:52         ` Joao Martins
2022-04-29 14:52           ` Joao Martins
2022-04-28 21:09 ` Joao Martins [this message]
2022-04-28 21:09   ` [PATCH RFC 08/19] iommufd: Add a test for dirty tracking ioctls Joao Martins
2022-04-28 21:09 ` [PATCH RFC 09/19] iommu/amd: Access/Dirty bit support in IOPTEs Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-05-31 11:34   ` Suravee Suthikulpanit via iommu
2022-05-31 11:34     ` Suravee Suthikulpanit
2022-05-31 12:15     ` Baolu Lu
2022-05-31 12:15       ` Baolu Lu
2022-05-31 15:22     ` Joao Martins
2022-05-31 15:22       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 10/19] iommu/amd: Add unmap_read_dirty() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-05-31 12:39   ` Suravee Suthikulpanit
2022-05-31 12:39     ` Suravee Suthikulpanit via iommu
2022-05-31 15:51     ` Joao Martins
2022-05-31 15:51       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 11/19] iommu/amd: Print access/dirty bits if supported Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 12/19] iommu/arm-smmu-v3: Add feature detection for HTTU Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 13/19] iommu/arm-smmu-v3: Add feature detection for BBML Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29 11:11   ` Robin Murphy
2022-04-29 11:11     ` Robin Murphy
2022-04-29 11:54     ` Joao Martins
2022-04-29 11:54       ` Joao Martins
2022-04-29 12:26       ` Robin Murphy
2022-04-29 12:26         ` Robin Murphy
2022-04-29 14:34         ` Joao Martins
2022-04-29 14:34           ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 14/19] iommu/arm-smmu-v3: Add read_and_clear_dirty() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-08-29  9:59   ` Shameerali Kolothum Thodi
2022-04-28 21:09 ` [PATCH RFC 15/19] iommu/arm-smmu-v3: Add set_dirty_tracking_range() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  8:28   ` Tian, Kevin
2022-04-29  8:28     ` Tian, Kevin
2022-04-29 11:05     ` Joao Martins
2022-04-29 11:05       ` Joao Martins
2022-04-29 11:19       ` Robin Murphy
2022-04-29 11:19         ` Robin Murphy
2022-04-29 12:06         ` Joao Martins
2022-04-29 12:06           ` Joao Martins
2022-04-29 12:23           ` Jason Gunthorpe
2022-04-29 12:23             ` Jason Gunthorpe via iommu
2022-04-29 14:45             ` Joao Martins
2022-04-29 14:45               ` Joao Martins
2022-04-29 16:11               ` Jason Gunthorpe
2022-04-29 16:11                 ` Jason Gunthorpe via iommu
2022-04-29 16:40                 ` Joao Martins
2022-04-29 16:40                   ` Joao Martins
2022-04-29 16:46                   ` Jason Gunthorpe
2022-04-29 16:46                     ` Jason Gunthorpe via iommu
2022-04-29 19:20                   ` Robin Murphy
2022-04-29 19:20                     ` Robin Murphy
2022-05-02 11:52                     ` Joao Martins
2022-05-02 11:52                       ` Joao Martins
2022-05-02 11:57                       ` Joao Martins
2022-05-02 11:57                         ` Joao Martins
2022-05-05  7:25       ` Shameerali Kolothum Thodi
2022-05-05  7:25         ` Shameerali Kolothum Thodi via iommu
2022-05-05  9:52         ` Joao Martins
2022-05-05  9:52           ` Joao Martins
2022-08-29  9:59           ` Shameerali Kolothum Thodi
2022-08-29 10:00   ` Shameerali Kolothum Thodi
2022-04-28 21:09 ` [PATCH RFC 16/19] iommu/arm-smmu-v3: Enable HTTU for stage1 with io-pgtable mapping Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29 11:35   ` Robin Murphy
2022-04-29 11:35     ` Robin Murphy
2022-04-29 12:10     ` Joao Martins
2022-04-29 12:10       ` Joao Martins
2022-04-29 12:46       ` Robin Murphy
2022-04-29 12:46         ` Robin Murphy
2022-08-29 10:00   ` Shameerali Kolothum Thodi
2022-04-28 21:09 ` [PATCH RFC 17/19] iommu/arm-smmu-v3: Add unmap_read_dirty() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29 11:53   ` Robin Murphy
2022-04-29 11:53     ` Robin Murphy
2022-04-28 21:09 ` [PATCH RFC 18/19] iommu/intel: Access/Dirty bit support for SL domains Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  9:03   ` Tian, Kevin
2022-04-29  9:03     ` Tian, Kevin
2022-04-29 11:20     ` Joao Martins
2022-04-29 11:20       ` Joao Martins
2022-04-30  6:12   ` Baolu Lu
2022-04-30  6:12     ` Baolu Lu
2022-05-02 12:24     ` Joao Martins
2022-05-02 12:24       ` Joao Martins
2022-04-28 21:09 ` [PATCH RFC 19/19] iommu/intel: Add unmap_read_dirty() support Joao Martins
2022-04-28 21:09   ` Joao Martins
2022-04-29  5:45 ` [PATCH RFC 00/19] IOMMUFD Dirty Tracking Tian, Kevin
2022-04-29  5:45   ` Tian, Kevin
2022-04-29 10:27   ` Joao Martins
2022-04-29 10:27     ` Joao Martins
2022-04-29 12:38     ` Jason Gunthorpe
2022-04-29 12:38       ` Jason Gunthorpe via iommu
2022-04-29 15:20       ` Joao Martins
2022-04-29 15:20         ` Joao Martins
2022-05-05  7:40       ` Tian, Kevin
2022-05-05  7:40         ` Tian, Kevin
2022-05-05 14:07         ` Jason Gunthorpe
2022-05-05 14:07           ` Jason Gunthorpe via iommu
2022-05-06  3:51           ` Tian, Kevin
2022-05-06  3:51             ` Tian, Kevin
2022-05-06 11:46             ` Jason Gunthorpe
2022-05-06 11:46               ` Jason Gunthorpe via iommu
2022-05-10  1:38               ` Tian, Kevin
2022-05-10  1:38                 ` Tian, Kevin
2022-05-10 11:50                 ` Joao Martins
2022-05-10 11:50                   ` Joao Martins
2022-05-11  1:17                   ` Tian, Kevin
2022-05-11  1:17                     ` Tian, Kevin
2022-05-10 13:46                 ` Jason Gunthorpe via iommu
2022-05-10 13:46                   ` Jason Gunthorpe
2022-05-11  1:10                   ` Tian, Kevin
2022-05-11  1:10                     ` Tian, Kevin
2022-07-12 18:34                     ` Joao Martins
2022-07-21 14:24                       ` Jason Gunthorpe
2022-05-02 18:11   ` Alex Williamson
2022-05-02 18:11     ` Alex Williamson
2022-05-02 18:52     ` Jason Gunthorpe
2022-05-02 18:52       ` Jason Gunthorpe via iommu
2022-05-03 10:48       ` Joao Martins
2022-05-03 10:48         ` Joao Martins
2022-05-05  7:42       ` Tian, Kevin
2022-05-05  7:42         ` Tian, Kevin
2022-05-05 10:06         ` Joao Martins
2022-05-05 10:06           ` Joao Martins
2022-05-05 11:03           ` Tian, Kevin
2022-05-05 11:03             ` Tian, Kevin
2022-05-05 11:50             ` Joao Martins
2022-05-05 11:50               ` Joao Martins
2022-05-06  3:14               ` Tian, Kevin
2022-05-06  3:14                 ` Tian, Kevin
2022-05-05 13:55             ` Jason Gunthorpe
2022-05-05 13:55               ` Jason Gunthorpe via iommu
2022-05-06  3:17               ` Tian, Kevin
2022-05-06  3:17                 ` Tian, Kevin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20220428210933.3583-9-joao.m.martins@oracle.com \
    --to=joao.m.martins@oracle.com \
    --cc=alex.williamson@redhat.com \
    --cc=baolu.lu@linux.intel.com \
    --cc=cohuck@redhat.com \
    --cc=dwmw2@infradead.org \
    --cc=eric.auger@redhat.com \
    --cc=iommu@lists.linux-foundation.org \
    --cc=jean-philippe@linaro.org \
    --cc=jgg@nvidia.com \
    --cc=joro@8bytes.org \
    --cc=kevin.tian@intel.com \
    --cc=kvm@vger.kernel.org \
    --cc=nicolinc@nvidia.com \
    --cc=robin.murphy@arm.com \
    --cc=shameerali.kolothum.thodi@huawei.com \
    --cc=suravee.suthikulpanit@amd.com \
    --cc=will@kernel.org \
    --cc=yi.l.liu@intel.com \
    --cc=yishaih@nvidia.com \
    --cc=zhukeqian1@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link.
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is an external index of several public inboxes;
see the mirroring instructions on how to clone and mirror
all data and code used by this external index.