From: Chris Wilson <chris@chris-wilson.co.uk>
To: intel-gfx@lists.freedesktop.org
Cc: igt-dev@lists.freedesktop.org
Subject: [PATCH i-g-t 3/9] i915/gem_exec_schedule: Beware priority inversion from iova faults
Date: Wed, 13 Nov 2019 12:52:34 +0000
Message-ID: <20191113125240.3781-3-chris@chris-wilson.co.uk>
In-Reply-To: <20191113125240.3781-1-chris@chris-wilson.co.uk>

Check that if two contexts (one high priority, one low) share the same
buffer which has taken a page fault, we do not create an implicit
dependency between the two contexts for servicing that page fault and
binding the vma.
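
As background, the test relies on the userfaultfd trick of stalling a
page fault until it is explicitly serviced from another thread. Below
is a minimal standalone sketch of that mechanism (hypothetical demo
code, not part of this patch; error handling omitted for brevity,
build with -pthread):

  #include <fcntl.h>
  #include <linux/userfaultfd.h>
  #include <pthread.h>
  #include <stdio.h>
  #include <string.h>
  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <sys/syscall.h>
  #include <unistd.h>

  static void *toucher(void *page)
  {
  	/* This read faults and blocks until the fault is serviced. */
  	printf("first byte: %d\n", *(volatile char *)page);
  	return NULL;
  }

  int main(void)
  {
  	struct uffdio_api api = { .api = UFFD_API };
  	struct uffdio_register reg;
  	struct uffdio_copy copy;
  	struct uffd_msg msg;
  	char fill[4096];
  	pthread_t thread;
  	void *page;
  	int ufd;

  	ufd = syscall(__NR_userfaultfd, O_CLOEXEC);
  	ioctl(ufd, UFFDIO_API, &api);

  	page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
  		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

  	/* Ask to be notified of faults on not-yet-populated pages. */
  	memset(&reg, 0, sizeof(reg));
  	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
  	reg.range.start = (unsigned long)page;
  	reg.range.len = 4096;
  	ioctl(ufd, UFFDIO_REGISTER, &reg);

  	pthread_create(&thread, NULL, toucher, page);

  	/* Wait for the toucher to block on the page fault... */
  	read(ufd, &msg, sizeof(msg));

  	/* ...then resolve the fault, unblocking the toucher. */
  	memset(&copy, 0, sizeof(copy));
  	copy.dst = msg.arg.pagefault.address & ~4095UL;
  	copy.src = (unsigned long)memset(fill, 0xc5, sizeof(fill));
  	copy.len = 4096;
  	ioctl(ufd, UFFDIO_COPY, &copy);

  	pthread_join(thread, NULL);
  	return 0;
  }

In the test below, the blocked access is the GPU binding of a userptr
batch rather than a CPU read, and the fault is only released once both
the low and the high priority contexts have been queued behind it.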

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
---
 tests/i915/gem_exec_schedule.c | 166 +++++++++++++++++++++++++++++++++
 1 file changed, 166 insertions(+)

diff --git a/tests/i915/gem_exec_schedule.c b/tests/i915/gem_exec_schedule.c
index d98434123..f8b0ef5a8 100644
--- a/tests/i915/gem_exec_schedule.c
+++ b/tests/i915/gem_exec_schedule.c
@@ -1638,9 +1638,15 @@ static int userfaultfd(int flags)
 
 struct ufd_thread {
 	uint32_t batch;
+	uint32_t scratch;
 	uint32_t *page;
 	unsigned int engine;
+	unsigned int flags;
 	int i915;
+
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	int count;
 };
 
 static uint32_t create_userptr(int i915, void *page)
@@ -1777,6 +1783,160 @@ static void test_pi_userfault(int i915, unsigned int engine)
 	close(ufd);
 }
 
+static void *iova_thread(struct ufd_thread *t, int prio)
+{
+	uint32_t ctx =
+		gem_context_clone(t->i915, 0,
+				  t->flags & SHARED ? I915_CONTEXT_CLONE_VM : 0,
+				  0);
+
+	gem_context_set_priority(t->i915, ctx, prio);
+
+	store_dword_plug(t->i915, ctx, t->engine,
+			 t->scratch, 0, prio,
+			 t->batch, 0 /* no write hazard! */);
+
+	pthread_mutex_lock(&t->mutex);
+	if (!--t->count)
+		pthread_cond_signal(&t->cond);
+	pthread_mutex_unlock(&t->mutex);
+
+	gem_context_destroy(t->i915, ctx);
+	return NULL;
+}
+
+static void *iova_low(void *arg)
+{
+	return iova_thread(arg, MIN_PRIO);
+}
+
+static void *iova_high(void *arg)
+{
+	return iova_thread(arg, MAX_PRIO);
+}
+
+static void test_pi_iova(int i915, unsigned int engine, unsigned int flags)
+{
+	struct uffdio_api api = { .api = UFFD_API };
+	struct uffdio_register reg;
+	struct uffdio_copy copy;
+	struct uffd_msg msg;
+	struct ufd_thread t;
+	igt_spin_t *spin;
+	pthread_t hi, lo;
+	char poison[4096];
+	uint32_t result;
+	int ufd;
+
+	/*
+	 * In this scenario, we have a pair of contending contexts that
+	 * share the same resource. That resource is stuck behind a slow
+	 * page fault such that neither context has immediate access to it.
+	 * What is expected is that as soon as that resource becomes available,
+	 * the two contexts are queued with the high priority context taking
+	 * precedence. We need to check that we do not cross-contaminate
+	 * the two contexts with the page fault on the shared resource
+	 * initiated by the low priority context. (Consider that the low
+	 * priority context may install an exclusive fence for the page
+	 * fault, which is then used for strict ordering by the high priority
+	 * context, causing an unwanted implicit dependency between the two
+	 * and promoting the low priority context to high.)
+	 *
+	 * SHARED: the two contexts share a vm, but still have separate
+	 * timelines that should not mingle.
+	 */
+
+	ufd = userfaultfd(0);
+	igt_require_f(ufd != -1, "kernel support for userfaultfd\n");
+	igt_require_f(ioctl(ufd, UFFDIO_API, &api) == 0 && api.api == UFFD_API,
+		      "userfaultfd API v%lld:%lld\n", UFFD_API, api.api);
+
+	t.i915 = i915;
+	t.engine = engine;
+	t.flags = flags;
+
+	t.count = 2;
+	pthread_cond_init(&t.cond, NULL);
+	pthread_mutex_init(&t.mutex, NULL);
+
+	t.page = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED | MAP_ANON, 0, 0);
+	igt_assert(t.page != MAP_FAILED);
+	t.batch = create_userptr(i915, t.page);
+	t.scratch = gem_create(i915, 4096);
+
+	/* Register our fault handler for t.page */
+	memset(&reg, 0, sizeof(reg));
+	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
+	reg.range.start = to_user_pointer(t.page);
+	reg.range.len = 4096;
+	do_ioctl(ufd, UFFDIO_REGISTER, &reg);
+	igt_assert(reg.ioctls == UFFD_API_RANGE_IOCTLS);
+
+	/*
+	 * Fill the engine with spinners; the store_dword() is too quick!
+	 *
+	 * It is not that it is too quick, it is that the order in which the
+	 * requests are signaled from the pagefault completion is loosely
+	 * defined (currently, it's in order of attachment so the low context
+	 * wins), then submission into the execlists is immediate with the
+	 * low context filling the last slot in the ELSP. Preemption will
+	 * not take place until after the low priority context has had a
+	 * chance to run, and since the task is very short there is no
+	 * arbitration point inside the batch buffer so we only preempt
+	 * after the low priority context has completed.
+	 *
+	 * One way to prevent such opportunistic execution of the low priority
+	 * context would be to remove direct submission and wait until all
+	 * signals are delivered (as the signal delivery is under the irq lock,
+	 * the local tasklet will not run until after all signals have been
+	 * delivered... but another tasklet might).
+	 */
+	spin = igt_spin_new(i915, .engine = engine);
+	for (int i = 0; i < MAX_ELSP_QLEN; i++) {
+		spin->execbuf.rsvd1 = create_highest_priority(i915);
+		gem_execbuf(i915, &spin->execbuf);
+		gem_context_destroy(i915, spin->execbuf.rsvd1);
+	}
+
+	/* Kick off the submission threads */
+	igt_assert(pthread_create(&lo, NULL, iova_low, &t) == 0);
+
+	/* Wait until the low priority thread is blocked on the fault */
+	igt_assert_eq(read(ufd, &msg, sizeof(msg)), sizeof(msg));
+	igt_assert_eq(msg.event, UFFD_EVENT_PAGEFAULT);
+	igt_assert(from_user_pointer(msg.arg.pagefault.address) == t.page);
+
+	/* Then release a very similar thread, but at high priority! */
+	igt_assert(pthread_create(&hi, NULL, iova_high, &t) == 0);
+
+	/* Service the fault; releasing both contexts */
+	memset(&copy, 0, sizeof(copy));
+	copy.dst = msg.arg.pagefault.address;
+	copy.src = to_user_pointer(memset(poison, 0xc5, sizeof(poison)));
+	copy.len = 4096;
+	do_ioctl(ufd, UFFDIO_COPY, &copy);
+
+	/* Wait until both threads have had a chance to submit */
+	pthread_mutex_lock(&t.mutex);
+	while (t.count)
+		pthread_cond_wait(&t.cond, &t.mutex);
+	pthread_mutex_unlock(&t.mutex);
+	igt_debugfs_dump(i915, "i915_engine_info");
+	igt_spin_free(i915, spin);
+
+	pthread_join(hi, NULL);
+	pthread_join(lo, NULL);
+	gem_close(i915, t.batch);
+
+	gem_sync(i915, t.scratch); /* no write hazard was declared, so sync explicitly */
+	gem_read(i915, t.scratch, 0, &result, sizeof(result));
+	igt_assert_eq(result, MIN_PRIO);
+	gem_close(i915, t.scratch);
+
+	munmap(t.page, 4096);
+	close(ufd);
+}
+
 static void measure_semaphore_power(int i915)
 {
 	struct rapl gpu, pkg;
@@ -2019,6 +2179,12 @@ igt_main
 
 				igt_subtest_f("pi-userfault-%s", e->name)
 					test_pi_userfault(fd, eb_ring(e));
+
+				igt_subtest_f("pi-distinct-iova-%s", e->name)
+					test_pi_iova(fd, eb_ring(e), 0);
+
+				igt_subtest_f("pi-shared-iova-%s", e->name)
+					test_pi_iova(fd, eb_ring(e), SHARED);
 			}
 		}
 	}
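
(Usage note, assuming the usual IGT subtest conventions: the new
subtests can be run individually with e.g.
"gem_exec_schedule --run-subtest pi-distinct-iova-render"; the exact
engine suffix depends on the legacy names enumerated via eb_ring().)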
-- 
2.24.0
