From: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
To: dri-devel@lists.freedesktop.org, amd-gfx@lists.freedesktop.org
Cc: Alexander.Deucher@amd.com, ckoenig.leichtzumerken@gmail.com
Subject: [PATCH 6/7] tests/amdgpu/hotunplug: Add unplug with cs test.
Date: Tue,  1 Jun 2021 16:17:01 -0400
Message-ID: <20210601201702.23316-7-andrey.grodzovsky@amd.com>
In-Reply-To: <20210601201702.23316-1-andrey.grodzovsky@amd.com>

Same as the simple hotunplug test, but with a background thread continuously
submitting NOP command submissions on the GFX ring while the device is
unplugged. After the unplug, the final fence wait is expected to return
either 0 or -ECANCELED.

Signed-off-by: Andrey Grodzovsky <andrey.grodzovsky@amd.com>
---
 tests/amdgpu/hotunplug_tests.c | 128 ++++++++++++++++++++++++++++++++-
 1 file changed, 126 insertions(+), 2 deletions(-)
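
For reviewers, a rough sketch of the flow the new amdgpu_hotunplug_test(true)
path implements (names match the test code below; CUnit assertions are trimmed,
and the trailing rescan comes from the pre-existing test flow that falls outside
the hunks shown here):

	static void hotunplug_with_cs_sketch(void)	/* illustrative only */
	{
		pthread_t *thread;

		amdgpu_hotunplug_setup_test();		/* reopen the device */
		thread = amdgpu_create_cs_thread();	/* set do_cs, start amdgpu_nop_cs() */
		amdgpu_hotunplug_remove();		/* trigger the sysfs remove */
		amdgpu_destroy_cs_thread(thread);	/* clear do_cs, join the thread */
		amdgpu_hotunplug_teardown_test();
		amdgpu_hotunplug_rescan();		/* write to /sys/bus/pci/rescan */
	}

The background thread keeps the GFX ring busy with NOP IBs, so the unplug races
against in-flight submissions; the thread's final fence wait therefore accepts
either 0 or -ECANCELED.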

diff --git a/tests/amdgpu/hotunplug_tests.c b/tests/amdgpu/hotunplug_tests.c
index c2bc1cf2..6e133a07 100644
--- a/tests/amdgpu/hotunplug_tests.c
+++ b/tests/amdgpu/hotunplug_tests.c
@@ -38,11 +38,13 @@
 #include "xf86drm.h"
 #include <pthread.h>
 
+#define GFX_COMPUTE_NOP  0xffff1000
 
 static  amdgpu_device_handle device_handle;
 static  uint32_t  major_version;
 static  uint32_t  minor_version;
 static char *sysfs_remove = NULL;
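+/* Polled by amdgpu_nop_cs(); set/cleared by the main test thread. */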
+static bool do_cs;
 
 CU_BOOL suite_hotunplug_tests_enable(void)
 {
@@ -110,7 +112,7 @@ static int amdgpu_hotunplug_setup_test()
 	int r;
 	char *tmp_str;
 
-	if (amdgpu_open_device_on_test_index(open_render_node) <= 0) {
+	if (amdgpu_open_device_on_test_index(open_render_node) < 0) {
 		printf("\n\n Failed to reopen device file!\n");
 		return CUE_SINIT_FAILED;
 
@@ -165,17 +167,128 @@ static inline int amdgpu_hotunplug_rescan()
 	return amdgpu_hotunplug_trigger("/sys/bus/pci/rescan");
 }
 
+static int amdgpu_cs_sync(amdgpu_context_handle context,
+			   unsigned int ip_type,
+			   int ring,
+			   unsigned int seqno)
+{
+	struct amdgpu_cs_fence fence = {
+		.context = context,
+		.ip_type = ip_type,
+		.ring = ring,
+		.fence = seqno,
+	};
+	uint32_t expired;
+
+	return amdgpu_cs_query_fence_status(&fence,
+					   AMDGPU_TIMEOUT_INFINITE,
+					   0, &expired);
+}
 
-static void amdgpu_hotunplug_simple(void)
+static void *amdgpu_nop_cs(void *arg)
+{
+	amdgpu_bo_handle ib_result_handle;
+	void *ib_result_cpu;
+	uint64_t ib_result_mc_address;
+	uint32_t *ptr;
+	int i, r;
+	amdgpu_bo_list_handle bo_list;
+	amdgpu_va_handle va_handle;
+	amdgpu_context_handle context;
+	struct amdgpu_cs_request ibs_request;
+	struct amdgpu_cs_ib_info ib_info;
+
+	r = amdgpu_cs_ctx_create(device_handle, &context);
+	CU_ASSERT_EQUAL(r, 0);
+
+	r = amdgpu_bo_alloc_and_map(device_handle, 4096, 4096,
+				    AMDGPU_GEM_DOMAIN_GTT, 0,
+				    &ib_result_handle, &ib_result_cpu,
+				    &ib_result_mc_address, &va_handle);
+	CU_ASSERT_EQUAL(r, 0);
+
+	ptr = ib_result_cpu;
+	for (i = 0; i < 16; ++i)
+		ptr[i] = GFX_COMPUTE_NOP;
+
+	r = amdgpu_bo_list_create(device_handle, 1, &ib_result_handle, NULL, &bo_list);
+	CU_ASSERT_EQUAL(r, 0);
+
+	memset(&ib_info, 0, sizeof(struct amdgpu_cs_ib_info));
+	ib_info.ib_mc_address = ib_result_mc_address;
+	ib_info.size = 16;
+
+	memset(&ibs_request, 0, sizeof(struct amdgpu_cs_request));
+	ibs_request.ip_type = AMDGPU_HW_IP_GFX;
+	ibs_request.ring = 0;
+	ibs_request.number_of_ibs = 1;
+	ibs_request.ibs = &ib_info;
+	ibs_request.resources = bo_list;
+
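+	/*
+	 * Keep submitting NOP IBs until the main thread clears do_cs.
+	 * Return values are deliberately ignored: once the device has
+	 * been unplugged, submissions are expected to start failing.
+	 */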
+	while (do_cs)
+		amdgpu_cs_submit(context, 0, &ibs_request, 1);
+
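+	/* Wait for the last fence; 0 or -ECANCELED (device gone) is OK. */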
+	r = amdgpu_cs_sync(context, AMDGPU_HW_IP_GFX, 0, ibs_request.seq_no);
+	CU_ASSERT_EQUAL((r == 0 || r == -ECANCELED), 1);
+
+	amdgpu_bo_list_destroy(bo_list);
+	amdgpu_bo_unmap_and_free(ib_result_handle, va_handle,
+				 ib_result_mc_address, 4096);
+
+	amdgpu_cs_ctx_free(context);
+
+	return NULL;
+}
+
+static pthread_t *amdgpu_create_cs_thread(void)
+{
+	int r;
+	pthread_t *thread = malloc(sizeof(*thread));
+	if (!thread)
+		return NULL;
+
+	do_cs = true;
+
+	r = pthread_create(thread, NULL, amdgpu_nop_cs, NULL);
+	CU_ASSERT_EQUAL(r, 0);
+
+	/* Give the thread enough time to start */
+	usleep(100000);
+	return thread;
+}
+
+static void amdgpu_destroy_cs_thread(pthread_t *thread)
+{
+	void *status;
+
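+	/* Stop the submission loop and wait for the CS thread to clean up. */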
+	do_cs = false;
+
+	pthread_join(*thread, &status);
+	CU_ASSERT_EQUAL(status, 0);
+
+	free(thread);
+}
+
+static void amdgpu_hotunplug_test(bool with_cs)
 {
 	int r;
+	pthread_t *thread = NULL;
 
 	r = amdgpu_hotunplug_setup_test();
 	CU_ASSERT_EQUAL(r , 0);
 
+	if (with_cs) {
+		thread = amdgpu_create_cs_thread();
+		CU_ASSERT_NOT_EQUAL(thread, NULL);
+	}
+
 	r = amdgpu_hotunplug_remove();
 	CU_ASSERT_EQUAL(r > 0, 1);
 
+	if (with_cs)
+		amdgpu_destroy_cs_thread(thread);
+
 	r = amdgpu_hotunplug_teardown_test();
 	CU_ASSERT_EQUAL(r , 0);
 
@@ -183,8 +296,19 @@ static void amdgpu_hotunplug_simple(void)
 	CU_ASSERT_EQUAL(r > 0, 1);
 }
 
+static void amdgpu_hotunplug_simple(void)
+{
+	amdgpu_hotunplug_test(false);
+}
+
+static void amdgpu_hotunplug_with_cs(void)
+{
+	amdgpu_hotunplug_test(true);
+}
+
 CU_TestInfo hotunplug_tests[] = {
 	{ "Unplug card and rescan the bus to plug it back", amdgpu_hotunplug_simple },
+	{ "Same as first test but with command submission", amdgpu_hotunplug_with_cs },
 	CU_TEST_INFO_NULL,
 };
 
-- 
2.25.1

