summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorFlora Cui <flora.cui@amd.com>2022-07-07 20:48:19 +0800
committerLikun Gao <gaolikunglk@gmail.com>2022-08-15 06:10:15 +0000
commitcc3c80c6aec8a3fa5b3b4445724c5035d3c5e336 (patch)
tree6fcc182ad7ccb327f6da4c21c8c53a23f5a0e547
parent176e6ce6f3504b9cf5ce6a2a8b2e55cb201b986d (diff)
tests/amdgpu: refactor dispatch/draw test
Signed-off-by: Flora Cui <flora.cui@amd.com> Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
-rw-r--r--tests/amdgpu/amdgpu_test.h13
-rw-r--r--tests/amdgpu/basic_tests.c1800
-rw-r--r--tests/amdgpu/deadlock_tests.c53
-rw-r--r--tests/amdgpu/meson.build2
-rw-r--r--tests/amdgpu/shader_code.h144
-rw-r--r--tests/amdgpu/shader_code_gfx10.h202
-rw-r--r--tests/amdgpu/shader_code_gfx9.h204
-rw-r--r--tests/amdgpu/shader_code_hang.h104
-rw-r--r--tests/amdgpu/shader_test_util.c1723
9 files changed, 2393 insertions, 1852 deletions
diff --git a/tests/amdgpu/amdgpu_test.h b/tests/amdgpu/amdgpu_test.h
index 9f4453db..e2ba043b 100644
--- a/tests/amdgpu/amdgpu_test.h
+++ b/tests/amdgpu/amdgpu_test.h
@@ -282,12 +282,6 @@ CU_BOOL suite_cp_dma_tests_enable(void);
*/
extern CU_TestInfo cp_dma_tests[];
-void amdgpu_dispatch_hang_helper(amdgpu_device_handle device_handle, uint32_t ip_type);
-void amdgpu_dispatch_hang_slow_helper(amdgpu_device_handle device_handle, uint32_t ip_type);
-void amdgpu_memcpy_draw_test(amdgpu_device_handle device_handle, uint32_t ring,
- int version, int hang);
-void amdgpu_memcpy_draw_hang_slow_test(amdgpu_device_handle device_handle, uint32_t ring, int version);
-
/**
* Initialize security test suite
*/
@@ -314,7 +308,12 @@ amdgpu_command_submission_write_linear_helper_with_secure(amdgpu_device_handle
unsigned ip_type,
bool secure);
-
+extern void amdgpu_test_dispatch_helper(amdgpu_device_handle device_handle, unsigned ip);
+extern void amdgpu_test_dispatch_hang_helper(amdgpu_device_handle device_handle, uint32_t ip);
+extern void amdgpu_test_dispatch_hang_slow_helper(amdgpu_device_handle device_handle, uint32_t ip);
+extern void amdgpu_test_draw_helper(amdgpu_device_handle device_handle);
+extern void amdgpu_test_draw_hang_helper(amdgpu_device_handle device_handle);
+extern void amdgpu_test_draw_hang_slow_helper(amdgpu_device_handle device_handle);
/**
* Initialize hotunplug test suite
diff --git a/tests/amdgpu/basic_tests.c b/tests/amdgpu/basic_tests.c
index e4914737..42176c00 100644
--- a/tests/amdgpu/basic_tests.c
+++ b/tests/amdgpu/basic_tests.c
@@ -2478,1813 +2478,19 @@ static void amdgpu_sync_dependency_test(void)
free(ibs_request.dependencies);
}
-static int amdgpu_dispatch_load_cs_shader_hang_slow(uint32_t *ptr, int family)
-{
- struct amdgpu_test_shader *shader;
- int i, loop = 0x10000;
-
- switch (family) {
- case AMDGPU_FAMILY_AI:
- shader = &memcpy_cs_hang_slow_ai;
- break;
- case AMDGPU_FAMILY_RV:
- shader = &memcpy_cs_hang_slow_rv;
- break;
- case AMDGPU_FAMILY_NV:
- shader = &memcpy_cs_hang_slow_nv;
- break;
- default:
- return -1;
- break;
- }
-
- memcpy(ptr, shader->shader, shader->header_length * sizeof(uint32_t));
-
- for (i = 0; i < loop; i++)
- memcpy(ptr + shader->header_length + shader->body_length * i,
- shader->shader + shader->header_length,
- shader->body_length * sizeof(uint32_t));
-
- memcpy(ptr + shader->header_length + shader->body_length * loop,
- shader->shader + shader->header_length + shader->body_length,
- shader->foot_length * sizeof(uint32_t));
-
- return 0;
-}
-
-static int amdgpu_dispatch_load_cs_shader(uint8_t *ptr,
- int cs_type,
- uint32_t version)
-{
- uint32_t shader_size;
- const uint32_t *shader;
-
- switch (cs_type) {
- case CS_BUFFERCLEAR:
- if (version == 9) {
- shader = bufferclear_cs_shader_gfx9;
- shader_size = sizeof(bufferclear_cs_shader_gfx9);
- } else if (version == 10) {
- shader = bufferclear_cs_shader_gfx10;
- shader_size = sizeof(bufferclear_cs_shader_gfx10);
- }
- break;
- case CS_BUFFERCOPY:
- if (version == 9) {
- shader = buffercopy_cs_shader_gfx9;
- shader_size = sizeof(buffercopy_cs_shader_gfx9);
- } else if (version == 10) {
- shader = buffercopy_cs_shader_gfx10;
- shader_size = sizeof(buffercopy_cs_shader_gfx10);
- }
- break;
- case CS_HANG:
- shader = memcpy_ps_hang;
- shader_size = sizeof(memcpy_ps_hang);
- break;
- default:
- return -1;
- break;
- }
-
- memcpy(ptr, shader, shader_size);
- return 0;
-}
-
-static int amdgpu_dispatch_init(uint32_t *ptr, uint32_t ip_type, uint32_t version)
-{
- int i = 0;
-
- /* Write context control and load shadowing register if necessary */
- if (ip_type == AMDGPU_HW_IP_GFX) {
- ptr[i++] = PACKET3(PKT3_CONTEXT_CONTROL, 1);
- ptr[i++] = 0x80000000;
- ptr[i++] = 0x80000000;
- }
-
- /* Issue commands to set default compute state. */
- /* clear mmCOMPUTE_START_Z - mmCOMPUTE_START_X */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 3);
- ptr[i++] = 0x204;
- i += 3;
-
- /* clear mmCOMPUTE_TMPRING_SIZE */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
- ptr[i++] = 0x218;
- ptr[i++] = 0;
-
- /* Set new sh registers in GFX10 to 0 */
- if (version == 10) {
- /* mmCOMPUTE_SHADER_CHKSUM */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
- ptr[i++] = 0x22a;
- ptr[i++] = 0;
- /* mmCOMPUTE_REQ_CTRL */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 6);
- ptr[i++] = 0x222;
- i += 6;
- /* mmCP_COHER_START_DELAY */
- ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ptr[i++] = 0x7b;
- ptr[i++] = 0x20;
- }
- return i;
-}
-
-static int amdgpu_dispatch_write_cumask(uint32_t *ptr, uint32_t version)
-{
- int i = 0;
-
- /* Issue commands to set cu mask used in current dispatch */
- if (version == 9) {
- /* set mmCOMPUTE_STATIC_THREAD_MGMT_SE1 - mmCOMPUTE_STATIC_THREAD_MGMT_SE0 */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 2);
- ptr[i++] = 0x216;
- ptr[i++] = 0xffffffff;
- ptr[i++] = 0xffffffff;
- /* set mmCOMPUTE_STATIC_THREAD_MGMT_SE3 - mmCOMPUTE_STATIC_THREAD_MGMT_SE2 */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 2);
- ptr[i++] = 0x219;
- ptr[i++] = 0xffffffff;
- ptr[i++] = 0xffffffff;
- } else if (version == 10) {
- /* set mmCOMPUTE_STATIC_THREAD_MGMT_SE1 - mmCOMPUTE_STATIC_THREAD_MGMT_SE0 */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG_INDEX, 2);
- ptr[i++] = 0x30000216;
- ptr[i++] = 0xffffffff;
- ptr[i++] = 0xffffffff;
- /* set mmCOMPUTE_STATIC_THREAD_MGMT_SE3 - mmCOMPUTE_STATIC_THREAD_MGMT_SE2 */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG_INDEX, 2);
- ptr[i++] = 0x30000219;
- ptr[i++] = 0xffffffff;
- ptr[i++] = 0xffffffff;
- }
-
- return i;
-}
-
-static int amdgpu_dispatch_write2hw(uint32_t *ptr, uint64_t shader_addr, uint32_t version)
-{
- int i, j;
-
- i = 0;
-
- /* Writes shader state to HW */
- /* set mmCOMPUTE_PGM_HI - mmCOMPUTE_PGM_LO */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 2);
- ptr[i++] = 0x20c;
- ptr[i++] = (shader_addr >> 8);
- ptr[i++] = (shader_addr >> 40);
- /* write sh regs*/
- for (j = 0; j < bufferclear_cs_shader_registers_num_gfx9; j++) {
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
- /* - Gfx9ShRegBase */
- ptr[i++] = bufferclear_cs_shader_registers_gfx9[j][0] - 0x2c00;
- ptr[i++] = bufferclear_cs_shader_registers_gfx9[j][1];
- }
-
- if (version == 10) {
- /* mmCOMPUTE_PGM_RSRC3 */
- ptr[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
- ptr[i++] = 0x228;
- ptr[i++] = 0;
- }
-
- return i;
-}
-
-static void amdgpu_memset_dispatch_test(amdgpu_device_handle device_handle,
- uint32_t ip_type,
- uint32_t ring,
- uint32_t version)
-{
- amdgpu_context_handle context_handle;
- amdgpu_bo_handle bo_dst, bo_shader, bo_cmd, resources[3];
- volatile unsigned char *ptr_dst;
- void *ptr_shader;
- uint32_t *ptr_cmd;
- uint64_t mc_address_dst, mc_address_shader, mc_address_cmd;
- amdgpu_va_handle va_dst, va_shader, va_cmd;
- int i, r;
- int bo_dst_size = 16384;
- int bo_shader_size = 4096;
- int bo_cmd_size = 4096;
- struct amdgpu_cs_request ibs_request = {0};
- struct amdgpu_cs_ib_info ib_info= {0};
- amdgpu_bo_list_handle bo_list;
- struct amdgpu_cs_fence fence_status = {0};
- uint32_t expired;
-
- r = amdgpu_cs_ctx_create(device_handle, &context_handle);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_cmd_size, 4096,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- &bo_cmd, (void **)&ptr_cmd,
- &mc_address_cmd, &va_cmd);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_cmd, 0, bo_cmd_size);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_shader, &ptr_shader,
- &mc_address_shader, &va_shader);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_shader, 0, bo_shader_size);
-
- r = amdgpu_dispatch_load_cs_shader(ptr_shader, CS_BUFFERCLEAR, version);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_dst_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_dst, (void **)&ptr_dst,
- &mc_address_dst, &va_dst);
- CU_ASSERT_EQUAL(r, 0);
-
- i = 0;
- i += amdgpu_dispatch_init(ptr_cmd + i, ip_type, version);
-
- /* Issue commands to set cu mask used in current dispatch */
- i += amdgpu_dispatch_write_cumask(ptr_cmd + i, version);
-
- /* Writes shader state to HW */
- i += amdgpu_dispatch_write2hw(ptr_cmd + i, mc_address_shader, version);
-
- /* Write constant data */
- /* Writes the UAV constant data to the SGPRs. */
- ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 4);
- ptr_cmd[i++] = 0x240;
- ptr_cmd[i++] = mc_address_dst;
- ptr_cmd[i++] = (mc_address_dst >> 32) | 0x100000;
- ptr_cmd[i++] = 0x400;
- if (version == 9)
- ptr_cmd[i++] = 0x74fac;
- else if (version == 10)
- ptr_cmd[i++] = 0x1104bfac;
-
- /* Sets a range of pixel shader constants */
- ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 4);
- ptr_cmd[i++] = 0x244;
- ptr_cmd[i++] = 0x22222222;
- ptr_cmd[i++] = 0x22222222;
- ptr_cmd[i++] = 0x22222222;
- ptr_cmd[i++] = 0x22222222;
-
- /* clear mmCOMPUTE_RESOURCE_LIMITS */
- ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
- ptr_cmd[i++] = 0x215;
- ptr_cmd[i++] = 0;
-
- /* dispatch direct command */
- ptr_cmd[i++] = PACKET3_COMPUTE(PACKET3_DISPATCH_DIRECT, 3);
- ptr_cmd[i++] = 0x10;
- ptr_cmd[i++] = 1;
- ptr_cmd[i++] = 1;
- ptr_cmd[i++] = 1;
-
- while (i & 7)
- ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
-
- resources[0] = bo_dst;
- resources[1] = bo_shader;
- resources[2] = bo_cmd;
- r = amdgpu_bo_list_create(device_handle, 3, resources, NULL, &bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- ib_info.ib_mc_address = mc_address_cmd;
- ib_info.size = i;
- ibs_request.ip_type = ip_type;
- ibs_request.ring = ring;
- ibs_request.resources = bo_list;
- ibs_request.number_of_ibs = 1;
- ibs_request.ibs = &ib_info;
- ibs_request.fence_info.handle = NULL;
-
- /* submit CS */
- r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_list_destroy(bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- fence_status.ip_type = ip_type;
- fence_status.ip_instance = 0;
- fence_status.ring = ring;
- fence_status.context = context_handle;
- fence_status.fence = ibs_request.seq_no;
-
- /* wait for IB accomplished */
- r = amdgpu_cs_query_fence_status(&fence_status,
- AMDGPU_TIMEOUT_INFINITE,
- 0, &expired);
- CU_ASSERT_EQUAL(r, 0);
- CU_ASSERT_EQUAL(expired, true);
-
- /* verify if memset test result meets with expected */
- i = 0;
- while(i < bo_dst_size) {
- CU_ASSERT_EQUAL(ptr_dst[i++], 0x22);
- }
-
- r = amdgpu_bo_unmap_and_free(bo_dst, va_dst, mc_address_dst, bo_dst_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_shader, va_shader, mc_address_shader, bo_shader_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_cmd, va_cmd, mc_address_cmd, bo_cmd_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_cs_ctx_free(context_handle);
- CU_ASSERT_EQUAL(r, 0);
-}
-
-static void amdgpu_memcpy_dispatch_test(amdgpu_device_handle device_handle,
- uint32_t ip_type,
- uint32_t ring,
- uint32_t version,
- int hang)
-{
- amdgpu_context_handle context_handle;
- amdgpu_bo_handle bo_src, bo_dst, bo_shader, bo_cmd, resources[4];
- volatile unsigned char *ptr_dst;
- void *ptr_shader;
- unsigned char *ptr_src;
- uint32_t *ptr_cmd;
- uint64_t mc_address_src, mc_address_dst, mc_address_shader, mc_address_cmd;
- amdgpu_va_handle va_src, va_dst, va_shader, va_cmd;
- int i, r;
- int bo_dst_size = 16384;
- int bo_shader_size = 4096;
- int bo_cmd_size = 4096;
- struct amdgpu_cs_request ibs_request = {0};
- struct amdgpu_cs_ib_info ib_info= {0};
- uint32_t expired, hang_state, hangs;
- enum cs_type cs_type;
- amdgpu_bo_list_handle bo_list;
- struct amdgpu_cs_fence fence_status = {0};
-
- r = amdgpu_cs_ctx_create(device_handle, &context_handle);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_cmd_size, 4096,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- &bo_cmd, (void **)&ptr_cmd,
- &mc_address_cmd, &va_cmd);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_cmd, 0, bo_cmd_size);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_shader, &ptr_shader,
- &mc_address_shader, &va_shader);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_shader, 0, bo_shader_size);
-
- cs_type = hang ? CS_HANG : CS_BUFFERCOPY;
- r = amdgpu_dispatch_load_cs_shader(ptr_shader, cs_type, version);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_dst_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_src, (void **)&ptr_src,
- &mc_address_src, &va_src);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_dst_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_dst, (void **)&ptr_dst,
- &mc_address_dst, &va_dst);
- CU_ASSERT_EQUAL(r, 0);
-
- memset(ptr_src, 0x55, bo_dst_size);
-
- i = 0;
- i += amdgpu_dispatch_init(ptr_cmd + i, ip_type, version);
-
- /* Issue commands to set cu mask used in current dispatch */
- i += amdgpu_dispatch_write_cumask(ptr_cmd + i, version);
-
- /* Writes shader state to HW */
- i += amdgpu_dispatch_write2hw(ptr_cmd + i, mc_address_shader, version);
-
- /* Write constant data */
- /* Writes the texture resource constants data to the SGPRs */
- ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 4);
- ptr_cmd[i++] = 0x240;
- ptr_cmd[i++] = mc_address_src;
- ptr_cmd[i++] = (mc_address_src >> 32) | 0x100000;
- ptr_cmd[i++] = 0x400;
- if (version == 9)
- ptr_cmd[i++] = 0x74fac;
- else if (version == 10)
- ptr_cmd[i++] = 0x1104bfac;
-
- /* Writes the UAV constant data to the SGPRs. */
- ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 4);
- ptr_cmd[i++] = 0x244;
- ptr_cmd[i++] = mc_address_dst;
- ptr_cmd[i++] = (mc_address_dst >> 32) | 0x100000;
- ptr_cmd[i++] = 0x400;
- if (version == 9)
- ptr_cmd[i++] = 0x74fac;
- else if (version == 10)
- ptr_cmd[i++] = 0x1104bfac;
-
- /* clear mmCOMPUTE_RESOURCE_LIMITS */
- ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
- ptr_cmd[i++] = 0x215;
- ptr_cmd[i++] = 0;
-
- /* dispatch direct command */
- ptr_cmd[i++] = PACKET3_COMPUTE(PACKET3_DISPATCH_DIRECT, 3);
- ptr_cmd[i++] = 0x10;
- ptr_cmd[i++] = 1;
- ptr_cmd[i++] = 1;
- ptr_cmd[i++] = 1;
-
- while (i & 7)
- ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
-
- resources[0] = bo_shader;
- resources[1] = bo_src;
- resources[2] = bo_dst;
- resources[3] = bo_cmd;
- r = amdgpu_bo_list_create(device_handle, 4, resources, NULL, &bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- ib_info.ib_mc_address = mc_address_cmd;
- ib_info.size = i;
- ibs_request.ip_type = ip_type;
- ibs_request.ring = ring;
- ibs_request.resources = bo_list;
- ibs_request.number_of_ibs = 1;
- ibs_request.ibs = &ib_info;
- ibs_request.fence_info.handle = NULL;
- r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
- CU_ASSERT_EQUAL(r, 0);
-
- fence_status.ip_type = ip_type;
- fence_status.ip_instance = 0;
- fence_status.ring = ring;
- fence_status.context = context_handle;
- fence_status.fence = ibs_request.seq_no;
-
- /* wait for IB accomplished */
- r = amdgpu_cs_query_fence_status(&fence_status,
- AMDGPU_TIMEOUT_INFINITE,
- 0, &expired);
-
- if (!hang) {
- CU_ASSERT_EQUAL(r, 0);
- CU_ASSERT_EQUAL(expired, true);
-
- /* verify if memcpy test result meets with expected */
- i = 0;
- while(i < bo_dst_size) {
- CU_ASSERT_EQUAL(ptr_dst[i], ptr_src[i]);
- i++;
- }
- } else {
- r = amdgpu_cs_query_reset_state(context_handle, &hang_state, &hangs);
- CU_ASSERT_EQUAL(r, 0);
- CU_ASSERT_EQUAL(hang_state, AMDGPU_CTX_UNKNOWN_RESET);
- }
-
- r = amdgpu_bo_list_destroy(bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_src, va_src, mc_address_src, bo_dst_size);
- CU_ASSERT_EQUAL(r, 0);
- r = amdgpu_bo_unmap_and_free(bo_dst, va_dst, mc_address_dst, bo_dst_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_cmd, va_cmd, mc_address_cmd, bo_cmd_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_shader, va_shader, mc_address_shader, bo_shader_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_cs_ctx_free(context_handle);
- CU_ASSERT_EQUAL(r, 0);
-}
-
static void amdgpu_compute_dispatch_test(void)
{
- int r;
- struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id, version;
-
- r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_COMPUTE, 0, &info);
- CU_ASSERT_EQUAL(r, 0);
- if (!info.available_rings)
- printf("SKIP ... as there's no compute ring\n");
-
- version = info.hw_ip_version_major;
- if (version != 9 && version != 10) {
- printf("SKIP ... unsupported gfx version %d\n", version);
- return;
- }
-
- for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
- amdgpu_memset_dispatch_test(device_handle, AMDGPU_HW_IP_COMPUTE, ring_id, version);
- amdgpu_memcpy_dispatch_test(device_handle, AMDGPU_HW_IP_COMPUTE, ring_id, version, 0);
- }
+ amdgpu_test_dispatch_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
}
-
static void amdgpu_gfx_dispatch_test(void)
{
- int r;
- struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id, version;
-
- r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_GFX, 0, &info);
- CU_ASSERT_EQUAL(r, 0);
- if (!info.available_rings)
- printf("SKIP ... as there's no graphics ring\n");
-
- version = info.hw_ip_version_major;
- if (version != 9 && version != 10) {
- printf("SKIP ... unsupported gfx version %d\n", version);
- return;
- }
-
- for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
- amdgpu_memset_dispatch_test(device_handle, AMDGPU_HW_IP_GFX, ring_id, version);
- amdgpu_memcpy_dispatch_test(device_handle, AMDGPU_HW_IP_GFX, ring_id, version, 0);
- }
-}
-
-void amdgpu_dispatch_hang_helper(amdgpu_device_handle device_handle, uint32_t ip_type)
-{
- int r;
- struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id, version;
-
- r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &info);
- CU_ASSERT_EQUAL(r, 0);
- if (!info.available_rings)
- printf("SKIP ... as there's no ring for ip %d\n", ip_type);
-
- version = info.hw_ip_version_major;
- if (version != 9 && version != 10) {
- printf("SKIP ... unsupported gfx version %d\n", version);
- return;
- }
-
- for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
- amdgpu_memcpy_dispatch_test(device_handle, ip_type, ring_id, version, 0);
- amdgpu_memcpy_dispatch_test(device_handle, ip_type, ring_id, version, 1);
- amdgpu_memcpy_dispatch_test(device_handle, ip_type, ring_id, version, 0);
- }
-}
-
-static void amdgpu_memcpy_dispatch_hang_slow_test(amdgpu_device_handle device_handle,
- uint32_t ip_type, uint32_t ring, int version)
-{
- amdgpu_context_handle context_handle;
- amdgpu_bo_handle bo_src, bo_dst, bo_shader, bo_cmd, resources[4];
- volatile unsigned char *ptr_dst;
- void *ptr_shader;
- unsigned char *ptr_src;
- uint32_t *ptr_cmd;
- uint64_t mc_address_src, mc_address_dst, mc_address_shader, mc_address_cmd;
- amdgpu_va_handle va_src, va_dst, va_shader, va_cmd;
- int i, r;
- int bo_dst_size = 0x4000000;
- int bo_shader_size = 0x400000;
- int bo_cmd_size = 4096;
- struct amdgpu_cs_request ibs_request = {0};
- struct amdgpu_cs_ib_info ib_info= {0};
- uint32_t hang_state, hangs, expired;
- struct amdgpu_gpu_info gpu_info = {0};
- amdgpu_bo_list_handle bo_list;
- struct amdgpu_cs_fence fence_status = {0};
-
- r = amdgpu_query_gpu_info(device_handle, &gpu_info);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_cs_ctx_create(device_handle, &context_handle);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_cmd_size, 4096,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- &bo_cmd, (void **)&ptr_cmd,
- &mc_address_cmd, &va_cmd);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_cmd, 0, bo_cmd_size);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_shader, &ptr_shader,
- &mc_address_shader, &va_shader);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_shader, 0, bo_shader_size);
-
- r = amdgpu_dispatch_load_cs_shader_hang_slow(ptr_shader, gpu_info.family_id);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_dst_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_src, (void **)&ptr_src,
- &mc_address_src, &va_src);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_dst_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_dst, (void **)&ptr_dst,
- &mc_address_dst, &va_dst);
- CU_ASSERT_EQUAL(r, 0);
-
- memset(ptr_src, 0x55, bo_dst_size);
-
- i = 0;
- i += amdgpu_dispatch_init(ptr_cmd + i, ip_type, version);
-
- /* Issue commands to set cu mask used in current dispatch */
- i += amdgpu_dispatch_write_cumask(ptr_cmd + i, version);
-
- /* Writes shader state to HW */
- i += amdgpu_dispatch_write2hw(ptr_cmd + i, mc_address_shader, version);
-
- /* Write constant data */
- /* Writes the texture resource constants data to the SGPRs */
- ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 4);
- ptr_cmd[i++] = 0x240;
- ptr_cmd[i++] = mc_address_src;
- ptr_cmd[i++] = (mc_address_src >> 32) | 0x100000;
- ptr_cmd[i++] = 0x400000;
- if (version == 9)
- ptr_cmd[i++] = 0x74fac;
- else if (version == 10)
- ptr_cmd[i++] = 0x1104bfac;
-
- /* Writes the UAV constant data to the SGPRs. */
- ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 4);
- ptr_cmd[i++] = 0x244;
- ptr_cmd[i++] = mc_address_dst;
- ptr_cmd[i++] = (mc_address_dst >> 32) | 0x100000;
- ptr_cmd[i++] = 0x400000;
- if (version == 9)
- ptr_cmd[i++] = 0x74fac;
- else if (version == 10)
- ptr_cmd[i++] = 0x1104bfac;
-
- /* clear mmCOMPUTE_RESOURCE_LIMITS */
- ptr_cmd[i++] = PACKET3_COMPUTE(PKT3_SET_SH_REG, 1);
- ptr_cmd[i++] = 0x215;
- ptr_cmd[i++] = 0;
-
- /* dispatch direct command */
- ptr_cmd[i++] = PACKET3_COMPUTE(PACKET3_DISPATCH_DIRECT, 3);
- ptr_cmd[i++] = 0x10000;
- ptr_cmd[i++] = 1;
- ptr_cmd[i++] = 1;
- ptr_cmd[i++] = 1;
-
- while (i & 7)
- ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
-
- resources[0] = bo_shader;
- resources[1] = bo_src;
- resources[2] = bo_dst;
- resources[3] = bo_cmd;
- r = amdgpu_bo_list_create(device_handle, 4, resources, NULL, &bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- ib_info.ib_mc_address = mc_address_cmd;
- ib_info.size = i;
- ibs_request.ip_type = ip_type;
- ibs_request.ring = ring;
- ibs_request.resources = bo_list;
- ibs_request.number_of_ibs = 1;
- ibs_request.ibs = &ib_info;
- ibs_request.fence_info.handle = NULL;
- r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
- CU_ASSERT_EQUAL(r, 0);
-
- fence_status.ip_type = ip_type;
- fence_status.ip_instance = 0;
- fence_status.ring = ring;
- fence_status.context = context_handle;
- fence_status.fence = ibs_request.seq_no;
-
- /* wait for IB accomplished */
- r = amdgpu_cs_query_fence_status(&fence_status,
- AMDGPU_TIMEOUT_INFINITE,
- 0, &expired);
-
- r = amdgpu_cs_query_reset_state(context_handle, &hang_state, &hangs);
- CU_ASSERT_EQUAL(r, 0);
- CU_ASSERT_EQUAL(hang_state, AMDGPU_CTX_UNKNOWN_RESET);
-
- r = amdgpu_bo_list_destroy(bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_src, va_src, mc_address_src, bo_dst_size);
- CU_ASSERT_EQUAL(r, 0);
- r = amdgpu_bo_unmap_and_free(bo_dst, va_dst, mc_address_dst, bo_dst_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_cmd, va_cmd, mc_address_cmd, bo_cmd_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_shader, va_shader, mc_address_shader, bo_shader_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_cs_ctx_free(context_handle);
- CU_ASSERT_EQUAL(r, 0);
-}
-
-void amdgpu_dispatch_hang_slow_helper(amdgpu_device_handle device_handle, uint32_t ip_type)
-{
- int r;
- struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id, version;
-
- r = amdgpu_query_hw_ip_info(device_handle, ip_type, 0, &info);
- CU_ASSERT_EQUAL(r, 0);
- if (!info.available_rings)
- printf("SKIP ... as there's no ring for ip %d\n", ip_type);
-
- version = info.hw_ip_version_major;
- if (version != 9 && version != 10) {
- printf("SKIP ... unsupported gfx version %d\n", version);
- return;
- }
-
- for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
- amdgpu_memcpy_dispatch_test(device_handle, ip_type, ring_id, version, 0);
- amdgpu_memcpy_dispatch_hang_slow_test(device_handle, ip_type, ring_id, version);
- amdgpu_memcpy_dispatch_test(device_handle, ip_type, ring_id, version, 0);
- }
-}
-
-static int amdgpu_draw_load_ps_shader_hang_slow(uint32_t *ptr, int family)
-{
- struct amdgpu_test_shader *shader;
- int i, loop = 0x40000;
-
- switch (family) {
- case AMDGPU_FAMILY_AI:
- case AMDGPU_FAMILY_RV:
- shader = &memcpy_ps_hang_slow_ai;
- break;
- default:
- return -1;
- break;
- }
-
- memcpy(ptr, shader->shader, shader->header_length * sizeof(uint32_t));
-
- for (i = 0; i < loop; i++)
- memcpy(ptr + shader->header_length + shader->body_length * i,
- shader->shader + shader->header_length,
- shader->body_length * sizeof(uint32_t));
-
- memcpy(ptr + shader->header_length + shader->body_length * loop,
- shader->shader + shader->header_length + shader->body_length,
- shader->foot_length * sizeof(uint32_t));
-
- return 0;
-}
-
-static int amdgpu_draw_load_ps_shader(uint8_t *ptr, int ps_type, uint32_t version)
-{
- int i;
- uint32_t shader_offset= 256;
- uint32_t mem_offset, patch_code_offset;
- uint32_t shader_size, patchinfo_code_size;
- const uint32_t *shader;
- const uint32_t *patchinfo_code;
- const uint32_t *patchcode_offset;
-
- switch (ps_type) {
- case PS_CONST:
- if (version == 9) {
- shader = ps_const_shader_gfx9;
- shader_size = sizeof(ps_const_shader_gfx9);
- patchinfo_code = (const uint32_t *)ps_const_shader_patchinfo_code_gfx9;
- patchinfo_code_size = ps_const_shader_patchinfo_code_size_gfx9;
- patchcode_offset = ps_const_shader_patchinfo_offset_gfx9;
- } else if (version == 10){
- shader = ps_const_shader_gfx10;
- shader_size = sizeof(ps_const_shader_gfx10);
- patchinfo_code = (const uint32_t *)ps_const_shader_patchinfo_code_gfx10;
- patchinfo_code_size = ps_const_shader_patchinfo_code_size_gfx10;
- patchcode_offset = ps_const_shader_patchinfo_offset_gfx10;
- }
- break;
- case PS_TEX:
- if (version == 9) {
- shader = ps_tex_shader_gfx9;
- shader_size = sizeof(ps_tex_shader_gfx9);
- patchinfo_code = (const uint32_t *)ps_tex_shader_patchinfo_code_gfx9;
- patchinfo_code_size = ps_tex_shader_patchinfo_code_size_gfx9;
- patchcode_offset = ps_tex_shader_patchinfo_offset_gfx9;
- } else if (version == 10) {
- shader = ps_tex_shader_gfx10;
- shader_size = sizeof(ps_tex_shader_gfx10);
- patchinfo_code = (const uint32_t *)ps_tex_shader_patchinfo_code_gfx10;
- patchinfo_code_size = ps_tex_shader_patchinfo_code_size_gfx10;
- patchcode_offset = ps_tex_shader_patchinfo_offset_gfx10;
- }
- break;
- case PS_HANG:
- shader = memcpy_ps_hang;
- shader_size = sizeof(memcpy_ps_hang);
-
- memcpy(ptr, shader, shader_size);
- return 0;
- default:
- return -1;
- break;
- }
-
- /* write main shader program */
- for (i = 0 ; i < 10; i++) {
- mem_offset = i * shader_offset;
- memcpy(ptr + mem_offset, shader, shader_size);
- }
-
- /* overwrite patch codes */
- for (i = 0 ; i < 10; i++) {
- mem_offset = i * shader_offset + patchcode_offset[0] * sizeof(uint32_t);
- patch_code_offset = i * patchinfo_code_size;
- memcpy(ptr + mem_offset,
- patchinfo_code + patch_code_offset,
- patchinfo_code_size * sizeof(uint32_t));
- }
-
- return 0;
-}
-
-/* load RectPosTexFast_VS */
-static int amdgpu_draw_load_vs_shader(uint8_t *ptr, uint32_t version)
-{
- const uint32_t *shader;
- uint32_t shader_size;
-
- if (version == 9) {
- shader = vs_RectPosTexFast_shader_gfx9;
- shader_size = sizeof(vs_RectPosTexFast_shader_gfx9);
- } else if (version == 10) {
- shader = vs_RectPosTexFast_shader_gfx10;
- shader_size = sizeof(vs_RectPosTexFast_shader_gfx10);
- }
-
- memcpy(ptr, shader, shader_size);
-
- return 0;
-}
-
-static int amdgpu_draw_init(uint32_t *ptr, uint32_t version)
-{
- int i = 0;
- const uint32_t *preamblecache_ptr;
- uint32_t preamblecache_size;
-
- /* Write context control and load shadowing register if necessary */
- ptr[i++] = PACKET3(PKT3_CONTEXT_CONTROL, 1);
- ptr[i++] = 0x80000000;
- ptr[i++] = 0x80000000;
-
- if (version == 9) {
- preamblecache_ptr = preamblecache_gfx9;
- preamblecache_size = sizeof(preamblecache_gfx9);
- } else if (version == 10) {
- preamblecache_ptr = preamblecache_gfx10;
- preamblecache_size = sizeof(preamblecache_gfx10);
- }
-
- memcpy(ptr + i, preamblecache_ptr, preamblecache_size);
- return i + preamblecache_size/sizeof(uint32_t);
-}
-
-static int amdgpu_draw_setup_and_write_drawblt_surf_info(uint32_t *ptr,
- uint64_t dst_addr,
- uint32_t version,
- int hang_slow)
-{
- int i = 0;
-
- /* setup color buffer */
- if (version == 9) {
- /* offset reg
- 0xA318 CB_COLOR0_BASE
- 0xA319 CB_COLOR0_BASE_EXT
- 0xA31A CB_COLOR0_ATTRIB2
- 0xA31B CB_COLOR0_VIEW
- 0xA31C CB_COLOR0_INFO
- 0xA31D CB_COLOR0_ATTRIB
- 0xA31E CB_COLOR0_DCC_CONTROL
- 0xA31F CB_COLOR0_CMASK
- 0xA320 CB_COLOR0_CMASK_BASE_EXT
- 0xA321 CB_COLOR0_FMASK
- 0xA322 CB_COLOR0_FMASK_BASE_EXT
- 0xA323 CB_COLOR0_CLEAR_WORD0
- 0xA324 CB_COLOR0_CLEAR_WORD1
- 0xA325 CB_COLOR0_DCC_BASE
- 0xA326 CB_COLOR0_DCC_BASE_EXT */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 15);
- ptr[i++] = 0x318;
- ptr[i++] = dst_addr >> 8;
- ptr[i++] = dst_addr >> 40;
- ptr[i++] = hang_slow ? 0x3ffc7ff : 0x7c01f;
- ptr[i++] = 0;
- ptr[i++] = 0x50438;
- ptr[i++] = 0x10140000;
- i += 9;
-
- /* mmCB_MRT0_EPITCH */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x1e8;
- ptr[i++] = hang_slow ? 0xfff : 0x1f;
- } else if (version == 10) {
- /* 0xA318 CB_COLOR0_BASE
- 0xA319 CB_COLOR0_PITCH
- 0xA31A CB_COLOR0_SLICE
- 0xA31B CB_COLOR0_VIEW
- 0xA31C CB_COLOR0_INFO
- 0xA31D CB_COLOR0_ATTRIB
- 0xA31E CB_COLOR0_DCC_CONTROL
- 0xA31F CB_COLOR0_CMASK
- 0xA320 CB_COLOR0_CMASK_SLICE
- 0xA321 CB_COLOR0_FMASK
- 0xA322 CB_COLOR0_FMASK_SLICE
- 0xA323 CB_COLOR0_CLEAR_WORD0
- 0xA324 CB_COLOR0_CLEAR_WORD1
- 0xA325 CB_COLOR0_DCC_BASE */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 14);
- ptr[i++] = 0x318;
- ptr[i++] = dst_addr >> 8;
- i += 3;
- ptr[i++] = 0x50438;
- i += 9;
-
- /* 0xA390 CB_COLOR0_BASE_EXT */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x390;
- ptr[i++] = dst_addr >> 40;
-
- /* 0xA398 CB_COLOR0_CMASK_BASE_EXT */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x398;
- ptr[i++] = 0;
-
- /* 0xA3A0 CB_COLOR0_FMASK_BASE_EXT */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x3a0;
- ptr[i++] = 0;
-
- /* 0xA3A8 CB_COLOR0_DCC_BASE_EXT */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x3a8;
- ptr[i++] = 0;
-
- /* 0xA3B0 CB_COLOR0_ATTRIB2 */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x3b0;
- ptr[i++] = hang_slow ? 0x3ffc7ff : 0x7c01f;
-
- /* 0xA3B8 CB_COLOR0_ATTRIB3 */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x3b8;
- ptr[i++] = 0x9014000;
- }
-
- /* 0xA32B CB_COLOR1_BASE */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x32b;
- ptr[i++] = 0;
-
- /* 0xA33A CB_COLOR1_BASE */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x33a;
- ptr[i++] = 0;
-
- /* SPI_SHADER_COL_FORMAT */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x1c5;
- ptr[i++] = 9;
-
- /* Setup depth buffer */
- if (version == 9) {
- /* mmDB_Z_INFO */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
- ptr[i++] = 0xe;
- i += 2;
- } else if (version == 10) {
- /* mmDB_Z_INFO */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
- ptr[i++] = 0x10;
- i += 2;
- }
-
- return i;
-}
-
-static int amdgpu_draw_setup_and_write_drawblt_state(uint32_t *ptr,
- uint32_t version,
- int hang_slow)
-{
- int i = 0;
- const uint32_t *cached_cmd_ptr;
- uint32_t cached_cmd_size;
-
- /* mmPA_SC_TILE_STEERING_OVERRIDE */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0xd7;
- ptr[i++] = 0;
-
- ptr[i++] = 0xffff1000;
- ptr[i++] = 0xc0021000;
-
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0xd7;
- if (version == 9)
- ptr[i++] = 1;
- else if (version == 10)
- ptr[i++] = 0;
-
- /* mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 16);
- ptr[i++] = 0x2fe;
- i += 16;
-
- /* mmPA_SC_CENTROID_PRIORITY_0 */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
- ptr[i++] = 0x2f5;
- i += 2;
-
- if (version == 9) {
- cached_cmd_ptr = cached_cmd_gfx9;
- cached_cmd_size = sizeof(cached_cmd_gfx9);
- } else if (version == 10) {
- cached_cmd_ptr = cached_cmd_gfx10;
- cached_cmd_size = sizeof(cached_cmd_gfx10);
- }
-
- memcpy(ptr + i, cached_cmd_ptr, cached_cmd_size);
- if (hang_slow)
- *(ptr + i + 12) = 0x8000800;
- i += cached_cmd_size/sizeof(uint32_t);
-
- if (version == 10) {
- /* mmCB_RMI_GL2_CACHE_CONTROL */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x104;
- ptr[i++] = 0x40aa0055;
- /* mmDB_RMI_L2_CACHE_CONTROL */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x1f;
- ptr[i++] = 0x2a0055;
- }
-
- return i;
-}
-
-static int amdgpu_draw_vs_RectPosTexFast_write2hw(uint32_t *ptr,
- int ps_type,
- uint64_t shader_addr,
- uint32_t version,
- int hang_slow)
-{
- int i = 0;
-
- /* mmPA_CL_VS_OUT_CNTL */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x207;
- ptr[i++] = 0;
-
- if (version == 9) {
- /* mmSPI_SHADER_PGM_RSRC3_VS */
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
- ptr[i++] = 0x46;
- ptr[i++] = 0xffff;
- } else if (version == 10) {
- /* mmSPI_SHADER_PGM_RSRC3_VS */
- ptr[i++] = PACKET3(PKT3_SET_SH_REG_INDEX, 1);
- ptr[i++] = 0x30000046;
- ptr[i++] = 0xffff;
- /* mmSPI_SHADER_PGM_RSRC4_VS */
- ptr[i++] = PACKET3(PKT3_SET_SH_REG_INDEX, 1);
- ptr[i++] = 0x30000041;
- ptr[i++] = 0xffff;
- }
-
- /* mmSPI_SHADER_PGM_LO_VS...mmSPI_SHADER_PGM_HI_VS */
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
- ptr[i++] = 0x48;
- ptr[i++] = shader_addr >> 8;
- ptr[i++] = shader_addr >> 40;
-
- /* mmSPI_SHADER_PGM_RSRC1_VS */
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
- ptr[i++] = 0x4a;
- if (version == 9)
- ptr[i++] = 0xc0081;
- else if (version == 10)
- ptr[i++] = 0xc0041;
- /* mmSPI_SHADER_PGM_RSRC2_VS */
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
- ptr[i++] = 0x4b;
- ptr[i++] = 0x18;
-
- /* mmSPI_VS_OUT_CONFIG */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x1b1;
- ptr[i++] = 2;
-
- /* mmSPI_SHADER_POS_FORMAT */
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x1c3;
- ptr[i++] = 4;
-
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 4);
- ptr[i++] = 0x4c;
- i += 2;
- ptr[i++] = hang_slow ? 0x45000000 : 0x42000000;
- ptr[i++] = hang_slow ? 0x45000000 : 0x42000000;
-
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 4);
- ptr[i++] = 0x50;
- i += 2;
- if (ps_type == PS_CONST) {
- i += 2;
- } else if (ps_type == PS_TEX) {
- ptr[i++] = 0x3f800000;
- ptr[i++] = 0x3f800000;
- }
-
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 4);
- ptr[i++] = 0x54;
- i += 4;
-
- return i;
-}
-
-static int amdgpu_draw_ps_write2hw(uint32_t *ptr,
- int ps_type,
- uint64_t shader_addr,
- uint32_t version)
-{
- int i, j;
- const uint32_t *sh_registers;
- const uint32_t *context_registers;
- uint32_t num_sh_reg, num_context_reg;
-
- if (ps_type == PS_CONST) {
- if (version == 9) {
- sh_registers = (const uint32_t *)ps_const_sh_registers_gfx9;
- num_sh_reg = ps_num_sh_registers_gfx9;
- } else if (version == 10) {
- sh_registers = (const uint32_t *)ps_const_sh_registers_gfx10;
- num_sh_reg = ps_num_sh_registers_gfx10;
- }
- context_registers = (const uint32_t *)ps_const_context_reg_gfx9;
- num_context_reg = ps_num_context_registers_gfx9;
- } else if (ps_type == PS_TEX) {
- sh_registers = (const uint32_t *)ps_tex_sh_registers_gfx9;
- context_registers = (const uint32_t *)ps_tex_context_reg_gfx9;
- num_sh_reg = ps_num_sh_registers_gfx9;
- num_context_reg = ps_num_context_registers_gfx9;
- }
-
- i = 0;
-
- if (version == 9) {
- /* 0x2c07 SPI_SHADER_PGM_RSRC3_PS
- 0x2c08 SPI_SHADER_PGM_LO_PS
- 0x2c09 SPI_SHADER_PGM_HI_PS */
- /* multiplicator 9 is from SPI_SHADER_COL_FORMAT */
- shader_addr += 256 * 9;
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 3);
- ptr[i++] = 0x7;
- ptr[i++] = 0xffff;
- ptr[i++] = shader_addr >> 8;
- ptr[i++] = shader_addr >> 40;
- } else if (version == 10) {
- shader_addr += 256 * 9;
- /* 0x2c08 SPI_SHADER_PGM_LO_PS
- 0x2c09 SPI_SHADER_PGM_HI_PS */
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 2);
- ptr[i++] = 0x8;
- ptr[i++] = shader_addr >> 8;
- ptr[i++] = shader_addr >> 40;
-
- /* mmSPI_SHADER_PGM_RSRC3_PS */
- ptr[i++] = PACKET3(PKT3_SET_SH_REG_INDEX, 1);
- ptr[i++] = 0x30000007;
- ptr[i++] = 0xffff;
- /* mmSPI_SHADER_PGM_RSRC4_PS */
- ptr[i++] = PACKET3(PKT3_SET_SH_REG_INDEX, 1);
- ptr[i++] = 0x30000001;
- ptr[i++] = 0xffff;
- }
-
- for (j = 0; j < num_sh_reg; j++) {
- ptr[i++] = PACKET3(PKT3_SET_SH_REG, 1);
- ptr[i++] = sh_registers[j * 2] - 0x2c00;
- ptr[i++] = sh_registers[j * 2 + 1];
- }
-
- for (j = 0; j < num_context_reg; j++) {
- if (context_registers[j * 2] != 0xA1C5) {
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = context_registers[j * 2] - 0xa000;
- ptr[i++] = context_registers[j * 2 + 1];
- }
-
- if (context_registers[j * 2] == 0xA1B4) {
- ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr[i++] = 0x1b3;
- ptr[i++] = 2;
- }
- }
-
- return i;
-}
-
-static int amdgpu_draw_draw(uint32_t *ptr, uint32_t version)
-{
- int i = 0;
-
- if (version == 9) {
- /* mmIA_MULTI_VGT_PARAM */
- ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ptr[i++] = 0x40000258;
- ptr[i++] = 0xd00ff;
- /* mmVGT_PRIMITIVE_TYPE */
- ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ptr[i++] = 0x10000242;
- ptr[i++] = 0x11;
- } else if (version == 10) {
- /* mmGE_CNTL */
- ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ptr[i++] = 0x25b;
- ptr[i++] = 0xff;
- /* mmVGT_PRIMITIVE_TYPE */
- ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ptr[i++] = 0x242;
- ptr[i++] = 0x11;
- }
-
- ptr[i++] = PACKET3(PACKET3_DRAW_INDEX_AUTO, 1);
- ptr[i++] = 3;
- ptr[i++] = 2;
-
- return i;
-}
-
-void amdgpu_memset_draw(amdgpu_device_handle device_handle,
- amdgpu_bo_handle bo_shader_ps,
- amdgpu_bo_handle bo_shader_vs,
- uint64_t mc_address_shader_ps,
- uint64_t mc_address_shader_vs,
- uint32_t ring_id, uint32_t version)
-{
- amdgpu_context_handle context_handle;
- amdgpu_bo_handle bo_dst, bo_cmd, resources[4];
- volatile unsigned char *ptr_dst;
- uint32_t *ptr_cmd;
- uint64_t mc_address_dst, mc_address_cmd;
- amdgpu_va_handle va_dst, va_cmd;
- int i, r;
- int bo_dst_size = 16384;
- int bo_cmd_size = 4096;
- struct amdgpu_cs_request ibs_request = {0};
- struct amdgpu_cs_ib_info ib_info = {0};
- struct amdgpu_cs_fence fence_status = {0};
- uint32_t expired;
- amdgpu_bo_list_handle bo_list;
-
- r = amdgpu_cs_ctx_create(device_handle, &context_handle);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_cmd_size, 4096,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- &bo_cmd, (void **)&ptr_cmd,
- &mc_address_cmd, &va_cmd);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_cmd, 0, bo_cmd_size);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_dst_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_dst, (void **)&ptr_dst,
- &mc_address_dst, &va_dst);
- CU_ASSERT_EQUAL(r, 0);
-
- i = 0;
- i += amdgpu_draw_init(ptr_cmd + i, version);
-
- i += amdgpu_draw_setup_and_write_drawblt_surf_info(ptr_cmd + i, mc_address_dst, version, 0);
-
- i += amdgpu_draw_setup_and_write_drawblt_state(ptr_cmd + i, version, 0);
-
- i += amdgpu_draw_vs_RectPosTexFast_write2hw(ptr_cmd + i, PS_CONST, mc_address_shader_vs,
- version, 0);
-
- i += amdgpu_draw_ps_write2hw(ptr_cmd + i, PS_CONST, mc_address_shader_ps, version);
-
- ptr_cmd[i++] = PACKET3(PKT3_SET_SH_REG, 4);
- ptr_cmd[i++] = 0xc;
- ptr_cmd[i++] = 0x33333333;
- ptr_cmd[i++] = 0x33333333;
- ptr_cmd[i++] = 0x33333333;
- ptr_cmd[i++] = 0x33333333;
-
- i += amdgpu_draw_draw(ptr_cmd + i, version);
-
- while (i & 7)
- ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
-
- resources[0] = bo_dst;
- resources[1] = bo_shader_ps;
- resources[2] = bo_shader_vs;
- resources[3] = bo_cmd;
- r = amdgpu_bo_list_create(device_handle, 4, resources, NULL, &bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- ib_info.ib_mc_address = mc_address_cmd;
- ib_info.size = i;
- ibs_request.ip_type = AMDGPU_HW_IP_GFX;
- ibs_request.ring = ring_id;
- ibs_request.resources = bo_list;
- ibs_request.number_of_ibs = 1;
- ibs_request.ibs = &ib_info;
- ibs_request.fence_info.handle = NULL;
-
- /* submit CS */
- r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_list_destroy(bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- fence_status.ip_type = AMDGPU_HW_IP_GFX;
- fence_status.ip_instance = 0;
- fence_status.ring = ring_id;
- fence_status.context = context_handle;
- fence_status.fence = ibs_request.seq_no;
-
- /* wait for IB accomplished */
- r = amdgpu_cs_query_fence_status(&fence_status,
- AMDGPU_TIMEOUT_INFINITE,
- 0, &expired);
- CU_ASSERT_EQUAL(r, 0);
- CU_ASSERT_EQUAL(expired, true);
-
- /* verify if memset test result meets with expected */
- i = 0;
- while(i < bo_dst_size) {
- CU_ASSERT_EQUAL(ptr_dst[i++], 0x33);
- }
-
- r = amdgpu_bo_unmap_and_free(bo_dst, va_dst, mc_address_dst, bo_dst_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_cmd, va_cmd, mc_address_cmd, bo_cmd_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_cs_ctx_free(context_handle);
- CU_ASSERT_EQUAL(r, 0);
-}
-
-static void amdgpu_memset_draw_test(amdgpu_device_handle device_handle,
- uint32_t ring, int version)
-{
- amdgpu_bo_handle bo_shader_ps, bo_shader_vs;
- void *ptr_shader_ps;
- void *ptr_shader_vs;
- uint64_t mc_address_shader_ps, mc_address_shader_vs;
- amdgpu_va_handle va_shader_ps, va_shader_vs;
- int r;
- int bo_shader_size = 4096;
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_shader_ps, &ptr_shader_ps,
- &mc_address_shader_ps, &va_shader_ps);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_shader_ps, 0, bo_shader_size);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_shader_vs, &ptr_shader_vs,
- &mc_address_shader_vs, &va_shader_vs);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_shader_vs, 0, bo_shader_size);
-
- r = amdgpu_draw_load_ps_shader(ptr_shader_ps, PS_CONST, version);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_draw_load_vs_shader(ptr_shader_vs, version);
- CU_ASSERT_EQUAL(r, 0);
-
- amdgpu_memset_draw(device_handle, bo_shader_ps, bo_shader_vs,
- mc_address_shader_ps, mc_address_shader_vs,
- ring, version);
-
- r = amdgpu_bo_unmap_and_free(bo_shader_ps, va_shader_ps, mc_address_shader_ps, bo_shader_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_shader_vs, va_shader_vs, mc_address_shader_vs, bo_shader_size);
- CU_ASSERT_EQUAL(r, 0);
-}
-
-static void amdgpu_memcpy_draw(amdgpu_device_handle device_handle,
- amdgpu_bo_handle bo_shader_ps,
- amdgpu_bo_handle bo_shader_vs,
- uint64_t mc_address_shader_ps,
- uint64_t mc_address_shader_vs,
- uint32_t ring, int version, int hang)
-{
- amdgpu_context_handle context_handle;
- amdgpu_bo_handle bo_dst, bo_src, bo_cmd, resources[5];
- volatile unsigned char *ptr_dst;
- unsigned char *ptr_src;
- uint32_t *ptr_cmd;
- uint64_t mc_address_dst, mc_address_src, mc_address_cmd;
- amdgpu_va_handle va_dst, va_src, va_cmd;
- int i, r;
- int bo_size = 16384;
- int bo_cmd_size = 4096;
- struct amdgpu_cs_request ibs_request = {0};
- struct amdgpu_cs_ib_info ib_info= {0};
- uint32_t hang_state, hangs;
- uint32_t expired;
- amdgpu_bo_list_handle bo_list;
- struct amdgpu_cs_fence fence_status = {0};
-
- r = amdgpu_cs_ctx_create(device_handle, &context_handle);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_cmd_size, 4096,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- &bo_cmd, (void **)&ptr_cmd,
- &mc_address_cmd, &va_cmd);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_cmd, 0, bo_cmd_size);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_src, (void **)&ptr_src,
- &mc_address_src, &va_src);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_dst, (void **)&ptr_dst,
- &mc_address_dst, &va_dst);
- CU_ASSERT_EQUAL(r, 0);
-
- memset(ptr_src, 0x55, bo_size);
-
- i = 0;
- i += amdgpu_draw_init(ptr_cmd + i, version);
-
- i += amdgpu_draw_setup_and_write_drawblt_surf_info(ptr_cmd + i, mc_address_dst, version, 0);
-
- i += amdgpu_draw_setup_and_write_drawblt_state(ptr_cmd + i, version, 0);
-
- i += amdgpu_draw_vs_RectPosTexFast_write2hw(ptr_cmd + i, PS_TEX, mc_address_shader_vs,
- version, 0);
-
- i += amdgpu_draw_ps_write2hw(ptr_cmd + i, PS_TEX, mc_address_shader_ps, version);
-
- ptr_cmd[i++] = PACKET3(PKT3_SET_SH_REG, 8);
- if (version == 9) {
- ptr_cmd[i++] = 0xc;
- ptr_cmd[i++] = mc_address_src >> 8;
- ptr_cmd[i++] = mc_address_src >> 40 | 0x10e00000;
- ptr_cmd[i++] = 0x7c01f;
- ptr_cmd[i++] = 0x90500fac;
- ptr_cmd[i++] = 0x3e000;
- i += 3;
- } else if (version == 10) {
- ptr_cmd[i++] = 0xc;
- ptr_cmd[i++] = mc_address_src >> 8;
- ptr_cmd[i++] = mc_address_src >> 40 | 0xc4b00000;
- ptr_cmd[i++] = 0x8007c007;
- ptr_cmd[i++] = 0x90500fac;
- i += 2;
- ptr_cmd[i++] = 0x400;
- i++;
- }
-
- ptr_cmd[i++] = PACKET3(PKT3_SET_SH_REG, 4);
- ptr_cmd[i++] = 0x14;
- ptr_cmd[i++] = 0x92;
- i += 3;
-
- ptr_cmd[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr_cmd[i++] = 0x191;
- ptr_cmd[i++] = 0;
-
- i += amdgpu_draw_draw(ptr_cmd + i, version);
-
- while (i & 7)
- ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
-
- resources[0] = bo_dst;
- resources[1] = bo_src;
- resources[2] = bo_shader_ps;
- resources[3] = bo_shader_vs;
- resources[4] = bo_cmd;
- r = amdgpu_bo_list_create(device_handle, 5, resources, NULL, &bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- ib_info.ib_mc_address = mc_address_cmd;
- ib_info.size = i;
- ibs_request.ip_type = AMDGPU_HW_IP_GFX;
- ibs_request.ring = ring;
- ibs_request.resources = bo_list;
- ibs_request.number_of_ibs = 1;
- ibs_request.ibs = &ib_info;
- ibs_request.fence_info.handle = NULL;
- r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
- CU_ASSERT_EQUAL(r, 0);
-
- fence_status.ip_type = AMDGPU_HW_IP_GFX;
- fence_status.ip_instance = 0;
- fence_status.ring = ring;
- fence_status.context = context_handle;
- fence_status.fence = ibs_request.seq_no;
-
- /* wait for IB accomplished */
- r = amdgpu_cs_query_fence_status(&fence_status,
- AMDGPU_TIMEOUT_INFINITE,
- 0, &expired);
- if (!hang) {
- CU_ASSERT_EQUAL(r, 0);
- CU_ASSERT_EQUAL(expired, true);
-
- /* verify if memcpy test result meets with expected */
- i = 0;
- while(i < bo_size) {
- CU_ASSERT_EQUAL(ptr_dst[i], ptr_src[i]);
- i++;
- }
- } else {
- r = amdgpu_cs_query_reset_state(context_handle, &hang_state, &hangs);
- CU_ASSERT_EQUAL(r, 0);
- CU_ASSERT_EQUAL(hang_state, AMDGPU_CTX_UNKNOWN_RESET);
- }
-
- r = amdgpu_bo_list_destroy(bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_src, va_src, mc_address_src, bo_size);
- CU_ASSERT_EQUAL(r, 0);
- r = amdgpu_bo_unmap_and_free(bo_dst, va_dst, mc_address_dst, bo_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_cmd, va_cmd, mc_address_cmd, bo_cmd_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_cs_ctx_free(context_handle);
- CU_ASSERT_EQUAL(r, 0);
-}
-
-void amdgpu_memcpy_draw_test(amdgpu_device_handle device_handle, uint32_t ring,
- int version, int hang)
-{
- amdgpu_bo_handle bo_shader_ps, bo_shader_vs;
- void *ptr_shader_ps;
- void *ptr_shader_vs;
- uint64_t mc_address_shader_ps, mc_address_shader_vs;
- amdgpu_va_handle va_shader_ps, va_shader_vs;
- int bo_shader_size = 4096;
- enum ps_type ps_type = hang ? PS_HANG : PS_TEX;
- int r;
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_shader_ps, &ptr_shader_ps,
- &mc_address_shader_ps, &va_shader_ps);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_shader_ps, 0, bo_shader_size);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_shader_vs, &ptr_shader_vs,
- &mc_address_shader_vs, &va_shader_vs);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_shader_vs, 0, bo_shader_size);
-
- r = amdgpu_draw_load_ps_shader(ptr_shader_ps, ps_type, version);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_draw_load_vs_shader(ptr_shader_vs, version);
- CU_ASSERT_EQUAL(r, 0);
-
- amdgpu_memcpy_draw(device_handle, bo_shader_ps, bo_shader_vs,
- mc_address_shader_ps, mc_address_shader_vs,
- ring, version, hang);
-
- r = amdgpu_bo_unmap_and_free(bo_shader_ps, va_shader_ps, mc_address_shader_ps, bo_shader_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_shader_vs, va_shader_vs, mc_address_shader_vs, bo_shader_size);
- CU_ASSERT_EQUAL(r, 0);
+ amdgpu_test_dispatch_helper(device_handle, AMDGPU_HW_IP_GFX);
}
static void amdgpu_draw_test(void)
{
- int r;
- struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id, version;
-
- r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_GFX, 0, &info);
- CU_ASSERT_EQUAL(r, 0);
- if (!info.available_rings)
- printf("SKIP ... as there's no graphics ring\n");
-
- version = info.hw_ip_version_major;
- if (version != 9 && version != 10) {
- printf("SKIP ... unsupported gfx version %d\n", version);
- return;
- }
-
- for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
- amdgpu_memset_draw_test(device_handle, ring_id, version);
- amdgpu_memcpy_draw_test(device_handle, ring_id, version, 0);
- }
-}
-
-void amdgpu_memcpy_draw_hang_slow_test(amdgpu_device_handle device_handle, uint32_t ring, int version)
-{
- amdgpu_context_handle context_handle;
- amdgpu_bo_handle bo_shader_ps, bo_shader_vs;
- amdgpu_bo_handle bo_dst, bo_src, bo_cmd, resources[5];
- void *ptr_shader_ps;
- void *ptr_shader_vs;
- volatile unsigned char *ptr_dst;
- unsigned char *ptr_src;
- uint32_t *ptr_cmd;
- uint64_t mc_address_dst, mc_address_src, mc_address_cmd;
- uint64_t mc_address_shader_ps, mc_address_shader_vs;
- amdgpu_va_handle va_shader_ps, va_shader_vs;
- amdgpu_va_handle va_dst, va_src, va_cmd;
- struct amdgpu_gpu_info gpu_info = {0};
- int i, r;
- int bo_size = 0x4000000;
- int bo_shader_ps_size = 0x400000;
- int bo_shader_vs_size = 4096;
- int bo_cmd_size = 4096;
- struct amdgpu_cs_request ibs_request = {0};
- struct amdgpu_cs_ib_info ib_info= {0};
- uint32_t hang_state, hangs, expired;
- amdgpu_bo_list_handle bo_list;
- struct amdgpu_cs_fence fence_status = {0};
-
- r = amdgpu_query_gpu_info(device_handle, &gpu_info);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_cs_ctx_create(device_handle, &context_handle);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_cmd_size, 4096,
- AMDGPU_GEM_DOMAIN_GTT, 0,
- &bo_cmd, (void **)&ptr_cmd,
- &mc_address_cmd, &va_cmd);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_cmd, 0, bo_cmd_size);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_ps_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_shader_ps, &ptr_shader_ps,
- &mc_address_shader_ps, &va_shader_ps);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_shader_ps, 0, bo_shader_ps_size);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_shader_vs_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_shader_vs, &ptr_shader_vs,
- &mc_address_shader_vs, &va_shader_vs);
- CU_ASSERT_EQUAL(r, 0);
- memset(ptr_shader_vs, 0, bo_shader_vs_size);
-
- r = amdgpu_draw_load_ps_shader_hang_slow(ptr_shader_ps, gpu_info.family_id);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_draw_load_vs_shader(ptr_shader_vs, version);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_src, (void **)&ptr_src,
- &mc_address_src, &va_src);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_alloc_and_map(device_handle, bo_size, 4096,
- AMDGPU_GEM_DOMAIN_VRAM, 0,
- &bo_dst, (void **)&ptr_dst,
- &mc_address_dst, &va_dst);
- CU_ASSERT_EQUAL(r, 0);
-
- memset(ptr_src, 0x55, bo_size);
-
- i = 0;
- i += amdgpu_draw_init(ptr_cmd + i, version);
-
- i += amdgpu_draw_setup_and_write_drawblt_surf_info(ptr_cmd + i, mc_address_dst, version, 1);
-
- i += amdgpu_draw_setup_and_write_drawblt_state(ptr_cmd + i, version, 1);
-
- i += amdgpu_draw_vs_RectPosTexFast_write2hw(ptr_cmd + i, PS_TEX,
- mc_address_shader_vs, version, 1);
-
- i += amdgpu_draw_ps_write2hw(ptr_cmd + i, PS_TEX, mc_address_shader_ps, version);
-
- ptr_cmd[i++] = PACKET3(PKT3_SET_SH_REG, 8);
-
- if (version == 9) {
- ptr_cmd[i++] = 0xc;
- ptr_cmd[i++] = mc_address_src >> 8;
- ptr_cmd[i++] = mc_address_src >> 40 | 0x10e00000;
- ptr_cmd[i++] = 0x1ffcfff;
- ptr_cmd[i++] = 0x90500fac;
- ptr_cmd[i++] = 0x1ffe000;
- i += 3;
- } else if (version == 10) {
- ptr_cmd[i++] = 0xc;
- ptr_cmd[i++] = mc_address_src >> 8;
- ptr_cmd[i++] = mc_address_src >> 40 | 0xc4b00000;
- ptr_cmd[i++] = 0x81ffc1ff;
- ptr_cmd[i++] = 0x90500fac;
- i += 4;
- }
-
- ptr_cmd[i++] = PACKET3(PKT3_SET_SH_REG, 4);
- ptr_cmd[i++] = 0x14;
- ptr_cmd[i++] = 0x92;
- i += 3;
-
- ptr_cmd[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
- ptr_cmd[i++] = 0x191;
- ptr_cmd[i++] = 0;
-
- i += amdgpu_draw_draw(ptr_cmd + i, version);
-
- while (i & 7)
- ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
-
- resources[0] = bo_dst;
- resources[1] = bo_src;
- resources[2] = bo_shader_ps;
- resources[3] = bo_shader_vs;
- resources[4] = bo_cmd;
- r = amdgpu_bo_list_create(device_handle, 5, resources, NULL, &bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- ib_info.ib_mc_address = mc_address_cmd;
- ib_info.size = i;
- ibs_request.ip_type = AMDGPU_HW_IP_GFX;
- ibs_request.ring = ring;
- ibs_request.resources = bo_list;
- ibs_request.number_of_ibs = 1;
- ibs_request.ibs = &ib_info;
- ibs_request.fence_info.handle = NULL;
- r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
- CU_ASSERT_EQUAL(r, 0);
-
- fence_status.ip_type = AMDGPU_HW_IP_GFX;
- fence_status.ip_instance = 0;
- fence_status.ring = ring;
- fence_status.context = context_handle;
- fence_status.fence = ibs_request.seq_no;
-
- /* wait for IB accomplished */
- r = amdgpu_cs_query_fence_status(&fence_status,
- AMDGPU_TIMEOUT_INFINITE,
- 0, &expired);
-
- r = amdgpu_cs_query_reset_state(context_handle, &hang_state, &hangs);
- CU_ASSERT_EQUAL(r, 0);
- CU_ASSERT_EQUAL(hang_state, AMDGPU_CTX_UNKNOWN_RESET);
-
- r = amdgpu_bo_list_destroy(bo_list);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_dst, va_dst, mc_address_dst, bo_size);
- CU_ASSERT_EQUAL(r, 0);
- r = amdgpu_bo_unmap_and_free(bo_src, va_src, mc_address_src, bo_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_cmd, va_cmd, mc_address_cmd, bo_cmd_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_bo_unmap_and_free(bo_shader_ps, va_shader_ps, mc_address_shader_ps, bo_shader_ps_size);
- CU_ASSERT_EQUAL(r, 0);
- r = amdgpu_bo_unmap_and_free(bo_shader_vs, va_shader_vs, mc_address_shader_vs, bo_shader_vs_size);
- CU_ASSERT_EQUAL(r, 0);
-
- r = amdgpu_cs_ctx_free(context_handle);
- CU_ASSERT_EQUAL(r, 0);
+ amdgpu_test_draw_helper(device_handle);
}
-
static void amdgpu_gpu_reset_test(void)
{
int r;
diff --git a/tests/amdgpu/deadlock_tests.c b/tests/amdgpu/deadlock_tests.c
index f29a83ab..2435751d 100644
--- a/tests/amdgpu/deadlock_tests.c
+++ b/tests/amdgpu/deadlock_tests.c
@@ -511,66 +511,25 @@ static void amdgpu_illegal_mem_access()
static void amdgpu_dispatch_hang_gfx(void)
{
- amdgpu_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_GFX);
+ amdgpu_test_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_GFX);
}
-
static void amdgpu_dispatch_hang_compute(void)
{
- amdgpu_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
+ amdgpu_test_dispatch_hang_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
}
-
static void amdgpu_dispatch_hang_slow_gfx(void)
{
- amdgpu_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_GFX);
+ amdgpu_test_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_GFX);
}
-
static void amdgpu_dispatch_hang_slow_compute(void)
{
- amdgpu_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
+ amdgpu_test_dispatch_hang_slow_helper(device_handle, AMDGPU_HW_IP_COMPUTE);
}
-
static void amdgpu_draw_hang_gfx(void)
{
- int r;
- struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id, version;
-
- r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_GFX, 0, &info);
- CU_ASSERT_EQUAL(r, 0);
- if (!info.available_rings)
- printf("SKIP ... as there's no graphic ring\n");
-
- version = info.hw_ip_version_major;
- if (version != 9 && version != 10) {
- printf("SKIP ... unsupported gfx version %d\n", version);
- return;
- }
-
- for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
- amdgpu_memcpy_draw_test(device_handle, ring_id, version, 0);
- amdgpu_memcpy_draw_test(device_handle, ring_id, version, 1);
- amdgpu_memcpy_draw_test(device_handle, ring_id, version, 0);
- }
+ amdgpu_test_draw_hang_helper(device_handle);
}
-
static void amdgpu_draw_hang_slow_gfx(void)
{
- struct drm_amdgpu_info_hw_ip info;
- uint32_t ring_id, version;
- int r;
-
- r = amdgpu_query_hw_ip_info(device_handle, AMDGPU_HW_IP_GFX, 0, &info);
- CU_ASSERT_EQUAL(r, 0);
-
- version = info.hw_ip_version_major;
- if (version != 9 && version != 10) {
- printf("SKIP ... unsupported gfx version %d\n", version);
- return;
- }
-
- for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
- amdgpu_memcpy_draw_test(device_handle, ring_id, version, 0);
- amdgpu_memcpy_draw_hang_slow_test(device_handle, ring_id, version);
- amdgpu_memcpy_draw_test(device_handle, ring_id, version, 0);
- }
+ amdgpu_test_draw_hang_slow_helper(device_handle);
}
diff --git a/tests/amdgpu/meson.build b/tests/amdgpu/meson.build
index 53f2010b..8618f6a1 100644
--- a/tests/amdgpu/meson.build
+++ b/tests/amdgpu/meson.build
@@ -25,7 +25,7 @@ if dep_cunit.found()
'amdgpu_test.c', 'basic_tests.c', 'bo_tests.c', 'cs_tests.c',
'vce_tests.c', 'uvd_enc_tests.c', 'vcn_tests.c', 'deadlock_tests.c',
'vm_tests.c', 'ras_tests.c', 'syncobj_tests.c', 'security_tests.c',
- 'hotunplug_tests.c', 'jpeg_tests.c', 'cp_dma_tests.c'
+ 'hotunplug_tests.c', 'jpeg_tests.c', 'cp_dma_tests.c', 'shader_test_util.c'
),
dependencies : [dep_cunit, dep_threads, dep_atomic_ops],
include_directories : [inc_root, inc_drm, include_directories('../../amdgpu')],
diff --git a/tests/amdgpu/shader_code.h b/tests/amdgpu/shader_code.h
new file mode 100644
index 00000000..8f9357b4
--- /dev/null
+++ b/tests/amdgpu/shader_code.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#ifndef _shader_code_h_
+#define _shader_code_h_
+
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
+#endif
+
+enum amdgpu_test_gfx_version {
+ AMDGPU_TEST_GFX_V9 = 0,
+ AMDGPU_TEST_GFX_V10,
+ AMDGPU_TEST_GFX_MAX,
+};
+
+enum cs_type {
+ CS_BUFFERCLEAR = 0,
+ CS_BUFFERCOPY,
+ CS_HANG,
+ CS_HANG_SLOW,
+};
+
+enum ps_type {
+ PS_CONST,
+ PS_TEX,
+ PS_HANG,
+ PS_HANG_SLOW
+};
+
+enum vs_type {
+ VS_RECTPOSTEXFAST,
+};
+
+struct reg_info {
+ uint32_t reg_offset; ///< Memory mapped register offset
+ uint32_t reg_value; ///< register value
+};
+
+#include "shader_code_hang.h"
+#include "shader_code_gfx9.h"
+#include "shader_code_gfx10.h"
+
+struct shader_test_cs_shader {
+ const uint32_t *shader;
+ uint32_t shader_size;
+ const struct reg_info *sh_reg;
+ uint32_t num_sh_reg;
+ const struct reg_info *context_reg;
+ uint32_t num_context_reg;
+};
+
+struct shader_test_ps_shader {
+ const uint32_t *shader;
+ unsigned shader_size;
+ const uint32_t patchinfo_code_size;
+ const uint32_t *patchinfo_code;
+ const uint32_t *patchinfo_code_offset;
+ const struct reg_info *sh_reg;
+ const uint32_t num_sh_reg;
+ const struct reg_info *context_reg;
+ const uint32_t num_context_reg;
+};
+
+struct shader_test_vs_shader {
+ const uint32_t *shader;
+ uint32_t shader_size;
+ const struct reg_info *sh_reg;
+ uint32_t num_sh_reg;
+ const struct reg_info *context_reg;
+ uint32_t num_context_reg;
+};
+
+static const struct shader_test_cs_shader shader_test_cs[AMDGPU_TEST_GFX_MAX][2] = {
+ // gfx9, cs_bufferclear
+ {{bufferclear_cs_shader_gfx9, sizeof(bufferclear_cs_shader_gfx9), bufferclear_cs_shader_registers_gfx9, ARRAY_SIZE(bufferclear_cs_shader_registers_gfx9)},
+ // gfx9, cs_buffercopy
+ {buffercopy_cs_shader_gfx9, sizeof(buffercopy_cs_shader_gfx9), bufferclear_cs_shader_registers_gfx9, ARRAY_SIZE(bufferclear_cs_shader_registers_gfx9)}},
+ // gfx10, cs_bufferclear
+ {{bufferclear_cs_shader_gfx10, sizeof(bufferclear_cs_shader_gfx10), bufferclear_cs_shader_registers_gfx9, ARRAY_SIZE(bufferclear_cs_shader_registers_gfx9)},
+ // gfx10, cs_buffercopy
+ {buffercopy_cs_shader_gfx10, sizeof(bufferclear_cs_shader_gfx10), bufferclear_cs_shader_registers_gfx9, ARRAY_SIZE(bufferclear_cs_shader_registers_gfx9)}},
+};
+
+#define SHADER_PS_INFO(_ps, _n) \
+ {ps_##_ps##_shader_gfx##_n, sizeof(ps_##_ps##_shader_gfx##_n), \
+ ps_##_ps##_shader_patchinfo_code_size_gfx##_n, \
+ ps_##_ps##_shader_patchinfo_code_gfx##_n, \
+ ps_##_ps##_shader_patchinfo_offset_gfx##_n, \
+ ps_##_ps##_sh_registers_gfx##_n, ps_##_ps##_num_sh_registers_gfx##_n, \
+ ps_##_ps##_context_registers_gfx##_n, ps_##_ps##_num_context_registers_gfx##_n}
+static const struct shader_test_ps_shader shader_test_ps[AMDGPU_TEST_GFX_MAX][2] = {
+ {SHADER_PS_INFO(const, 9), SHADER_PS_INFO(tex, 9)},
+ {SHADER_PS_INFO(const, 10), SHADER_PS_INFO(tex, 10)},
+};
+
+#define SHADER_VS_INFO(_vs, _n) \
+ {vs_##_vs##_shader_gfx##_n, sizeof(vs_##_vs##_shader_gfx##_n), \
+ vs_##_vs##_sh_registers_gfx##_n, vs_##_vs##_num_sh_registers_gfx##_n, \
+ vs_##_vs##_context_registers_gfx##_n, vs_##_vs##_num_context_registers_gfx##_n}
+static const struct shader_test_vs_shader shader_test_vs[AMDGPU_TEST_GFX_MAX][1] = {
+ {SHADER_VS_INFO(RectPosTexFast, 9)},
+ {SHADER_VS_INFO(RectPosTexFast, 10)},
+};
+
+struct shader_test_gfx_info {
+ const uint32_t *preamble_cache;
+ uint32_t size_preamble_cache;
+ const uint32_t *cached_cmd;
+ uint32_t size_cached_cmd;
+ uint32_t sh_reg_base;
+ uint32_t context_reg_base;
+};
+
+#define SHADER_TEST_GFX_INFO(_n) \
+ preamblecache_gfx##_n, sizeof(preamblecache_gfx##_n), \
+ cached_cmd_gfx##_n, sizeof(cached_cmd_gfx##_n), \
+ sh_reg_base_gfx##_n, context_reg_base_gfx##_n
+
+static struct shader_test_gfx_info shader_test_gfx_info[AMDGPU_TEST_GFX_MAX] = {
+ {SHADER_TEST_GFX_INFO(9),},
+ {SHADER_TEST_GFX_INFO(10),},
+};
+#endif
diff --git a/tests/amdgpu/shader_code_gfx10.h b/tests/amdgpu/shader_code_gfx10.h
new file mode 100644
index 00000000..4849bbc9
--- /dev/null
+++ b/tests/amdgpu/shader_code_gfx10.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#ifndef _shader_code_gfx10_h_
+#define _shader_code_gfx10_h_
+
+static const uint32_t bufferclear_cs_shader_gfx10[] = {
+ 0xD7460004, 0x04010C08, 0x7E000204, 0x7E020205,
+ 0x7E040206, 0x7E060207, 0xE01C2000, 0x80000004,
+ 0xBF810000
+};
+
+static const uint32_t buffercopy_cs_shader_gfx10[] = {
+ 0xD7460001, 0x04010C08, 0xE00C2000, 0x80000201,
+ 0xBF8C3F70, 0xE01C2000, 0x80010201, 0xBF810000
+};
+
+static const uint32_t ps_const_shader_gfx10[] = {
+ 0x7E000200, 0x7E020201, 0x7E040202, 0x7E060203,
+ 0x5E000300, 0x5E020702, 0xBF800000, 0xBF800000,
+ 0xF8001C0F, 0x00000100, 0xBF810000
+};
+
+static const uint32_t ps_const_shader_patchinfo_code_size_gfx10 = 6;
+
+static const uint32_t ps_const_shader_patchinfo_code_gfx10[][10][6] = {
+ {{ 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF8001890, 0x00000000 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF8001801, 0x00000000 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF8001803, 0x00000100 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF8001803, 0x00000300 },
+ { 0x5E000300, 0x5E020702, 0xBF800000, 0xBF800000, 0xF8001C0F, 0x00000100 },
+ { 0xD7690000, 0x00020300, 0xD7690001, 0x00020702, 0xF8001C0F, 0x00000100 },
+ { 0xD7680000, 0x00020300, 0xD7680001, 0x00020702, 0xF8001C0F, 0x00000100 },
+ { 0xD76A0000, 0x00020300, 0xD76A0001, 0x00020702, 0xF8001C0F, 0x00000100 },
+ { 0xD76B0000, 0x00020300, 0xD76B0001, 0x00020702, 0xF8001C0F, 0x00000100 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF800180F, 0x03020100 }
+ }
+};
+
+static const uint32_t ps_const_shader_patchinfo_offset_gfx10[] = {
+ 0x00000004
+};
+
+static const uint32_t ps_const_num_sh_registers_gfx10 = 2;
+
+static const struct reg_info ps_const_sh_registers_gfx10[] = {
+ {0x2C0A, 0x000C0000},//{ mmSPI_SHADER_PGM_RSRC1_PS, 0x000C0000 },
+ {0x2C0B, 0x00000008}, //{ mmSPI_SHADER_PGM_RSRC2_PS, 0x00000008 }
+};
+
+static const struct reg_info ps_const_context_registers_gfx10[] =
+{
+ {0xA1B4, 0x00000002}, //{ mmSPI_PS_INPUT_ADDR, 0x00000002 },
+ {0xA1B6, 0x00000000}, //{ mmSPI_PS_IN_CONTROL, 0x00000000 },
+ {0xA08F, 0x0000000F}, //{ mmCB_SHADER_MASK, 0x0000000F },
+ {0xA203, 0x00000010}, //{ mmDB_SHADER_CONTROL, 0x00000010 },
+ {0xA1C4, 0x00000000}, //{ mmSPI_SHADER_Z_FORMAT, 0x00000000 },
+ {0xA1B8, 0x00000000}, //{ mmSPI_BARYC_CNTL, 0x00000000 /* Always 0 for now */},
+ {0xA1C5, 0x00000004}, //{ mmSPI_SHADER_COL_FORMAT, 0x00000004 /* SI_EXPORT_FMT_FP16_ABGR */ }
+};
+
+static const uint32_t ps_const_num_context_registers_gfx10 = 7;
+
+static const uint32_t ps_tex_shader_gfx10[] = {
+ 0xBEFC030C, 0xBE8E047E, 0xBEFE0A7E, 0xC8080000,
+ 0xC80C0100, 0xC8090001, 0xC80D0101, 0xF0800F0A,
+ 0x00400402, 0x00000003, 0xBEFE040E, 0xBF8C0F70,
+ 0x5E000B04, 0x5E020F06, 0xBF800000, 0xBF800000,
+ 0xF8001C0F, 0x00000100, 0xBF810000
+};
+
+static const uint32_t ps_tex_shader_patchinfo_offset_gfx10[] = {
+ 0x0000000C
+};
+
+static const uint32_t ps_tex_shader_patchinfo_code_size_gfx10 = 6;
+
+static const uint32_t ps_tex_shader_patchinfo_code_gfx10[][10][6] = {
+ {{ 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF8001890, 0x00000000 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF8001801, 0x00000004 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF8001803, 0x00000504 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF8001803, 0x00000704 },
+ { 0x5E000B04, 0x5E020F06, 0xBF800000, 0xBF800000, 0xF8001C0F, 0x00000100 },
+ { 0xD7690000, 0x00020B04, 0xD7690001, 0x00020F06, 0xF8001C0F, 0x00000100 },
+ { 0xD7680000, 0x00020B04, 0xD7680001, 0x00020F06, 0xF8001C0F, 0x00000100 },
+ { 0xD76A0000, 0x00020B04, 0xD76A0001, 0x00020F06, 0xF8001C0F, 0x00000100 },
+ { 0xD76B0000, 0x00020B04, 0xD76B0001, 0x00020F06, 0xF8001C0F, 0x00000100 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF800180F, 0x07060504 }
+ }
+};
+
+static const struct reg_info ps_tex_sh_registers_gfx10[] =
+{
+	{0x2C0A, 0xc0081}, //{ mmSPI_SHADER_PGM_RSRC1_PS, 0x000C0081 },
+ {0x2C0B, 0x00000018 }, //{ mmSPI_SHADER_PGM_RSRC2_PS, 0x00000018 }
+};
+
+static const uint32_t ps_tex_num_sh_registers_gfx10 = 2;
+
+// Holds Context Register Information
+static const struct reg_info ps_tex_context_registers_gfx10[] =
+{
+ {0xA1B4, 0x00000002}, //{ mmSPI_PS_INPUT_ADDR, 0x00000002 },
+ {0xA1B6, 0x00000001}, //{ mmSPI_PS_IN_CONTROL, 0x00000001 },
+ {0xA08F, 0x0000000F}, //{ mmCB_SHADER_MASK, 0x0000000F },
+ {0xA203, 0x00000010}, //{ mmDB_SHADER_CONTROL, 0x00000010 },
+ {0xA1C4, 0x00000000}, //{ mmSPI_SHADER_Z_FORMAT, 0x00000000 },
+ {0xA1B8, 0x00000000}, //{ mmSPI_BARYC_CNTL, 0x00000000 /* Always 0 for now */},
+ {0xA1C5, 0x00000004}, //{ mmSPI_SHADER_COL_FORMAT, 0x00000004 /* SI_EXPORT_FMT_FP16_ABGR */ }
+};
+
+static const uint32_t ps_tex_num_context_registers_gfx10 = 7;
+
+static const uint32_t vs_RectPosTexFast_shader_gfx10[] = {
+ 0x7E000B00, 0x060000F3, 0x7E020202, 0x7E040206,
+ 0x7C040080, 0x060000F3, 0xD5010001, 0x01AA0200,
+ 0x7E060203, 0xD5010002, 0x01AA0404, 0x7E080207,
+ 0x7C040080, 0xD5010000, 0x01A80101, 0xD5010001,
+ 0x01AA0601, 0x7E060208, 0x7E0A02F2, 0xD5010002,
+ 0x01A80902, 0xD5010004, 0x01AA0805, 0x7E0C0209,
+ 0xF80008CF, 0x05030100, 0xF800020F, 0x05060402,
+ 0xBF810000
+};
+
+static const struct reg_info vs_RectPosTexFast_sh_registers_gfx10[] =
+{
+ {0x2C4A, 0x080C0041 }, //{ mmSPI_SHADER_PGM_RSRC1_VS, 0x080C0041 },
+ {0x2C4B, 0x00000018 }, //{ mmSPI_SHADER_PGM_RSRC2_VS, 0x00000018 }
+};
+
+static const uint32_t vs_RectPosTexFast_num_sh_registers_gfx10 = 2;
+
+// Holds Context Register Information
+static const struct reg_info vs_RectPosTexFast_context_registers_gfx10[] =
+{
+ {0xA1B1, 0x00000000}, //{ mmSPI_VS_OUT_CONFIG, 0x00000000 },
+ {0xA1C3, 0x00000000}, //{ mmSPI_SHADER_POS_FORMAT, 0x00000000 /* Always 0 for now */}
+};
+
+static const uint32_t vs_RectPosTexFast_num_context_registers_gfx10 = 2;
+
+static const uint32_t preamblecache_gfx10[] = {
+ 0xc0026900, 0x81, 0x80000000, 0x40004000, 0xc0026900, 0x8c, 0xaa99aaaa, 0x0,
+ 0xc0026900, 0x90, 0x80000000, 0x40004000, 0xc0026900, 0x94, 0x80000000, 0x40004000,
+ 0xc0026900, 0xb4, 0x0, 0x3f800000, 0xc0016900, 0x103, 0x0,
+ 0xc0016900, 0x208, 0x0, 0xc0016900, 0x290, 0x0,
+ 0xc0016900, 0x2a1, 0x0, 0xc0026900, 0x2ad, 0x0, 0x0,
+ 0xc0016900, 0x2d5, 0x10000, 0xc0016900, 0x2dc, 0x0,
+ 0xc0066900, 0x2de, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0026900, 0x2e5, 0x0, 0x0,
+ 0xc0056900, 0x2f9, 0x5, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
+ 0xc0046900, 0x310, 0, 0x3, 0, 0x100000, 0xc0026900, 0x316, 0xe, 0x20,
+ 0xc0016900, 0x349, 0x0, 0xc0016900, 0x358, 0x0, 0xc0016900, 0x367, 0x0,
+ 0xc0016900, 0x376, 0x0, 0xc0016900, 0x385, 0x0, 0xc0016900, 0x6, 0x0,
+ 0xc0056900, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0xc0076900, 0x1e1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0xc0026900, 0x204, 0x90000, 0x4, 0xc0046900, 0x20c, 0x0, 0x0, 0x0, 0x0,
+ 0xc0016900, 0x2b2, 0x0, 0xc0026900, 0x30e, 0xffffffff, 0xffffffff,
+ 0xc0016900, 0x314, 0x0, 0xc0016900, 0x10a, 0, 0xc0016900, 0x2a6, 0, 0xc0016900, 0x210, 0,
+ 0xc0016900, 0x2db, 0, 0xc0016900, 0x1d4, 0, 0xc0002f00, 0x1, 0xc0016900, 0x1, 0x1, 0xc0016900, 0xe, 0x2,
+ 0xc0016900, 0x206, 0x300, 0xc0016900, 0x212, 0x200, 0xc0017900, 0x7b, 0x20, 0xc0017a00, 0x20000243, 0x0,
+ 0xc0017900, 0x249, 0, 0xc0017900, 0x24a, 0, 0xc0017900, 0x24b, 0, 0xc0017900, 0x259, 0xffffffff,
+ 0xc0017900, 0x25f, 0, 0xc0017900, 0x260, 0, 0xc0017900, 0x262, 0,
+ 0xc0017600, 0x45, 0x0, 0xc0017600, 0x6, 0x0,
+ 0xc0067600, 0x70, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0xc0067600, 0x30, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0
+};
+
+static const uint32_t cached_cmd_gfx10[] = {
+ 0xc0016900, 0x0, 0x0, 0xc0026900, 0x3, 0x2a, 0x0,
+ 0xc0046900, 0xa, 0x0, 0x0, 0x0, 0x200020,
+ 0xc0016900, 0x83, 0xffff, 0xc0026900, 0x8e, 0xf, 0xf,
+ 0xc0056900, 0x105, 0x0, 0x0, 0x0, 0x0, 0x18,
+ 0xc0026900, 0x10b, 0x0, 0x0, 0xc0016900, 0x1e0, 0x0,
+ 0xc0036900, 0x200, 0x0, 0x10000, 0xcc0011,
+ 0xc0026900, 0x292, 0x20, 0x6020000,
+ 0xc0026900, 0x2b0, 0x0, 0x0, 0xc0016900, 0x2f8, 0x0
+};
+
+static const uint32_t sh_reg_base_gfx10 = 0x2C00;
+static const uint32_t context_reg_base_gfx10 = 0xA000;
+
+#endif
diff --git a/tests/amdgpu/shader_code_gfx9.h b/tests/amdgpu/shader_code_gfx9.h
new file mode 100644
index 00000000..3ad1ca8f
--- /dev/null
+++ b/tests/amdgpu/shader_code_gfx9.h
@@ -0,0 +1,204 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#ifndef _shader_code_gfx9_h_
+#define _shader_code_gfx9_h_
+
+static const uint32_t bufferclear_cs_shader_gfx9[] = {
+ 0x260000ff, 0x000003ff, 0xd1fd0000, 0x04010c08,
+ 0x7e020280, 0x7e040204, 0x7e060205, 0x7e080206,
+ 0x7e0a0207, 0xe01c2000, 0x80000200, 0xbf8c0000,
+ 0xbf810000
+};
+
+static const struct reg_info bufferclear_cs_shader_registers_gfx9[] = {
+ {0x2e12, 0x000C0041}, //{ mmCOMPUTE_PGM_RSRC1, 0x000C0041 },
+ {0x2e13, 0x00000090}, //{ mmCOMPUTE_PGM_RSRC2, 0x00000090 },
+ {0x2e07, 0x00000040}, //{ mmCOMPUTE_NUM_THREAD_X, 0x00000040 },
+ {0x2e08, 0x00000001}, //{ mmCOMPUTE_NUM_THREAD_Y, 0x00000001 },
+ {0x2e09, 0x00000001}, //{ mmCOMPUTE_NUM_THREAD_Z, 0x00000001 }
+};
+
+static const uint32_t buffercopy_cs_shader_gfx9[] = {
+ 0x260000ff, 0x000003ff, 0xd1fd0000, 0x04010c08,
+ 0x7e020280, 0xe00c2000, 0x80000200, 0xbf8c0f70,
+ 0xe01c2000, 0x80010200, 0xbf810000
+};
+
+static const uint32_t ps_const_shader_gfx9[] = {
+ 0x7E000200, 0x7E020201, 0x7E040202, 0x7E060203,
+ 0xD2960000, 0x00020300, 0xD2960001, 0x00020702,
+ 0xC4001C0F, 0x00000100, 0xBF810000
+};
+
+static const uint32_t ps_const_shader_patchinfo_code_size_gfx9 = 6;
+
+static const uint32_t ps_const_shader_patchinfo_code_gfx9[][10][6] = {
+ {{ 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC4001890, 0x00000000 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC4001801, 0x00000000 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC4001803, 0x00000100 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC4001803, 0x00000300 },
+ { 0xD2960000, 0x00020300, 0xD2960001, 0x00020702, 0xC4001C0F, 0x00000100 },
+ { 0xD2950000, 0x00020300, 0xD2950001, 0x00020702, 0xC4001C0F, 0x00000100 },
+ { 0xD2940000, 0x00020300, 0xD2940001, 0x00020702, 0xC4001C0F, 0x00000100 },
+ { 0xD2970000, 0x00020300, 0xD2970001, 0x00020702, 0xC4001C0F, 0x00000100 },
+ { 0xD2980000, 0x00020300, 0xD2980001, 0x00020702, 0xC4001C0F, 0x00000100 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC400180F, 0x03020100 }
+ }
+};
+
+static const uint32_t ps_const_shader_patchinfo_offset_gfx9[] = {
+ 0x00000004
+};
+
+static const uint32_t ps_const_num_sh_registers_gfx9 = 2;
+
+static const struct reg_info ps_const_sh_registers_gfx9[] = {
+ {0x2C0A, 0x000C0040},//{ mmSPI_SHADER_PGM_RSRC1_PS, 0x000C0040 },
+ {0x2C0B, 0x00000008}, //{ mmSPI_SHADER_PGM_RSRC2_PS, 0x00000008 }
+};
+
+static const uint32_t ps_const_num_context_registers_gfx9 = 7;
+
+static const struct reg_info ps_const_context_registers_gfx9[] = {
+ {0xA1B4, 0x00000002}, //{ mmSPI_PS_INPUT_ADDR, 0x00000002 },
+ {0xA1B6, 0x00000000}, //{ mmSPI_PS_IN_CONTROL, 0x00000000 },
+ {0xA08F, 0x0000000F}, //{ mmCB_SHADER_MASK, 0x0000000F },
+ {0xA203, 0x00000010}, //{ mmDB_SHADER_CONTROL, 0x00000010 },
+ {0xA1C4, 0x00000000}, //{ mmSPI_SHADER_Z_FORMAT, 0x00000000 },
+ {0xA1B8, 0x00000000}, //{ mmSPI_BARYC_CNTL, 0x00000000 /* Always 0 for now */},
+ {0xA1C5, 0x00000004}, //{ mmSPI_SHADER_COL_FORMAT, 0x00000004 }
+};
+
+static const uint32_t ps_tex_shader_gfx9[] = {
+ 0xBEFC000C, 0xBE8E017E, 0xBEFE077E, 0xD4180000,
+ 0xD4190001, 0xD41C0100, 0xD41D0101, 0xF0800F00,
+ 0x00400206, 0xBEFE010E, 0xBF8C0F70, 0xD2960000,
+ 0x00020702, 0xD2960001, 0x00020B04, 0xC4001C0F,
+ 0x00000100, 0xBF810000
+};
+
+static const uint32_t ps_tex_shader_patchinfo_offset_gfx9[] = {
+ 0x0000000B
+};
+
+static const uint32_t ps_tex_shader_patchinfo_code_size_gfx9 = 6;
+
+static const uint32_t ps_tex_shader_patchinfo_code_gfx9[][10][6] = {
+ {{ 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC4001890, 0x00000000 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC4001801, 0x00000002 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC4001803, 0x00000302 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC4001803, 0x00000502 },
+ { 0xD2960000, 0x00020702, 0xD2960001, 0x00020B04, 0xC4001C0F, 0x00000100 },
+ { 0xD2950000, 0x00020702, 0xD2950001, 0x00020B04, 0xC4001C0F, 0x00000100 },
+ { 0xD2940000, 0x00020702, 0xD2940001, 0x00020B04, 0xC4001C0F, 0x00000100 },
+ { 0xD2970000, 0x00020702, 0xD2970001, 0x00020B04, 0xC4001C0F, 0x00000100 },
+ { 0xD2980000, 0x00020702, 0xD2980001, 0x00020B04, 0xC4001C0F, 0x00000100 },
+ { 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xC400180F, 0x05040302 }
+ }
+};
+
+static const uint32_t ps_tex_num_sh_registers_gfx9 = 2;
+static const struct reg_info ps_tex_sh_registers_gfx9[] = {
+ {0x2C0A, 0x000C0081},//{ mmSPI_SHADER_PGM_RSRC1_PS, 0x000C0081 },
+ {0x2C0B, 0x00000018}, //{ mmSPI_SHADER_PGM_RSRC2_PS, 0x00000018 }
+};
+
+static const uint32_t ps_tex_num_context_registers_gfx9 = 7;
+
+static const struct reg_info ps_tex_context_registers_gfx9[] = {
+ {0xA1B4, 0x00000002}, //{ mmSPI_PS_INPUT_ADDR, 0x00000002 },
+ {0xA1B6, 0x00000001}, //{ mmSPI_PS_IN_CONTROL, 0x00000001 },
+ {0xA08F, 0x0000000F}, //{ mmCB_SHADER_MASK, 0x0000000F },
+ {0xA203, 0x00000010}, //{ mmDB_SHADER_CONTROL, 0x00000010 },
+ {0xA1C4, 0x00000000}, //{ mmSPI_SHADER_Z_FORMAT, 0x00000000 },
+ {0xA1B8, 0x00000000}, //{ mmSPI_BARYC_CNTL, 0x00000000 /* Always 0 for now */},
+ {0xA1C5, 0x00000004}, //{ mmSPI_SHADER_COL_FORMAT, 0x00000004 }
+};
+
+static const uint32_t vs_RectPosTexFast_shader_gfx9[] = {
+ 0x7E000B00, 0x020000F3, 0xD042000A, 0x00010100,
+ 0x7E020202, 0x7E040200, 0x020000F3, 0x7E060206,
+ 0x7E080204, 0xD1000001, 0x002A0302, 0x7C840080,
+ 0x7E000200, 0x7E040203, 0x7E0A0201, 0xD1000003,
+ 0x002A0704, 0x7E0C0207, 0x7E0E0205, 0x00000101,
+ 0x00020505, 0x7E040208, 0x7E0A02F2, 0x00060903,
+ 0x00080D07, 0x7E0C0209, 0xC40008CF, 0x05020100,
+ 0xC400020F, 0x05060403, 0xBF810000
+};
+
+static const struct reg_info vs_RectPosTexFast_sh_registers_gfx9[] =
+{
+ {0x2C4A, 0x000C0081}, //{ mmSPI_SHADER_PGM_RSRC1_VS, 0x000C0081 },
+ {0x2C4B, 0x00000018}, //{ mmSPI_SHADER_PGM_RSRC2_VS, 0x00000018 }
+};
+
+static const uint32_t vs_RectPosTexFast_num_sh_registers_gfx9 = 2;
+
+// Holds Context Register Information
+static const struct reg_info vs_RectPosTexFast_context_registers_gfx9[] =
+{
+ {0xA1B1, 0x00000000}, //{ mmSPI_VS_OUT_CONFIG, 0x00000000 },
+ {0xA1C3, 0x00000000}, //{ mmSPI_SHADER_POS_FORMAT, 0x00000000 /* Always 0 for now */}
+};
+
+static const uint32_t vs_RectPosTexFast_num_context_registers_gfx9 = 2;
+
+static const uint32_t preamblecache_gfx9[] = {
+ 0xc0026900, 0x81, 0x80000000, 0x40004000, 0xc0026900, 0x8c, 0xaa99aaaa, 0x0,
+ 0xc0026900, 0x90, 0x80000000, 0x40004000, 0xc0026900, 0x94, 0x80000000, 0x40004000,
+ 0xc0026900, 0xb4, 0x0, 0x3f800000, 0xc0016900, 0x103, 0x0,
+ 0xc0016900, 0x208, 0x0, 0xc0016900, 0x290, 0x0,
+ 0xc0016900, 0x2a1, 0x0, 0xc0026900, 0x2ad, 0x0, 0x0,
+ 0xc0016900, 0x2d5, 0x10000, 0xc0016900, 0x2dc, 0x0,
+ 0xc0066900, 0x2de, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xc0026900, 0x2e5, 0x0, 0x0,
+ 0xc0056900, 0x2f9, 0x5, 0x3f800000, 0x3f800000, 0x3f800000, 0x3f800000,
+ 0xc0036900, 0x311, 0x3, 0, 0x100000, 0xc0026900, 0x316, 0x1e, 0x20,
+ 0xc0016900, 0x349, 0x0, 0xc0016900, 0x358, 0x0, 0xc0016900, 0x367, 0x0,
+ 0xc0016900, 0x376, 0x0, 0xc0016900, 0x385, 0x0, 0xc0016900, 0x19, 0x0,
+ 0xc0056900, 0xe8, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0xc0076900, 0x1e1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
+ 0xc0026900, 0x204, 0x90000, 0x4, 0xc0046900, 0x20c, 0x0, 0x0, 0x0, 0x0,
+ 0xc0016900, 0x2b2, 0x0, 0xc0026900, 0x30e, 0xffffffff, 0xffffffff,
+ 0xc0016900, 0x314, 0x0, 0xc0016900, 0x2a6, 0, 0xc0016900, 0x210, 0,
+ 0xc0002f00, 0x1, 0xc0016900, 0x1, 0x1,
+ 0xc0016900, 0x18, 0x2, 0xc0016900, 0x206, 0x300, 0xc0017900, 0x20000243, 0x0,
+ 0xc0017900, 0x248, 0xffffffff, 0xc0017900, 0x249, 0x0, 0xc0017900, 0x24a, 0x0,
+ 0xc0017900, 0x24b, 0x0
+};
+
+static const uint32_t cached_cmd_gfx9[] = {
+ 0xc0016900, 0x0, 0x0, 0xc0026900, 0x3, 0x2a, 0x0,
+ 0xc0046900, 0xa, 0x0, 0x0, 0x0, 0x200020,
+ 0xc0016900, 0x83, 0xffff, 0xc0026900, 0x8e, 0xf, 0xf,
+ 0xc0056900, 0x105, 0x0, 0x0, 0x0, 0x0, 0x12,
+ 0xc0026900, 0x10b, 0x0, 0x0, 0xc0016900, 0x1e0, 0x0,
+ 0xc0036900, 0x200, 0x0, 0x10000, 0xcc0011,
+ 0xc0026900, 0x292, 0x20, 0x60201b8,
+ 0xc0026900, 0x2b0, 0x0, 0x0, 0xc0016900, 0x2f8, 0x0
+};
+
+static const uint32_t sh_reg_base_gfx9 = 0x2C00;
+static const uint32_t context_reg_base_gfx9 = 0xA000;
+
+#endif
diff --git a/tests/amdgpu/shader_code_hang.h b/tests/amdgpu/shader_code_hang.h
new file mode 100644
index 00000000..070bd718
--- /dev/null
+++ b/tests/amdgpu/shader_code_hang.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+*/
+
+#ifndef _shader_code_hang_h_
+#define _shader_code_hang_h_
+
+static const unsigned int memcpy_shader_hang[] = {
+ 0xFFFFFFFF, 0xBEFE0A7E, 0xBEFC0304, 0xC0C20100,
+ 0xC0800300, 0xC8080000, 0xC80C0100, 0xC8090001,
+ 0xC80D0101, 0xBF8C007F, 0xF0800F00, 0x00010002,
+ 0xBEFE040C, 0xBF8C0F70, 0xBF800000, 0xBF800000,
+ 0xF800180F, 0x03020100, 0xBF810000
+};
+
+struct shader_test_shader_bin {
+ const uint32_t *shader;
+ uint32_t header_length;
+ uint32_t body_length;
+ uint32_t foot_length;
+};
+
+static const unsigned int memcpy_cs_hang_slow_ai_codes[] = {
+ 0xd1fd0000, 0x04010c08, 0xe00c2000, 0x80000100,
+ 0xbf8c0f70, 0xe01c2000, 0x80010100, 0xbf810000
+};
+
+static struct shader_test_shader_bin memcpy_cs_hang_slow_ai = {
+ memcpy_cs_hang_slow_ai_codes, 4, 3, 1
+};
+
+static const unsigned int memcpy_cs_hang_slow_rv_codes[] = {
+ 0x8e00860c, 0x32000000, 0xe00c2000, 0x80010100,
+ 0xbf8c0f70, 0xe01c2000, 0x80020100, 0xbf810000
+};
+
+static struct shader_test_shader_bin memcpy_cs_hang_slow_rv = {
+ memcpy_cs_hang_slow_rv_codes, 4, 3, 1
+};
+
+static const unsigned int memcpy_cs_hang_slow_nv_codes[] = {
+ 0xd7460000, 0x04010c08, 0xe00c2000, 0x80000100,
+ 0xbf8c0f70, 0xe01ca000, 0x80010100, 0xbf810000
+};
+
+static struct shader_test_shader_bin memcpy_cs_hang_slow_nv = {
+ memcpy_cs_hang_slow_nv_codes, 4, 3, 1
+};
+
+
+static const unsigned int memcpy_ps_hang_slow_ai_codes[] = {
+ 0xbefc000c, 0xbe8e017e, 0xbefe077e, 0xd4080000,
+ 0xd4090001, 0xd40c0100, 0xd40d0101, 0xf0800f00,
+ 0x00400002, 0xbefe010e, 0xbf8c0f70, 0xbf800000,
+ 0xbf800000, 0xbf800000, 0xbf800000, 0xc400180f,
+ 0x03020100, 0xbf810000
+};
+
+static struct shader_test_shader_bin memcpy_ps_hang_slow_ai = {
+ memcpy_ps_hang_slow_ai_codes, 7, 2, 9
+};
+
+static const unsigned int memcpy_ps_hang_slow_navi10_codes[] = {
+ 0xBEFC030C,0xBE8E047E,0xBEFE0A7E,0xC8080000,
+ 0xC80C0100,0xC8090001,0xC80D0101,0xF0800F0A,
+ 0x00400402,0x00000003,0xBEFE040E,0xBF8C0F70,
+ 0xBF800000,0xBF800000,0xBF800000,0xBF800000,
+ 0xF800180F,0x07060504,0xBF810000
+};
+
+static struct shader_test_shader_bin memcpy_ps_hang_slow_navi10 = {
+ memcpy_ps_hang_slow_navi10_codes, 7, 3, 9
+};
+
+static const unsigned int memcpy_ps_hang_slow_navi21_codes[] = {
+ 0xBEFC030C, 0xBE8E047E, 0xBEFE0A7E, 0xC8080000, 0xC8000100, 0xC8090001, 0xC8010101, 0x87FE0E7E, // header
+ 0xF0800F0A, 0x00400002, 0x00000000, // body - image_sample instruction
+ 0xBFA3FFE3, 0xBEFE040E, 0xBF8C3F70, 0xBF800000, 0xBF800000, 0xBF800000, 0xBF800000, 0xF800180F, 0x03020100, 0xBF810000 // footer
+};
+
+static struct shader_test_shader_bin memcpy_ps_hang_slow_navi21 = {
+ memcpy_ps_hang_slow_navi21_codes, 8, 3, 10
+};
+
+#endif
diff --git a/tests/amdgpu/shader_test_util.c b/tests/amdgpu/shader_test_util.c
new file mode 100644
index 00000000..bf62efbe
--- /dev/null
+++ b/tests/amdgpu/shader_test_util.c
@@ -0,0 +1,1723 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <string.h>
+
+#include "CUnit/Basic.h"
+#include "amdgpu_test.h"
+#include "shader_code.h"
+
+#define PACKET3_DISPATCH_DIRECT 0x15
+#define PACKET3_CONTEXT_CONTROL 0x28
+#define PACKET3_DRAW_INDEX_AUTO 0x2D
+#define PACKET3_SET_CONTEXT_REG 0x69
+#define PACKET3_SET_SH_REG 0x76
+#define PACKET3_SET_SH_REG_OFFSET 0x77
+#define PACKET3_SET_UCONFIG_REG 0x79
+#define PACKET3_SET_SH_REG_INDEX 0x9B
+
+#define PACKET_TYPE3 3
+#define PACKET3(op, n) ((PACKET_TYPE3 << 30) | \
+ (((op) & 0xFF) << 8) | \
+ ((n) & 0x3FFF) << 16)
+#define PACKET3_COMPUTE(op, n) PACKET3(op, n) | (1 << 1)
+
+
+struct shader_test_bo {
+ amdgpu_bo_handle bo;
+ unsigned size;
+ unsigned heap;
+ void *ptr;
+ uint64_t mc_address;
+ amdgpu_va_handle va;
+};
+
+struct shader_test_draw {
+ struct shader_test_bo ps_bo;
+ enum ps_type ps_type;
+ struct shader_test_bo vs_bo;
+ enum vs_type vs_type;
+};
+struct shader_test_dispatch {
+ struct shader_test_bo cs_bo;
+ enum cs_type cs_type;
+};
+
+struct shader_test_info {
+ amdgpu_device_handle device_handle;
+ enum amdgpu_test_gfx_version version;
+ unsigned ip;
+ unsigned ring;
+ int hang;
+ int hang_slow;
+};
+
+struct shader_test_priv {
+ const struct shader_test_info *info;
+ unsigned cmd_curr;
+
+ union {
+ struct shader_test_draw shader_draw;
+ struct shader_test_dispatch shader_dispatch;
+ };
+ struct shader_test_bo cmd;
+ struct shader_test_bo src;
+ struct shader_test_bo dst;
+};
+
+static int shader_test_bo_alloc(amdgpu_device_handle device_handle,
+ struct shader_test_bo *shader_test_bo)
+{
+ return amdgpu_bo_alloc_and_map(device_handle, shader_test_bo->size, 4096,
+ shader_test_bo->heap, 0,
+ &(shader_test_bo->bo), (void **)&(shader_test_bo->ptr),
+ &(shader_test_bo->mc_address), &(shader_test_bo->va));
+}
+
+static int shader_test_bo_free(struct shader_test_bo *shader_test_bo)
+{
+ return amdgpu_bo_unmap_and_free(shader_test_bo->bo, shader_test_bo->va,
+ shader_test_bo->mc_address,
+ shader_test_bo->size);
+}
+
+void shader_test_for_each(amdgpu_device_handle device_handle, unsigned ip,
+ void (*fn)(struct shader_test_info *test_info))
+{
+ int r;
+ uint32_t ring_id;
+ struct shader_test_info test_info = {0};
+ struct drm_amdgpu_info_hw_ip info = {0};
+
+ r = amdgpu_query_hw_ip_info(device_handle, ip, 0, &info);
+ CU_ASSERT_EQUAL(r, 0);
+ if (!info.available_rings) {
+ printf("SKIP ... as there's no %s ring\n",
+ (ip == AMDGPU_HW_IP_GFX) ? "graphics": "compute");
+ return;
+ }
+
+ switch (info.hw_ip_version_major) {
+ case 9:
+ test_info.version = AMDGPU_TEST_GFX_V9;
+ break;
+ case 10:
+ test_info.version = AMDGPU_TEST_GFX_V10;
+ break;
+ default:
+ printf("SKIP ... unsupported gfx version %d\n", info.hw_ip_version_major);
+ return;
+ }
+
+ test_info.device_handle = device_handle;
+ test_info.ip = ip;
+
+ printf("\n");
+ for (ring_id = 0; (1 << ring_id) & info.available_rings; ring_id++) {
+ printf("%s ring %d\n", (ip == AMDGPU_HW_IP_GFX) ? "graphics": "compute",
+ ring_id);
+ test_info.ring = ring_id;
+ fn(&test_info);
+ }
+}
+
+static void write_context_control(struct shader_test_priv *test_priv)
+{
+ int i = test_priv->cmd_curr;
+ uint32_t *ptr = test_priv->cmd.ptr;
+
+ if (test_priv->info->ip == AMDGPU_HW_IP_GFX) {
+ ptr[i++] = PACKET3(PACKET3_CONTEXT_CONTROL, 1);
+ ptr[i++] = 0x80000000;
+ ptr[i++] = 0x80000000;
+ }
+
+ test_priv->cmd_curr = i;
+}
+
+static void shader_test_load_shader_hang_slow(struct shader_test_bo *shader_bo,
+ struct shader_test_shader_bin *shader_bin)
+{
+ int i, j, loop;
+
+ loop = (shader_bo->size / sizeof(uint32_t) - shader_bin->header_length
+ - shader_bin->foot_length) / shader_bin->body_length;
+
+	memcpy(shader_bo->ptr, shader_bin->shader, shader_bin->header_length * sizeof(uint32_t));
+
+	j = shader_bin->header_length;
+	for (i = 0; i < loop; i++) {
+		memcpy((uint32_t *)shader_bo->ptr + j,
+			shader_bin->shader + shader_bin->header_length,
+			shader_bin->body_length * sizeof(uint32_t));
+		j += shader_bin->body_length;
+	}
+
+	memcpy((uint32_t *)shader_bo->ptr + j,
+		shader_bin->shader + shader_bin->header_length + shader_bin->body_length,
+		shader_bin->foot_length * sizeof(uint32_t));
+}
+
+static void amdgpu_dispatch_load_cs_shader_hang_slow(struct shader_test_priv *test_priv)
+{
+ struct amdgpu_gpu_info gpu_info = {0};
+ struct shader_test_shader_bin *cs_shader_bin;
+ int r;
+
+ r = amdgpu_query_gpu_info(test_priv->info->device_handle, &gpu_info);
+ CU_ASSERT_EQUAL(r, 0);
+
+ switch (gpu_info.family_id) {
+ case AMDGPU_FAMILY_AI:
+ cs_shader_bin = &memcpy_cs_hang_slow_ai;
+ break;
+ case AMDGPU_FAMILY_RV:
+ cs_shader_bin = &memcpy_cs_hang_slow_rv;
+ break;
+ default:
+ cs_shader_bin = &memcpy_cs_hang_slow_nv;
+ break;
+ }
+
+ shader_test_load_shader_hang_slow(&test_priv->shader_dispatch.cs_bo, cs_shader_bin);
+}
+
+static void amdgpu_dispatch_load_cs_shader(struct shader_test_priv *test_priv)
+{
+ if (test_priv->info->hang) {
+ if (test_priv->info->hang_slow)
+ amdgpu_dispatch_load_cs_shader_hang_slow(test_priv);
+ else
+ memcpy(test_priv->shader_dispatch.cs_bo.ptr, memcpy_shader_hang,
+ sizeof(memcpy_shader_hang));
+ } else {
+ memcpy(test_priv->shader_dispatch.cs_bo.ptr,
+ shader_test_cs[test_priv->info->version][test_priv->shader_dispatch.cs_type].shader,
+ shader_test_cs[test_priv->info->version][test_priv->shader_dispatch.cs_type].shader_size);
+ }
+}
+
+static void amdgpu_dispatch_init_gfx9(struct shader_test_priv *test_priv)
+{
+ int i;
+ uint32_t *ptr = test_priv->cmd.ptr;
+
+ /* Write context control and load shadowing register if necessary */
+ write_context_control(test_priv);
+
+ i = test_priv->cmd_curr;
+
+ /* Issue commands to set default compute state. */
+ /* clear mmCOMPUTE_START_Z - mmCOMPUTE_START_X */
+ ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 3);
+ ptr[i++] = 0x204;
+ i += 3;
+
+ /* clear mmCOMPUTE_TMPRING_SIZE */
+ ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 1);
+ ptr[i++] = 0x218;
+ ptr[i++] = 0;
+
+ test_priv->cmd_curr = i;
+}
+
+static void amdgpu_dispatch_init_gfx10(struct shader_test_priv *test_priv)
+{
+ int i;
+ uint32_t *ptr = test_priv->cmd.ptr;
+
+ amdgpu_dispatch_init_gfx9(test_priv);
+
+ i = test_priv->cmd_curr;
+
+ /* mmCOMPUTE_SHADER_CHKSUM */
+ ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 1);
+ ptr[i++] = 0x22a;
+ ptr[i++] = 0;
+ /* mmCOMPUTE_REQ_CTRL */
+ ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 6);
+ ptr[i++] = 0x222;
+ i += 6;
+ /* mmCP_COHER_START_DELAY */
+ ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
+ ptr[i++] = 0x7b;
+ ptr[i++] = 0x20;
+
+ test_priv->cmd_curr = i;
+}
+
+static void amdgpu_dispatch_init(struct shader_test_priv *test_priv)
+{
+ switch (test_priv->info->version) {
+ case AMDGPU_TEST_GFX_V9:
+ amdgpu_dispatch_init_gfx9(test_priv);
+ break;
+ case AMDGPU_TEST_GFX_V10:
+ amdgpu_dispatch_init_gfx10(test_priv);
+ break;
+ }
+}
+
+static void amdgpu_dispatch_write_cumask(struct shader_test_priv *test_priv)
+{
+ int i = test_priv->cmd_curr;
+ uint32_t *ptr = test_priv->cmd.ptr;
+
+ /* Issue commands to set cu mask used in current dispatch */
+ switch (test_priv->info->version) {
+ case AMDGPU_TEST_GFX_V9:
+ /* set mmCOMPUTE_STATIC_THREAD_MGMT_SE1 - mmCOMPUTE_STATIC_THREAD_MGMT_SE0 */
+ ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 2);
+ ptr[i++] = 0x216;
+ ptr[i++] = 0xffffffff;
+ ptr[i++] = 0xffffffff;
+ /* set mmCOMPUTE_STATIC_THREAD_MGMT_SE3 - mmCOMPUTE_STATIC_THREAD_MGMT_SE2 */
+ ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 2);
+ ptr[i++] = 0x219;
+ ptr[i++] = 0xffffffff;
+ ptr[i++] = 0xffffffff;
+ break;
+ case AMDGPU_TEST_GFX_V10:
+ /* set mmCOMPUTE_STATIC_THREAD_MGMT_SE1 - mmCOMPUTE_STATIC_THREAD_MGMT_SE0 */
+ ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG_INDEX, 2);
+ ptr[i++] = 0x30000216;
+ ptr[i++] = 0xffffffff;
+ ptr[i++] = 0xffffffff;
+ /* set mmCOMPUTE_STATIC_THREAD_MGMT_SE3 - mmCOMPUTE_STATIC_THREAD_MGMT_SE2 */
+ ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG_INDEX, 2);
+ ptr[i++] = 0x30000219;
+ ptr[i++] = 0xffffffff;
+ ptr[i++] = 0xffffffff;
+ break;
+ }
+
+ test_priv->cmd_curr = i;
+}
+
/* Emit the gfx9 PM4 packets that program the compute shader state:
 * program address, per-shader SH registers and the buffer descriptors
 * the CS reads from its SGPRs (src/dst for BUFFERCOPY, dst plus the
 * 0x22222222 clear pattern for BUFFERCLEAR).
 * Appends at test_priv->cmd_curr and advances it.
 */
static void amdgpu_dispatch_write2hw_gfx9(struct shader_test_priv *test_priv)
{
	const struct shader_test_cs_shader *cs_shader = &shader_test_cs[test_priv->info->version][test_priv->shader_dispatch.cs_type];
	int j, i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;
	uint64_t shader_addr = test_priv->shader_dispatch.cs_bo.mc_address;

	/* Writes shader state to HW */
	/* set mmCOMPUTE_PGM_HI - mmCOMPUTE_PGM_LO */
	ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 2);
	ptr[i++] = 0x20c;
	ptr[i++] = (shader_addr >> 8); /* program address is 256-byte aligned */
	ptr[i++] = (shader_addr >> 40);
	/* write sh regs*/
	for (j = 0; j < cs_shader->num_sh_reg; j++) {
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 1);
		/* - Gfx9ShRegBase */
		ptr[i++] = cs_shader->sh_reg[j].reg_offset - shader_test_gfx_info[test_priv->info->version].sh_reg_base;
		ptr[i++] = cs_shader->sh_reg[j].reg_value;
	}

	/* Write constant data */
	if (CS_BUFFERCLEAR == test_priv->shader_dispatch.cs_type) {
		/* dst buffer descriptor at USER_DATA 0x240 */
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 4);
		ptr[i++] = 0x240;
		ptr[i++] = test_priv->dst.mc_address;
		ptr[i++] = (test_priv->dst.mc_address >> 32) | 0x100000;
		ptr[i++] = test_priv->dst.size / 16; /* elements of 16 bytes */
		ptr[i++] = 0x74fac;

		/* Sets a range of pixel shader constants */
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 4);
		ptr[i++] = 0x244;
		ptr[i++] = 0x22222222; /* fill pattern verified by the memset test */
		ptr[i++] = 0x22222222;
		ptr[i++] = 0x22222222;
		ptr[i++] = 0x22222222;
	} else {
		/* src buffer descriptor */
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 4);
		ptr[i++] = 0x240;
		ptr[i++] = test_priv->src.mc_address;
		ptr[i++] = (test_priv->src.mc_address >> 32) | 0x100000;
		ptr[i++] = test_priv->src.size / 16;
		ptr[i++] = 0x74fac;

		/* Writes the UAV constant data to the SGPRs. */
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 4);
		ptr[i++] = 0x244;
		ptr[i++] = test_priv->dst.mc_address;
		ptr[i++] = (test_priv->dst.mc_address >> 32) | 0x100000;
		ptr[i++] = test_priv->dst.size / 16;
		ptr[i++] = 0x74fac;
	}

	test_priv->cmd_curr = i;
}
+
/* gfx10 variant of the compute shader-state programming.  Same layout as
 * the gfx9 path plus an explicit clear of mmCOMPUTE_PGM_RSRC3 and a
 * different descriptor dword3 (0x1104bfac vs gfx9's 0x74fac).
 * Appends at test_priv->cmd_curr and advances it.
 */
static void amdgpu_dispatch_write2hw_gfx10(struct shader_test_priv *test_priv)
{
	int i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;
	const struct shader_test_cs_shader *cs_shader = &shader_test_cs[test_priv->info->version][test_priv->shader_dispatch.cs_type];
	int j;
	uint64_t shader_addr = test_priv->shader_dispatch.cs_bo.mc_address;

	/* Writes shader state to HW */
	/* set mmCOMPUTE_PGM_HI - mmCOMPUTE_PGM_LO */
	ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 2);
	ptr[i++] = 0x20c;
	ptr[i++] = (shader_addr >> 8); /* program address is 256-byte aligned */
	ptr[i++] = (shader_addr >> 40);
	/* write sh regs*/
	for (j = 0; j < cs_shader->num_sh_reg; j++) {
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 1);
		/* - Gfx9ShRegBase */
		ptr[i++] = cs_shader->sh_reg[j].reg_offset - shader_test_gfx_info[test_priv->info->version].sh_reg_base;
		ptr[i++] = cs_shader->sh_reg[j].reg_value;
	}

	/* mmCOMPUTE_PGM_RSRC3 */
	ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 1);
	ptr[i++] = 0x228;
	ptr[i++] = 0;

	if (CS_BUFFERCLEAR == test_priv->shader_dispatch.cs_type) {
		/* dst buffer descriptor at USER_DATA 0x240 */
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 4);
		ptr[i++] = 0x240;
		ptr[i++] = test_priv->dst.mc_address;
		ptr[i++] = (test_priv->dst.mc_address >> 32) | 0x100000;
		ptr[i++] = test_priv->dst.size / 16; /* elements of 16 bytes */
		ptr[i++] = 0x1104bfac;

		/* Sets a range of pixel shader constants */
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 4);
		ptr[i++] = 0x244;
		ptr[i++] = 0x22222222; /* fill pattern verified by the memset test */
		ptr[i++] = 0x22222222;
		ptr[i++] = 0x22222222;
		ptr[i++] = 0x22222222;
	} else {
		/* src buffer descriptor */
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 4);
		ptr[i++] = 0x240;
		ptr[i++] = test_priv->src.mc_address;
		ptr[i++] = (test_priv->src.mc_address >> 32) | 0x100000;
		ptr[i++] = test_priv->src.size / 16;
		ptr[i++] = 0x1104bfac;

		/* Writes the UAV constant data to the SGPRs. */
		ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 4);
		ptr[i++] = 0x244;
		ptr[i++] = test_priv->dst.mc_address;
		ptr[i++] = (test_priv->dst.mc_address>> 32) | 0x100000;
		ptr[i++] = test_priv->dst.size / 16;
		ptr[i++] = 0x1104bfac;
	}

	test_priv->cmd_curr = i;
}
+
+static void amdgpu_dispatch_write2hw(struct shader_test_priv *test_priv)
+{
+ switch (test_priv->info->version) {
+ case AMDGPU_TEST_GFX_V9:
+ amdgpu_dispatch_write2hw_gfx9(test_priv);
+ break;
+ case AMDGPU_TEST_GFX_V10:
+ amdgpu_dispatch_write2hw_gfx10(test_priv);
+ break;
+ }
+}
+
/* Emit the dispatch itself: clear mmCOMPUTE_RESOURCE_LIMITS, then
 * DISPATCH_DIRECT with ceil((dst.size / 16) / 64) thread groups in X
 * (one thread per 16-byte element, 64 threads per group) and 1x1 in Y/Z.
 */
static void amdgpu_dispatch_write_dispatch_cmd(struct shader_test_priv *test_priv)
{
	int i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;

	/* clear mmCOMPUTE_RESOURCE_LIMITS */
	ptr[i++] = PACKET3_COMPUTE(PACKET3_SET_SH_REG, 1);
	ptr[i++] = 0x215;
	ptr[i++] = 0;

	/* dispatch direct command */
	ptr[i++] = PACKET3_COMPUTE(PACKET3_DISPATCH_DIRECT, 3);
	ptr[i++] = (test_priv->dst.size / 16 + 0x40 - 1 ) / 0x40;//0x10;
	ptr[i++] = 1;
	ptr[i++] = 1;
	ptr[i++] = 1;

	test_priv->cmd_curr = i;
}
/* Dispatch a CS_BUFFERCLEAR compute shader on test_info->ip/ring and
 * verify the destination buffer was filled with the 0x22222222 pattern.
 * Allocates command/shader/destination BOs, builds and submits one IB,
 * waits for the fence, then spot-checks the start, end and middle of dst.
 */
static void amdgpu_test_dispatch_memset(struct shader_test_info *test_info)
{
	amdgpu_context_handle context_handle;
	amdgpu_bo_handle resources[3];
	struct shader_test_priv test_priv;
	struct shader_test_bo *cmd = &(test_priv.cmd);
	struct shader_test_bo *dst = &(test_priv.dst);
	struct shader_test_bo *shader = &(test_priv.shader_dispatch.cs_bo);
	uint32_t *ptr_cmd;
	uint8_t *ptr_dst;
	int i, r;
	struct amdgpu_cs_request ibs_request = {0};
	struct amdgpu_cs_ib_info ib_info= {0};
	amdgpu_bo_list_handle bo_list;
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t expired;
	uint8_t cptr[16];

	memset(&test_priv, 0, sizeof(test_priv));
	test_priv.info = test_info;
	test_priv.shader_dispatch.cs_type = CS_BUFFERCLEAR;
	r = amdgpu_cs_ctx_create(test_info->device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	/* IB lives in GTT so the CPU can write packets into it directly */
	cmd->size = 4096;
	cmd->heap = AMDGPU_GEM_DOMAIN_GTT;
	r = shader_test_bo_alloc(test_info->device_handle, cmd);
	CU_ASSERT_EQUAL(r, 0);
	ptr_cmd = cmd->ptr;
	memset(ptr_cmd, 0, cmd->size);

	shader->size = 4096;
	shader->heap = AMDGPU_GEM_DOMAIN_VRAM;
	r = shader_test_bo_alloc(test_info->device_handle, shader);
	CU_ASSERT_EQUAL(r, 0);
	memset(shader->ptr, 0, shader->size);
	amdgpu_dispatch_load_cs_shader(&test_priv);

	dst->size = 0x4000;
	dst->heap = AMDGPU_GEM_DOMAIN_VRAM;
	r = shader_test_bo_alloc(test_info->device_handle, dst);
	CU_ASSERT_EQUAL(r, 0);

	amdgpu_dispatch_init(&test_priv);

	/* Issue commands to set cu mask used in current dispatch */
	amdgpu_dispatch_write_cumask(&test_priv);

	/* Writes shader state to HW */
	amdgpu_dispatch_write2hw(&test_priv);

	amdgpu_dispatch_write_dispatch_cmd(&test_priv);

	/* pad the IB to a multiple of 8 dwords */
	i = test_priv.cmd_curr;
	while (i & 7)
		ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
	test_priv.cmd_curr = i;

	resources[0] = dst->bo;
	resources[1] = shader->bo;
	resources[2] = cmd->bo;
	r = amdgpu_bo_list_create(test_info->device_handle, 3, resources, NULL, &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ib_info.ib_mc_address = cmd->mc_address;
	ib_info.size = test_priv.cmd_curr;
	ibs_request.ip_type = test_info->ip;
	ibs_request.ring = test_info->ring;
	ibs_request.resources = bo_list;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.fence_info.handle = NULL;

	/* submit CS */
	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	fence_status.ip_type = test_info->ip;
	fence_status.ip_instance = 0;
	fence_status.ring = test_info->ring;
	fence_status.context = context_handle;
	fence_status.fence = ibs_request.seq_no;

	/* wait for IB accomplished */
	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(expired, true);

	/* verify if memset test result meets with expected */
	i = 0;
	ptr_dst = (uint8_t *)(dst->ptr);
	memset(cptr, 0x22, 16);
	CU_ASSERT_EQUAL(memcmp(ptr_dst + i, cptr, 16), 0);
	i = dst->size - 16;
	CU_ASSERT_EQUAL(memcmp(ptr_dst + i, cptr, 16), 0);
	i = dst->size / 2;
	CU_ASSERT_EQUAL(memcmp(ptr_dst + i, cptr, 16), 0);

	r = shader_test_bo_free(dst);
	CU_ASSERT_EQUAL(r, 0);

	r = shader_test_bo_free(shader);
	CU_ASSERT_EQUAL(r, 0);

	r = shader_test_bo_free(cmd);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}
+
+static
+void amdgpu_test_dispatch_memcpy(struct shader_test_info *test_info)
+{
+ struct shader_test_priv test_priv;
+ amdgpu_context_handle context_handle;
+ amdgpu_bo_handle resources[4];
+ struct shader_test_bo *cmd = &(test_priv.cmd);
+ struct shader_test_bo *src = &(test_priv.src);
+ struct shader_test_bo *dst = &(test_priv.dst);
+ struct shader_test_bo *shader = &(test_priv.shader_dispatch.cs_bo);
+ uint32_t *ptr_cmd;
+ uint8_t *ptr_src;
+ uint8_t *ptr_dst;
+ int i, r;
+ struct amdgpu_cs_request ibs_request = {0};
+ struct amdgpu_cs_ib_info ib_info= {0};
+ uint32_t expired, hang_state, hangs;
+ amdgpu_bo_list_handle bo_list;
+ struct amdgpu_cs_fence fence_status = {0};
+
+ memset(&test_priv, 0, sizeof(test_priv));
+ test_priv.info = test_info;
+ test_priv.cmd.size = 4096;
+ test_priv.cmd.heap = AMDGPU_GEM_DOMAIN_GTT;
+
+ test_priv.shader_dispatch.cs_bo.heap = AMDGPU_GEM_DOMAIN_VRAM;
+ test_priv.shader_dispatch.cs_type = CS_BUFFERCOPY;
+ test_priv.src.heap = AMDGPU_GEM_DOMAIN_VRAM;
+ test_priv.dst.heap = AMDGPU_GEM_DOMAIN_VRAM;
+ if (test_info->hang_slow) {
+ test_priv.shader_dispatch.cs_bo.size = 0x4000000;
+ test_priv.src.size = 0x4000000;
+ test_priv.dst.size = 0x4000000;
+ } else {
+ test_priv.shader_dispatch.cs_bo.size = 4096;
+ test_priv.src.size = 0x4000;
+ test_priv.dst.size = 0x4000;
+ }
+
+ r = amdgpu_cs_ctx_create(test_info->device_handle, &context_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_alloc(test_info->device_handle, cmd);
+ CU_ASSERT_EQUAL(r, 0);
+ ptr_cmd = cmd->ptr;
+ memset(ptr_cmd, 0, cmd->size);
+
+ r = shader_test_bo_alloc(test_info->device_handle, shader);
+ CU_ASSERT_EQUAL(r, 0);
+ memset(shader->ptr, 0, shader->size);
+ amdgpu_dispatch_load_cs_shader(&test_priv);
+
+ r = shader_test_bo_alloc(test_info->device_handle, src);
+ CU_ASSERT_EQUAL(r, 0);
+ ptr_src = (uint8_t *)(src->ptr);
+ memset(ptr_src, 0x55, src->size);
+
+ r = shader_test_bo_alloc(test_info->device_handle, dst);
+ CU_ASSERT_EQUAL(r, 0);
+
+ amdgpu_dispatch_init(&test_priv);
+
+ /* Issue commands to set cu mask used in current dispatch */
+ amdgpu_dispatch_write_cumask(&test_priv);
+
+ /* Writes shader state to HW */
+ amdgpu_dispatch_write2hw(&test_priv);
+
+ amdgpu_dispatch_write_dispatch_cmd(&test_priv);
+
+ i = test_priv.cmd_curr;
+ while (i & 7)
+ ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
+ test_priv.cmd_curr = i;
+
+ resources[0] = shader->bo;
+ resources[1] = src->bo;
+ resources[2] = dst->bo;
+ resources[3] = cmd->bo;
+ r = amdgpu_bo_list_create(test_info->device_handle, 4, resources, NULL, &bo_list);
+ CU_ASSERT_EQUAL(r, 0);
+
+ ib_info.ib_mc_address = cmd->mc_address;
+ ib_info.size = test_priv.cmd_curr;
+ ibs_request.ip_type = test_info->ip;
+ ibs_request.ring = test_info->ring;
+ ibs_request.resources = bo_list;
+ ibs_request.number_of_ibs = 1;
+ ibs_request.ibs = &ib_info;
+ ibs_request.fence_info.handle = NULL;
+ r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
+ CU_ASSERT_EQUAL(r, 0);
+
+ fence_status.ip_type = test_info->ip;
+ fence_status.ip_instance = 0;
+ fence_status.ring = test_info->ring;
+ fence_status.context = context_handle;
+ fence_status.fence = ibs_request.seq_no;
+
+ /* wait for IB accomplished */
+ r = amdgpu_cs_query_fence_status(&fence_status,
+ AMDGPU_TIMEOUT_INFINITE,
+ 0, &expired);
+
+ if (!test_info->hang) {
+ CU_ASSERT_EQUAL(r, 0);
+ CU_ASSERT_EQUAL(expired, true);
+
+ /* verify if memcpy test result meets with expected */
+ i = 0;
+ ptr_dst = (uint8_t *)dst->ptr;
+ CU_ASSERT_EQUAL(memcmp(ptr_dst + i, ptr_src + i, 16), 0);
+ i = dst->size - 16;
+ CU_ASSERT_EQUAL(memcmp(ptr_dst + i, ptr_src + i, 16), 0);
+ i = dst->size / 2;
+ CU_ASSERT_EQUAL(memcmp(ptr_dst + i, ptr_src + i, 16), 0);
+ } else {
+ r = amdgpu_cs_query_reset_state(context_handle, &hang_state, &hangs);
+ CU_ASSERT_EQUAL(r, 0);
+ CU_ASSERT_EQUAL(hang_state, AMDGPU_CTX_UNKNOWN_RESET);
+ }
+
+ r = amdgpu_bo_list_destroy(bo_list);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_free(src);
+ CU_ASSERT_EQUAL(r, 0);
+ r = shader_test_bo_free(dst);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_free(shader);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_free(cmd);
+
+ r = amdgpu_cs_ctx_free(context_handle);
+ CU_ASSERT_EQUAL(r, 0);
+}
+
/* Per-configuration callback for amdgpu_test_dispatch_helper:
 * run both the memset and the memcpy dispatch test.
 */
static void shader_test_dispatch_cb(struct shader_test_info *test_info)
{
	amdgpu_test_dispatch_memset(test_info);
	amdgpu_test_dispatch_memcpy(test_info);
}
+static void shader_test_dispatch_hang_cb(struct shader_test_info *test_info)
+{
+ test_info->hang = 0;
+ amdgpu_test_dispatch_memcpy(test_info);
+
+ test_info->hang = 1;
+ amdgpu_test_dispatch_memcpy(test_info);
+
+ test_info->hang = 0;
+ amdgpu_test_dispatch_memcpy(test_info);
+}
+
+static void shader_test_dispatch_hang_slow_cb(struct shader_test_info *test_info)
+{
+ test_info->hang = 0;
+ test_info->hang_slow = 0;
+ amdgpu_test_dispatch_memcpy(test_info);
+
+ test_info->hang = 1;
+ test_info->hang_slow = 1;
+ amdgpu_test_dispatch_memcpy(test_info);
+
+ test_info->hang = 0;
+ test_info->hang_slow = 0;
+ amdgpu_test_dispatch_memcpy(test_info);
+}
+
/* Public entry (declared in amdgpu_test.h): run the normal dispatch
 * tests for each configuration shader_test_for_each enumerates on @ip.
 */
void amdgpu_test_dispatch_helper(amdgpu_device_handle device_handle, unsigned ip)
{
	shader_test_for_each(device_handle, ip, shader_test_dispatch_cb);
}
+
/* Public entry (declared in amdgpu_test.h): run the dispatch hang test
 * for each configuration shader_test_for_each enumerates on @ip.
 */
void amdgpu_test_dispatch_hang_helper(amdgpu_device_handle device_handle, uint32_t ip)
{
	shader_test_for_each(device_handle, ip, shader_test_dispatch_hang_cb);
}
+
/* Public entry (declared in amdgpu_test.h): run the slow-hang dispatch
 * test for each configuration shader_test_for_each enumerates on @ip.
 */
void amdgpu_test_dispatch_hang_slow_helper(amdgpu_device_handle device_handle, uint32_t ip)
{
	shader_test_for_each(device_handle, ip, shader_test_dispatch_hang_slow_cb);
}
+
+static void amdgpu_draw_load_ps_shader_hang_slow(struct shader_test_priv *test_priv)
+{
+ struct amdgpu_gpu_info gpu_info = {0};
+ struct shader_test_shader_bin *ps_shader_bin = &memcpy_ps_hang_slow_navi21;
+ int r;
+
+ r = amdgpu_query_gpu_info(test_priv->info->device_handle, &gpu_info);
+ CU_ASSERT_EQUAL(r, 0);
+
+ switch (gpu_info.family_id) {
+ case AMDGPU_FAMILY_AI:
+ case AMDGPU_FAMILY_RV:
+ ps_shader_bin = &memcpy_ps_hang_slow_ai;
+ break;
+ case AMDGPU_FAMILY_NV:
+ if (gpu_info.chip_external_rev < 40)
+ ps_shader_bin = &memcpy_ps_hang_slow_navi10;
+ break;
+ }
+
+ shader_test_load_shader_hang_slow(&test_priv->shader_draw.ps_bo, ps_shader_bin);
+}
+
/* Round @size up to the next multiple of 256 (shader code alignment). */
static uint32_t round_up_size(uint32_t size)
{
	uint32_t rem = size % 256;

	return rem ? size + (256 - rem) : size;
}
+static void amdgpu_draw_load_ps_shader(struct shader_test_priv *test_priv)
+{
+ uint8_t *ptr_shader = test_priv->shader_draw.ps_bo.ptr;
+ const struct shader_test_ps_shader *shader;
+ uint32_t shader_offset, num_export_fmt;
+ uint32_t mem_offset, patch_code_offset;
+ int i;
+
+ if (test_priv->info->hang) {
+ if (test_priv->info->hang_slow)
+ amdgpu_draw_load_ps_shader_hang_slow(test_priv);
+ else
+ memcpy(ptr_shader, memcpy_shader_hang, sizeof(memcpy_shader_hang));
+
+ return;
+ }
+
+ shader = &shader_test_ps[test_priv->info->version][test_priv->shader_draw.ps_type];
+ num_export_fmt = 10;
+ shader_offset = round_up_size(shader->shader_size);
+ /* write main shader program */
+ for (i = 0 ; i < num_export_fmt; i++) {
+ mem_offset = i * shader_offset;
+ memcpy(ptr_shader + mem_offset, shader->shader, shader->shader_size);
+ }
+
+ /* overwrite patch codes */
+ for (i = 0 ; i < num_export_fmt; i++) {
+ mem_offset = i * shader_offset + shader->patchinfo_code_offset[0] * sizeof(uint32_t);
+ patch_code_offset = i * shader->patchinfo_code_size;
+ memcpy(ptr_shader + mem_offset,
+ shader->patchinfo_code + patch_code_offset,
+ shader->patchinfo_code_size * sizeof(uint32_t));
+ }
+}
+
+/* load RectPosTexFast_VS */
+static void amdgpu_draw_load_vs_shader(struct shader_test_priv *test_priv)
+{
+ uint8_t *ptr_shader = test_priv->shader_draw.vs_bo.ptr;
+ const struct shader_test_vs_shader *shader = &shader_test_vs[test_priv->info->version][test_priv->shader_draw.vs_type];
+
+ memcpy(ptr_shader, shader->shader, shader->shader_size);
+}
+
+static void amdgpu_draw_init(struct shader_test_priv *test_priv)
+{
+ int i;
+ uint32_t *ptr = test_priv->cmd.ptr;
+ const struct shader_test_gfx_info *gfx_info = &shader_test_gfx_info[test_priv->info->version];
+
+ /* Write context control and load shadowing register if necessary */
+ write_context_control(test_priv);
+ i = test_priv->cmd_curr;
+
+ memcpy(ptr + i, gfx_info->preamble_cache, gfx_info->size_preamble_cache);
+
+ test_priv->cmd_curr = i + gfx_info->size_preamble_cache/sizeof(uint32_t);
+}
+
/* Program the gfx9 render-target (CB_COLOR0_*) and depth state for the
 * draw tests.  dst is the color buffer; skipped dwords (i += N) stay 0
 * because the IB was memset to zero by the caller.
 */
static void amdgpu_draw_setup_and_write_drawblt_surf_info_gfx9(struct shader_test_priv *test_priv)
{
	int i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;

	/* setup color buffer */
	/* offset   reg
	   0xA318   CB_COLOR0_BASE
	   0xA319   CB_COLOR0_BASE_EXT
	   0xA31A   CB_COLOR0_ATTRIB2
	   0xA31B   CB_COLOR0_VIEW
	   0xA31C   CB_COLOR0_INFO
	   0xA31D   CB_COLOR0_ATTRIB
	   0xA31E   CB_COLOR0_DCC_CONTROL
	   0xA31F   CB_COLOR0_CMASK
	   0xA320   CB_COLOR0_CMASK_BASE_EXT
	   0xA321   CB_COLOR0_FMASK
	   0xA322   CB_COLOR0_FMASK_BASE_EXT
	   0xA323   CB_COLOR0_CLEAR_WORD0
	   0xA324   CB_COLOR0_CLEAR_WORD1
	   0xA325   CB_COLOR0_DCC_BASE
	   0xA326   CB_COLOR0_DCC_BASE_EXT */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 15);
	ptr[i++] = 0x318;
	ptr[i++] = test_priv->dst.mc_address >> 8;
	ptr[i++] = test_priv->dst.mc_address >> 40;
	/* larger surface dimensions for the slow-hang configuration */
	ptr[i++] = test_priv->info->hang_slow ? 0x3ffc7ff : 0x7c01f;
	ptr[i++] = 0;
	ptr[i++] = 0x50438;
	ptr[i++] = 0x10140000;
	i += 9; /* remaining CB_COLOR0_* regs stay zero */

	/* mmCB_MRT0_EPITCH */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x1e8;
	ptr[i++] = test_priv->info->hang_slow ? 0xfff : 0x1f;

	/* 0xA32B   CB_COLOR1_BASE */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x32b;
	ptr[i++] = 0;

	/* 0xA33A   CB_COLOR1_BASE */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x33a;
	ptr[i++] = 0;

	/* SPI_SHADER_COL_FORMAT */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x1c5;
	ptr[i++] = 9;

	/* Setup depth buffer */
	/* mmDB_Z_INFO */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
	ptr[i++] = 0xe;
	i += 2; /* DB_Z_INFO pair left zero: no depth buffer */

	test_priv->cmd_curr = i;
}
/* gfx10 variant of the render-target setup: the CB_COLOR0_* block moved
 * some fields to separate *_EXT/ATTRIB2/ATTRIB3 registers.  Skipped
 * dwords stay 0 because the caller memset the IB.
 */
static void amdgpu_draw_setup_and_write_drawblt_surf_info_gfx10(struct shader_test_priv *test_priv)
{
	int i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;

	/* setup color buffer */
	/* 0xA318   CB_COLOR0_BASE
	   0xA319   CB_COLOR0_PITCH
	   0xA31A   CB_COLOR0_SLICE
	   0xA31B   CB_COLOR0_VIEW
	   0xA31C   CB_COLOR0_INFO
	   0xA31D   CB_COLOR0_ATTRIB
	   0xA31E   CB_COLOR0_DCC_CONTROL
	   0xA31F   CB_COLOR0_CMASK
	   0xA320   CB_COLOR0_CMASK_SLICE
	   0xA321   CB_COLOR0_FMASK
	   0xA322   CB_COLOR0_FMASK_SLICE
	   0xA323   CB_COLOR0_CLEAR_WORD0
	   0xA324   CB_COLOR0_CLEAR_WORD1
	   0xA325   CB_COLOR0_DCC_BASE */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 14);
	ptr[i++] = 0x318;
	ptr[i++] = test_priv->dst.mc_address >> 8;
	i += 3; /* PITCH/SLICE/VIEW left zero */
	ptr[i++] = 0x50438;
	i += 9; /* remaining CB_COLOR0_* regs stay zero */

	/* 0xA390   CB_COLOR0_BASE_EXT */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x390;
	ptr[i++] = test_priv->dst.mc_address >> 40;

	/* 0xA398   CB_COLOR0_CMASK_BASE_EXT */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x398;
	ptr[i++] = 0;

	/* 0xA3A0   CB_COLOR0_FMASK_BASE_EXT */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x3a0;
	ptr[i++] = 0;

	/* 0xA3A8   CB_COLOR0_DCC_BASE_EXT */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x3a8;
	ptr[i++] = 0;

	/* 0xA3B0   CB_COLOR0_ATTRIB2 */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x3b0;
	/* larger surface dimensions for the slow-hang configuration */
	ptr[i++] = test_priv->info->hang_slow ? 0x3ffc7ff : 0x7c01f;

	/* 0xA3B8   CB_COLOR0_ATTRIB3 */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x3b8;
	ptr[i++] = 0x9014000;

	/* 0xA32B   CB_COLOR1_BASE */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x32b;
	ptr[i++] = 0;

	/* 0xA33A   CB_COLOR1_BASE */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x33a;
	ptr[i++] = 0;

	/* SPI_SHADER_COL_FORMAT */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x1c5;
	ptr[i++] = 9;

	/* Setup depth buffer */
	/* mmDB_Z_INFO */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
	ptr[i++] = 0x10;
	i += 2; /* DB_Z_INFO pair left zero: no depth buffer */

	test_priv->cmd_curr = i;
}
+
+static void amdgpu_draw_setup_and_write_drawblt_surf_info(struct shader_test_priv *test_priv)
+{
+ switch (test_priv->info->version) {
+ case AMDGPU_TEST_GFX_V9:
+ amdgpu_draw_setup_and_write_drawblt_surf_info_gfx9(test_priv);
+ break;
+ case AMDGPU_TEST_GFX_V10:
+ amdgpu_draw_setup_and_write_drawblt_surf_info_gfx10(test_priv);
+ break;
+ }
+}
+
/* Program the remaining gfx9 draw state: tile steering, AA sample
 * locations (all zero) and the canned cached-state command block.
 */
static void amdgpu_draw_setup_and_write_drawblt_state_gfx9(struct shader_test_priv *test_priv)
{
	int i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;
	const struct shader_test_gfx_info *gfx_info = &shader_test_gfx_info[test_priv->info->version];

	/* mmPA_SC_TILE_STEERING_OVERRIDE */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0xd7;
	ptr[i++] = 0;

	/* two raw dwords kept from the captured golden stream —
	 * NOTE(review): 0xffff1000 is the type3 nop used elsewhere in this
	 * file; purpose of 0xc0021000 is not evident from here.
	 */
	ptr[i++] = 0xffff1000;
	ptr[i++] = 0xc0021000;

	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0xd7;
	ptr[i++] = 1;

	/* mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 16);
	ptr[i++] = 0x2fe;
	i += 16; /* sample locations left zero */

	/* mmPA_SC_CENTROID_PRIORITY_0 */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
	ptr[i++] = 0x2f5;
	i += 2; /* left zero */

	memcpy(ptr + i, gfx_info->cached_cmd, gfx_info->size_cached_cmd);
	/* patch dword 12 of the cached block for the larger hang_slow surface */
	if (test_priv->info->hang_slow)
		*(ptr + i + 12) = 0x8000800;

	test_priv->cmd_curr = i + gfx_info->size_cached_cmd/sizeof(uint32_t);
}
+
/* gfx10 variant of the draw-state setup; additionally programs the
 * RMI L2 cache control registers after the cached command block.
 */
static void amdgpu_draw_setup_and_write_drawblt_state_gfx10(struct shader_test_priv *test_priv)
{
	int i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;
	const struct shader_test_gfx_info *gfx_info = &shader_test_gfx_info[test_priv->info->version];

	/* mmPA_SC_TILE_STEERING_OVERRIDE */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0xd7;
	ptr[i++] = 0;

	/* two raw dwords kept from the captured golden stream —
	 * NOTE(review): 0xffff1000 is the type3 nop used elsewhere in this
	 * file; purpose of 0xc0021000 is not evident from here.
	 */
	ptr[i++] = 0xffff1000;
	ptr[i++] = 0xc0021000;

	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0xd7;
	ptr[i++] = 0;

	/* mmPA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 16);
	ptr[i++] = 0x2fe;
	i += 16; /* sample locations left zero */

	/* mmPA_SC_CENTROID_PRIORITY_0 */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 2);
	ptr[i++] = 0x2f5;
	i += 2; /* left zero */

	memcpy(ptr + i, gfx_info->cached_cmd, gfx_info->size_cached_cmd);
	/* patch dword 12 of the cached block for the larger hang_slow surface */
	if (test_priv->info->hang_slow)
		*(ptr + i + 12) = 0x8000800;
	i += gfx_info->size_cached_cmd/sizeof(uint32_t);

	/* mmCB_RMI_GL2_CACHE_CONTROL */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x104;
	ptr[i++] = 0x40aa0055;
	/* mmDB_RMI_L2_CACHE_CONTROL */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x1f;
	ptr[i++] = 0x2a0055;

	test_priv->cmd_curr = i;
}
+
+static void amdgpu_draw_setup_and_write_drawblt_state(struct shader_test_priv *test_priv)
+{
+ switch (test_priv->info->version) {
+ case AMDGPU_TEST_GFX_V9:
+ amdgpu_draw_setup_and_write_drawblt_state_gfx9(test_priv);
+ break;
+ case AMDGPU_TEST_GFX_V10:
+ amdgpu_draw_setup_and_write_drawblt_state_gfx10(test_priv);
+ break;
+ }
+}
+
/* Program the gfx9 vertex shader state for RectPosTexFast: shader
 * address, RSRC registers and the user-data SGPR constants (rect
 * position/scale floats; tex coordinates only for the PS_TEX shader).
 */
static void amdgpu_draw_vs_RectPosTexFast_write2hw_gfx9(struct shader_test_priv *test_priv)
{
	int i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;
	uint64_t shader_addr = test_priv->shader_draw.vs_bo.mc_address;
	enum ps_type ps = test_priv->shader_draw.ps_type;

	/* mmPA_CL_VS_OUT_CNTL */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x207;
	ptr[i++] = 0;

	/* mmSPI_SHADER_PGM_RSRC3_VS */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 1);
	ptr[i++] = 0x46;
	ptr[i++] = 0xffff;

	/* mmSPI_SHADER_PGM_LO_VS...mmSPI_SHADER_PGM_HI_VS */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ptr[i++] = 0x48;
	ptr[i++] = shader_addr >> 8; /* program address is 256-byte aligned */
	ptr[i++] = shader_addr >> 40;

	/* mmSPI_SHADER_PGM_RSRC1_VS */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 1);
	ptr[i++] = 0x4a;
	ptr[i++] = 0xc0081;

	/* mmSPI_SHADER_PGM_RSRC2_VS */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 1);
	ptr[i++] = 0x4b;
	ptr[i++] = 0x18;

	/* mmSPI_VS_OUT_CONFIG */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x1b1;
	ptr[i++] = 2;

	/* mmSPI_SHADER_POS_FORMAT */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x1c3;
	ptr[i++] = 4;

	/* user-data constants: 0x42000000 = 32.0f, 0x45000000 = 2048.0f
	 * (larger extent for the slow-hang surface) */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 4);
	ptr[i++] = 0x4c;
	i += 2;
	ptr[i++] = test_priv->info->hang_slow ? 0x45000000 : 0x42000000;
	ptr[i++] = test_priv->info->hang_slow ? 0x45000000 : 0x42000000;

	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 4);
	ptr[i++] = 0x50;
	i += 2;
	if (ps == PS_CONST) {
		i += 2; /* constants left zero */
	} else if (ps == PS_TEX) {
		ptr[i++] = 0x3f800000; /* 1.0f */
		ptr[i++] = 0x3f800000;
	}

	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 4);
	ptr[i++] = 0x54;
	i += 4; /* left zero */

	test_priv->cmd_curr = i;
}
+
/* gfx10 variant of the vertex shader programming: RSRC3/RSRC4 go
 * through SET_SH_REG_INDEX and RSRC1 has a different value; the
 * user-data constant layout matches the gfx9 path.
 */
static void amdgpu_draw_vs_RectPosTexFast_write2hw_gfx10(struct shader_test_priv *test_priv)
{
	int i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;
	uint64_t shader_addr = test_priv->shader_draw.vs_bo.mc_address;
	enum ps_type ps = test_priv->shader_draw.ps_type;

	/* mmPA_CL_VS_OUT_CNTL */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x207;
	ptr[i++] = 0;

	/* mmSPI_SHADER_PGM_RSRC3_VS */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG_INDEX, 1);
	ptr[i++] = 0x30000046;
	ptr[i++] = 0xffff;
	/* mmSPI_SHADER_PGM_RSRC4_VS */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG_INDEX, 1);
	ptr[i++] = 0x30000041;
	ptr[i++] = 0xffff;

	/* mmSPI_SHADER_PGM_LO_VS...mmSPI_SHADER_PGM_HI_VS */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 2);
	ptr[i++] = 0x48;
	ptr[i++] = shader_addr >> 8; /* program address is 256-byte aligned */
	ptr[i++] = shader_addr >> 40;

	/* mmSPI_SHADER_PGM_RSRC1_VS */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 1);
	ptr[i++] = 0x4a;
	ptr[i++] = 0xc0041;
	/* mmSPI_SHADER_PGM_RSRC2_VS */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 1);
	ptr[i++] = 0x4b;
	ptr[i++] = 0x18;

	/* mmSPI_VS_OUT_CONFIG */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x1b1;
	ptr[i++] = 2;

	/* mmSPI_SHADER_POS_FORMAT */
	ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
	ptr[i++] = 0x1c3;
	ptr[i++] = 4;

	/* user-data constants: 0x42000000 = 32.0f, 0x45000000 = 2048.0f
	 * (larger extent for the slow-hang surface) */
	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 4);
	ptr[i++] = 0x4c;
	i += 2;
	ptr[i++] = test_priv->info->hang_slow ? 0x45000000 : 0x42000000;
	ptr[i++] = test_priv->info->hang_slow ? 0x45000000 : 0x42000000;

	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 4);
	ptr[i++] = 0x50;
	i += 2;
	if (ps == PS_CONST) {
		i += 2; /* constants left zero */
	} else if (ps == PS_TEX) {
		ptr[i++] = 0x3f800000; /* 1.0f */
		ptr[i++] = 0x3f800000;
	}

	ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 4);
	ptr[i++] = 0x54;
	i += 4; /* left zero */

	test_priv->cmd_curr = i;
}
+
+static void amdgpu_draw_vs_RectPosTexFast_write2hw(struct shader_test_priv *test_priv)
+{
+ switch (test_priv->info->version) {
+ case AMDGPU_TEST_GFX_V9:
+ amdgpu_draw_vs_RectPosTexFast_write2hw_gfx9(test_priv);
+ break;
+ case AMDGPU_TEST_GFX_V10:
+ amdgpu_draw_vs_RectPosTexFast_write2hw_gfx10(test_priv);
+ break;
+ }
+}
+
/* Program the pixel shader state, shared by gfx9 and gfx10 (only the
 * PGM/RSRC packet forms differ).  Then copies the shader's SH and
 * context register lists, converting absolute offsets to packet-relative
 * ones (SH base 0x2c00, context base 0xa000).
 */
static void amdgpu_draw_ps_write2hw_gfx9_10(struct shader_test_priv *test_priv)
{
	int i, j;
	uint64_t shader_addr = test_priv->shader_draw.ps_bo.mc_address;
	const struct shader_test_ps_shader *ps = &shader_test_ps[test_priv->info->version][test_priv->shader_draw.ps_type];
	uint32_t *ptr = test_priv->cmd.ptr;

	i = test_priv->cmd_curr;

	if (test_priv->info->version == AMDGPU_TEST_GFX_V9) {
		/* 0x2c07   SPI_SHADER_PGM_RSRC3_PS
		   0x2c08   SPI_SHADER_PGM_LO_PS
		   0x2c09   SPI_SHADER_PGM_HI_PS */
		/* multiplicator 9 is from  SPI_SHADER_COL_FORMAT */
		if (!test_priv->info->hang)
			shader_addr += 256 * 9;
		ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 3);
		ptr[i++] = 0x7;
		ptr[i++] = 0xffff;
		ptr[i++] = shader_addr >> 8;
		ptr[i++] = shader_addr >> 40;
	} else {
		/* NOTE(review): unlike the gfx9 branch above, the offset is
		 * applied even in the hang case (the original guard survives
		 * only as a commented-out line) — verify this asymmetry is
		 * intentional.
		 */
		shader_addr += 256 * 9;
		/* 0x2c08	 SPI_SHADER_PGM_LO_PS
		   0x2c09	 SPI_SHADER_PGM_HI_PS */
		ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 2);
		ptr[i++] = 0x8;
		ptr[i++] = shader_addr >> 8;
		ptr[i++] = shader_addr >> 40;

		/* mmSPI_SHADER_PGM_RSRC3_PS */
		ptr[i++] = PACKET3(PACKET3_SET_SH_REG_INDEX, 1);
		ptr[i++] = 0x30000007;
		ptr[i++] = 0xffff;
		/* mmSPI_SHADER_PGM_RSRC4_PS */
		ptr[i++] = PACKET3(PACKET3_SET_SH_REG_INDEX, 1);
		ptr[i++] = 0x30000001;
		ptr[i++] = 0xffff;
	}

	for (j = 0; j < ps->num_sh_reg; j++) {
		ptr[i++] = PACKET3(PACKET3_SET_SH_REG, 1);
		ptr[i++] = ps->sh_reg[j].reg_offset - 0x2c00;
		ptr[i++] = ps->sh_reg[j].reg_value;
	}

	for (j = 0; j < ps->num_context_reg; j++) {
		/* SPI_SHADER_COL_FORMAT (0xA1C5) is programmed elsewhere; skip it */
		if (ps->context_reg[j].reg_offset != 0xA1C5) {
			ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
			ptr[i++] = ps->context_reg[j].reg_offset - 0xa000;
			ptr[i++] = ps->context_reg[j].reg_value;
		}

		/* after SPI_PS_IN_CONTROL (0xA1B4), also set 0x1b3 */
		if (ps->context_reg[j].reg_offset == 0xA1B4) {
			ptr[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
			ptr[i++] = 0x1b3;
			ptr[i++] = 2;
		}
	}

	test_priv->cmd_curr = i;
}
+
+static void amdgpu_draw_ps_write2hw(struct shader_test_priv *test_priv)
+{
+ switch (test_priv->info->version) {
+ case AMDGPU_TEST_GFX_V9:
+ case AMDGPU_TEST_GFX_V10:
+ amdgpu_draw_ps_write2hw_gfx9_10(test_priv);
+ break;
+ }
+}
+
/* Emit the per-generation primitive setup followed by a
 * DRAW_INDEX_AUTO packet (index count 3).
 */
static void amdgpu_draw_draw(struct shader_test_priv *test_priv)
{
	int i = test_priv->cmd_curr;
	uint32_t *ptr = test_priv->cmd.ptr;

	switch (test_priv->info->version) {
	case AMDGPU_TEST_GFX_V9:
		/* mmIA_MULTI_VGT_PARAM */
		ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
		ptr[i++] = 0x40000258;
		ptr[i++] = 0xd00ff;
		/* mmVGT_PRIMITIVE_TYPE */
		ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
		ptr[i++] = 0x10000242;
		ptr[i++] = 0x11;
		break;
	case AMDGPU_TEST_GFX_V10:
		/* mmGE_CNTL */
		ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
		ptr[i++] = 0x25b;
		ptr[i++] = 0xff;
		/* mmVGT_PRIMITIVE_TYPE */
		ptr[i++] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
		ptr[i++] = 0x242;
		ptr[i++] = 0x11;
		break;
	}

	ptr[i++] = PACKET3(PACKET3_DRAW_INDEX_AUTO, 1);
	ptr[i++] = 3; /* index count */
	ptr[i++] = 2; /* draw initiator */

	test_priv->cmd_curr = i;
}
+
/* Draw with the PS_CONST pixel shader and verify the destination color
 * buffer was filled with the 0x33333333 constant.  Allocates PS/VS
 * shader BOs, a command BO and the render target, builds the full draw
 * IB, submits it on test_info->ip/ring, waits for the fence and
 * spot-checks the start, end and middle of dst.
 */
static void amdgpu_memset_draw_test(struct shader_test_info *test_info)
{
	struct shader_test_priv test_priv;
	amdgpu_context_handle context_handle;
	struct shader_test_bo *ps_bo = &(test_priv.shader_draw.ps_bo);
	struct shader_test_bo *vs_bo = &(test_priv.shader_draw.vs_bo);
	struct shader_test_bo *dst = &(test_priv.dst);
	struct shader_test_bo *cmd = &(test_priv.cmd);
	amdgpu_bo_handle resources[4];
	uint8_t *ptr_dst;
	uint32_t *ptr_cmd;
	int i, r;
	struct amdgpu_cs_request ibs_request = {0};
	struct amdgpu_cs_ib_info ib_info = {0};
	struct amdgpu_cs_fence fence_status = {0};
	uint32_t expired;
	amdgpu_bo_list_handle bo_list;
	uint8_t cptr[16];

	memset(&test_priv, 0, sizeof(test_priv));
	test_priv.info = test_info;

	r = amdgpu_cs_ctx_create(test_info->device_handle, &context_handle);
	CU_ASSERT_EQUAL(r, 0);

	ps_bo->size = 0x2000;
	ps_bo->heap = AMDGPU_GEM_DOMAIN_VRAM;
	r = shader_test_bo_alloc(test_info->device_handle, ps_bo);
	CU_ASSERT_EQUAL(r, 0);
	memset(ps_bo->ptr, 0, ps_bo->size);

	vs_bo->size = 4096;
	vs_bo->heap = AMDGPU_GEM_DOMAIN_VRAM;
	r = shader_test_bo_alloc(test_info->device_handle, vs_bo);
	CU_ASSERT_EQUAL(r, 0);
	memset(vs_bo->ptr, 0, vs_bo->size);

	test_priv.shader_draw.ps_type = PS_CONST;
	amdgpu_draw_load_ps_shader(&test_priv);

	test_priv.shader_draw.vs_type = VS_RECTPOSTEXFAST;
	amdgpu_draw_load_vs_shader(&test_priv);

	/* IB lives in GTT so the CPU can write packets into it directly */
	cmd->size = 4096;
	cmd->heap = AMDGPU_GEM_DOMAIN_GTT;
	r = shader_test_bo_alloc(test_info->device_handle, cmd);
	CU_ASSERT_EQUAL(r, 0);
	ptr_cmd = cmd->ptr;
	memset(ptr_cmd, 0, cmd->size);

	dst->size = 0x4000;
	dst->heap = AMDGPU_GEM_DOMAIN_VRAM;
	r = shader_test_bo_alloc(test_info->device_handle, dst);
	CU_ASSERT_EQUAL(r, 0);

	amdgpu_draw_init(&test_priv);

	amdgpu_draw_setup_and_write_drawblt_surf_info(&test_priv);

	amdgpu_draw_setup_and_write_drawblt_state(&test_priv);

	amdgpu_draw_vs_RectPosTexFast_write2hw(&test_priv);

	amdgpu_draw_ps_write2hw(&test_priv);

	i = test_priv.cmd_curr;
	/* ps constant data: the 0x33333333 color verified below */
	ptr_cmd[i++] = PACKET3(PACKET3_SET_SH_REG, 4);
	ptr_cmd[i++] = 0xc;
	ptr_cmd[i++] = 0x33333333;
	ptr_cmd[i++] = 0x33333333;
	ptr_cmd[i++] = 0x33333333;
	ptr_cmd[i++] = 0x33333333;
	test_priv.cmd_curr = i;

	amdgpu_draw_draw(&test_priv);

	/* pad the IB to a multiple of 8 dwords */
	i = test_priv.cmd_curr;
	while (i & 7)
		ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
	test_priv.cmd_curr = i;

	i = 0;
	resources[i++] = dst->bo;
	resources[i++] = ps_bo->bo;
	resources[i++] = vs_bo->bo;
	resources[i++] = cmd->bo;
	r = amdgpu_bo_list_create(test_info->device_handle, i, resources, NULL, &bo_list);
	CU_ASSERT_EQUAL(r, 0);

	ib_info.ib_mc_address = cmd->mc_address;
	ib_info.size = test_priv.cmd_curr;
	ibs_request.ip_type = test_info->ip;
	ibs_request.ring = test_info->ring;
	ibs_request.resources = bo_list;
	ibs_request.number_of_ibs = 1;
	ibs_request.ibs = &ib_info;
	ibs_request.fence_info.handle = NULL;

	/* submit CS */
	r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_bo_list_destroy(bo_list);
	CU_ASSERT_EQUAL(r, 0);

	fence_status.ip_type = test_info->ip;
	fence_status.ip_instance = 0;
	fence_status.ring = test_info->ring;
	fence_status.context = context_handle;
	fence_status.fence = ibs_request.seq_no;

	/* wait for IB accomplished */
	r = amdgpu_cs_query_fence_status(&fence_status,
					 AMDGPU_TIMEOUT_INFINITE,
					 0, &expired);
	CU_ASSERT_EQUAL(r, 0);
	CU_ASSERT_EQUAL(expired, true);

	/* verify if memset test result meets with expected */
	i = 0;
	ptr_dst = dst->ptr;
	memset(cptr, 0x33, 16);
	CU_ASSERT_EQUAL(memcmp(ptr_dst + i, cptr, 16), 0);
	i = dst->size - 16;
	CU_ASSERT_EQUAL(memcmp(ptr_dst + i, cptr, 16), 0);
	i = dst->size / 2;
	CU_ASSERT_EQUAL(memcmp(ptr_dst + i, cptr, 16), 0);

	r = shader_test_bo_free(dst);
	CU_ASSERT_EQUAL(r, 0);

	r = shader_test_bo_free(cmd);
	CU_ASSERT_EQUAL(r, 0);

	r = shader_test_bo_free(ps_bo);
	CU_ASSERT_EQUAL(r, 0);

	r = shader_test_bo_free(vs_bo);
	CU_ASSERT_EQUAL(r, 0);

	r = amdgpu_cs_ctx_free(context_handle);
	CU_ASSERT_EQUAL(r, 0);
}
+
+/*
+ * Draw-engine memcpy test: renders with a texture-sampling pixel shader
+ * (PS_TEX) so the GPU copies the contents of the src BO into the dst BO,
+ * then spot-checks the result. When test_info->hang is set, the shader
+ * load helpers presumably select a hanging variant (not visible here —
+ * confirm in amdgpu_draw_load_ps_shader); in that case the test expects
+ * the context to report AMDGPU_CTX_UNKNOWN_RESET instead of a clean fence.
+ * test_info->hang_slow scales the shader/src/dst BOs up to make the hang
+ * take longer to detect.
+ */
+static void amdgpu_memcpy_draw_test(struct shader_test_info *test_info)
+{
+ struct shader_test_priv test_priv;
+ amdgpu_context_handle context_handle;
+ struct shader_test_bo *ps_bo = &(test_priv.shader_draw.ps_bo);
+ struct shader_test_bo *vs_bo = &(test_priv.shader_draw.vs_bo);
+ struct shader_test_bo *src = &(test_priv.src);
+ struct shader_test_bo *dst = &(test_priv.dst);
+ struct shader_test_bo *cmd = &(test_priv.cmd);
+ amdgpu_bo_handle resources[5];
+ uint8_t *ptr_dst;
+ uint8_t *ptr_src;
+ uint32_t *ptr_cmd;
+ int i, r;
+ struct amdgpu_cs_request ibs_request = {0};
+ struct amdgpu_cs_ib_info ib_info = {0};
+ uint32_t hang_state, hangs;
+ uint32_t expired;
+ amdgpu_bo_list_handle bo_list;
+ struct amdgpu_cs_fence fence_status = {0};
+
+ /* Configure all BO sizes/heaps and shader types up front; the actual
+  * allocations happen below via shader_test_bo_alloc(). */
+ memset(&test_priv, 0, sizeof(test_priv));
+ test_priv.info = test_info;
+ test_priv.cmd.size = 4096;
+ test_priv.cmd.heap = AMDGPU_GEM_DOMAIN_GTT;
+
+ ps_bo->heap = AMDGPU_GEM_DOMAIN_VRAM;
+ test_priv.shader_draw.ps_type = PS_TEX;
+ vs_bo->size = 4096;
+ vs_bo->heap = AMDGPU_GEM_DOMAIN_VRAM;
+ test_priv.shader_draw.vs_type = VS_RECTPOSTEXFAST;
+ test_priv.src.heap = AMDGPU_GEM_DOMAIN_VRAM;
+ test_priv.dst.heap = AMDGPU_GEM_DOMAIN_VRAM;
+ if (test_info->hang_slow) {
+  /* Larger shader and 64 MiB surfaces so a slow hang has time to bite. */
+  test_priv.shader_draw.ps_bo.size = 16*1024*1024;
+  test_priv.src.size = 0x4000000;
+  test_priv.dst.size = 0x4000000;
+ } else {
+  test_priv.shader_draw.ps_bo.size = 0x2000;
+  test_priv.src.size = 0x4000;
+  test_priv.dst.size = 0x4000;
+ }
+
+ r = amdgpu_cs_ctx_create(test_info->device_handle, &context_handle);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_alloc(test_info->device_handle, ps_bo);
+ CU_ASSERT_EQUAL(r, 0);
+ memset(ps_bo->ptr, 0, ps_bo->size);
+
+ r = shader_test_bo_alloc(test_info->device_handle, vs_bo);
+ CU_ASSERT_EQUAL(r, 0);
+ memset(vs_bo->ptr, 0, vs_bo->size);
+
+ amdgpu_draw_load_ps_shader(&test_priv);
+ amdgpu_draw_load_vs_shader(&test_priv);
+
+ r = shader_test_bo_alloc(test_info->device_handle, cmd);
+ CU_ASSERT_EQUAL(r, 0);
+ ptr_cmd = cmd->ptr;
+ memset(ptr_cmd, 0, cmd->size);
+
+ /* Fill the copy source with a recognizable pattern to compare against. */
+ r = shader_test_bo_alloc(test_info->device_handle, src);
+ CU_ASSERT_EQUAL(r, 0);
+ ptr_src = src->ptr;
+ memset(ptr_src, 0x55, src->size);
+
+ r = shader_test_bo_alloc(test_info->device_handle, dst);
+ CU_ASSERT_EQUAL(r, 0);
+
+ /* Build the IB: state init, render-target/state setup, VS/PS program
+  * registers, then the texture descriptor and draw packet. Each helper
+  * advances test_priv.cmd_curr. */
+ amdgpu_draw_init(&test_priv);
+
+ amdgpu_draw_setup_and_write_drawblt_surf_info(&test_priv);
+
+ amdgpu_draw_setup_and_write_drawblt_state(&test_priv);
+
+ amdgpu_draw_vs_RectPosTexFast_write2hw(&test_priv);
+
+ amdgpu_draw_ps_write2hw(&test_priv);
+
+ // write ps user constant data
+ i = test_priv.cmd_curr;
+ /* Texture resource descriptor for the src image; the dword layout and
+  * size/pitch encodings differ per GFX generation and hang_slow size.
+  * NOTE(review): magic values match the per-ASIC descriptor format —
+  * verify against the GFX9/GFX10 register specs if touched. */
+ ptr_cmd[i++] = PACKET3(PACKET3_SET_SH_REG, 8);
+ switch (test_info->version) {
+ case AMDGPU_TEST_GFX_V9:
+  ptr_cmd[i++] = 0xc;
+  ptr_cmd[i++] = src->mc_address >> 8;
+  ptr_cmd[i++] = src->mc_address >> 40 | 0x10e00000;
+  ptr_cmd[i++] = test_info->hang_slow ? 0x1ffcfff : 0x7c01f;
+  ptr_cmd[i++] = 0x90500fac;
+  ptr_cmd[i++] = test_info->hang_slow ? 0x1ffe000 : 0x3e000;
+  i += 3;
+  break;
+ case AMDGPU_TEST_GFX_V10:
+  ptr_cmd[i++] = 0xc;
+  ptr_cmd[i++] = src->mc_address >> 8;
+  ptr_cmd[i++] = src->mc_address >> 40 | 0xc4b00000;
+  ptr_cmd[i++] = test_info->hang_slow ? 0x81ffc1ff : 0x8007c007;
+  ptr_cmd[i++] = 0x90500fac;
+  i += 2;
+  ptr_cmd[i++] = test_info->hang_slow ? 0 : 0x400;
+  i++;
+  break;
+ }
+
+ /* Sampler state (4 SH regs starting at offset 0x14). */
+ ptr_cmd[i++] = PACKET3(PACKET3_SET_SH_REG, 4);
+ ptr_cmd[i++] = 0x14;
+ ptr_cmd[i++] = 0x92;
+ i += 3;
+
+ ptr_cmd[i++] = PACKET3(PACKET3_SET_CONTEXT_REG, 1);
+ ptr_cmd[i++] = 0x191;
+ ptr_cmd[i++] = 0;
+ test_priv.cmd_curr = i;
+
+ amdgpu_draw_draw(&test_priv);
+
+ /* Pad the IB to an 8-dword boundary with type-3 NOP packets. */
+ i = test_priv.cmd_curr;
+ while (i & 7)
+  ptr_cmd[i++] = 0xffff1000; /* type3 nop packet */
+ test_priv.cmd_curr = i;
+
+ i = 0;
+ resources[i++] = dst->bo;
+ resources[i++] = src->bo;
+ resources[i++] = ps_bo->bo;
+ resources[i++] = vs_bo->bo;
+ resources[i++] = cmd->bo;
+ r = amdgpu_bo_list_create(test_info->device_handle, i, resources, NULL, &bo_list);
+ CU_ASSERT_EQUAL(r, 0);
+
+ ib_info.ib_mc_address = cmd->mc_address;
+ ib_info.size = test_priv.cmd_curr;
+ ibs_request.ip_type = test_info->ip;
+ ibs_request.ring = test_info->ring;
+ ibs_request.resources = bo_list;
+ ibs_request.number_of_ibs = 1;
+ ibs_request.ibs = &ib_info;
+ ibs_request.fence_info.handle = NULL;
+ r = amdgpu_cs_submit(context_handle, 0, &ibs_request, 1);
+ CU_ASSERT_EQUAL(r, 0);
+
+ fence_status.ip_type = test_info->ip;
+ fence_status.ip_instance = 0;
+ fence_status.ring = test_info->ring;
+ fence_status.context = context_handle;
+ fence_status.fence = ibs_request.seq_no;
+
+ /* wait for IB accomplished */
+ r = amdgpu_cs_query_fence_status(&fence_status,
+     AMDGPU_TIMEOUT_INFINITE,
+     0, &expired);
+ if (!test_info->hang) {
+  CU_ASSERT_EQUAL(r, 0);
+  CU_ASSERT_EQUAL(expired, true);
+
+  /* verify if memcpy test result meets with expected */
+  /* Spot-check 16 bytes at the start, end, and middle of dst. */
+  i = 0;
+  ptr_dst = dst->ptr;
+  CU_ASSERT_EQUAL(memcmp(ptr_dst + i, ptr_src + i, 16), 0);
+  i = dst->size - 16;
+  CU_ASSERT_EQUAL(memcmp(ptr_dst + i, ptr_src + i, 16), 0);
+  i = dst->size / 2;
+  CU_ASSERT_EQUAL(memcmp(ptr_dst + i, ptr_src + i, 16), 0);
+ } else {
+  /* Hang path: the submission must have triggered a GPU reset. */
+  r = amdgpu_cs_query_reset_state(context_handle, &hang_state, &hangs);
+  CU_ASSERT_EQUAL(r, 0);
+  CU_ASSERT_EQUAL(hang_state, AMDGPU_CTX_UNKNOWN_RESET);
+ }
+
+ r = amdgpu_bo_list_destroy(bo_list);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_free(src);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_free(dst);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_free(cmd);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_free(ps_bo);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = shader_test_bo_free(vs_bo);
+ CU_ASSERT_EQUAL(r, 0);
+
+ r = amdgpu_cs_ctx_free(context_handle);
+ CU_ASSERT_EQUAL(r, 0);
+}
+
+/* Per-ring callback for shader_test_for_each(): run both draw-based
+ * tests (memset then memcpy) on the given ring. */
+static void shader_test_draw_cb(struct shader_test_info *test_info)
+{
+ amdgpu_memset_draw_test(test_info);
+ amdgpu_memcpy_draw_test(test_info);
+}
+
+/* Hang-test callback: good submission, hanging submission, then another
+ * good submission — the final pass verifies the GPU recovered after the
+ * reset triggered by the hang. */
+static void shader_test_draw_hang_cb(struct shader_test_info *test_info)
+{
+ test_info->hang = 0;
+ amdgpu_memcpy_draw_test(test_info);
+
+ test_info->hang = 1;
+ amdgpu_memcpy_draw_test(test_info);
+
+ /* Confirm the ring still works after recovery. */
+ test_info->hang = 0;
+ amdgpu_memcpy_draw_test(test_info);
+}
+
+/* Slow-hang callback: same good/hang/good sandwich as the fast hang test,
+ * but the hanging pass also sets hang_slow so the larger BO variant is
+ * used (see amdgpu_memcpy_draw_test). */
+static void shader_test_draw_hang_slow_cb(struct shader_test_info *test_info)
+{
+ test_info->hang = 0;
+ test_info->hang_slow = 0;
+ amdgpu_memcpy_draw_test(test_info);
+
+ test_info->hang = 1;
+ test_info->hang_slow = 1;
+ amdgpu_memcpy_draw_test(test_info);
+
+ /* Confirm the ring still works after recovery. */
+ test_info->hang = 0;
+ test_info->hang_slow = 0;
+ amdgpu_memcpy_draw_test(test_info);
+}
+
+
+/* Public entry: run the draw memset/memcpy tests on every GFX ring. */
+void amdgpu_test_draw_helper(amdgpu_device_handle device_handle)
+{
+ shader_test_for_each(device_handle, AMDGPU_HW_IP_GFX, shader_test_draw_cb);
+}
+
+/* Public entry: run the draw hang/recovery test on every GFX ring. */
+void amdgpu_test_draw_hang_helper(amdgpu_device_handle device_handle)
+{
+ shader_test_for_each(device_handle, AMDGPU_HW_IP_GFX, shader_test_draw_hang_cb);
+}
+
+/* Public entry: run the slow-hang draw test on every GFX ring. */
+void amdgpu_test_draw_hang_slow_helper(amdgpu_device_handle device_handle)
+{
+ shader_test_for_each(device_handle, AMDGPU_HW_IP_GFX, shader_test_draw_hang_slow_cb);
+}