author    Dave Airlie <airlied@redhat.com>  2017-04-12 01:32:12 +0100
committer Dave Airlie <airlied@redhat.com>  2017-05-24 06:53:18 +0100
commit    f8bef89fc5aecb6b88ea701d443579e2e21e3b18
tree      eda395ecc876157ccd3e762ff64ca14d8f9c6a7a
parent    d3bd12b471d153bab3e96d6a4e87447774fad883
amdgpu: add semaphore support (drm-syncobj)
-rw-r--r--  amdgpu/amdgpu.h          |  26
-rw-r--r--  amdgpu/amdgpu_cs.c       | 119
-rw-r--r--  include/drm/amdgpu_drm.h |   6
3 files changed, 142 insertions(+), 9 deletions(-)
diff --git a/amdgpu/amdgpu.h b/amdgpu/amdgpu.h
index 55884b24..73c20687 100644
--- a/amdgpu/amdgpu.h
+++ b/amdgpu/amdgpu.h
@@ -365,6 +365,16 @@ struct amdgpu_cs_request {
struct amdgpu_cs_fence_info fence_info;
};
+struct amdgpu_cs_request_syncobj {
+ /*
+  * Syncobj handles the submission waits on before it runs,
+  * and syncobj handles it signals once the IBs complete.
+  */
+ uint32_t number_in_syncobj;
+ uint32_t *in_syncobj;
+ uint32_t number_out_syncobj;
+ uint32_t *out_syncobj;
+};
+
/**
* Structure which provide information about GPU VM MC Address space
* alignments requirements
@@ -882,6 +892,12 @@ int amdgpu_cs_submit(amdgpu_context_handle context,
struct amdgpu_cs_request *ibs_request,
uint32_t number_of_requests);
+int amdgpu_cs_submit_syncobj(amdgpu_context_handle context,
+ uint64_t flags,
+ struct amdgpu_cs_request *ibs_request,
+ struct amdgpu_cs_request_syncobj *ibs_syncobj,
+ uint32_t number_of_requests);
+
/**
* Query status of Command Buffer Submission
*
@@ -1301,4 +1317,14 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem);
*/
const char *amdgpu_get_marketing_name(amdgpu_device_handle dev);
+int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
+ uint32_t *syncobj);
+int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
+ uint32_t syncobj,
+ int *shared_fd);
+int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
+ int shared_fd,
+ uint32_t *syncobj);
+int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
+ uint32_t syncobj);
#endif /* #ifdef _AMDGPU_H_ */
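
For context, here is a minimal caller-side sketch (not part of the patch) of how the new entry points fit together. It assumes the usual setup via amdgpu_device_initialize() and amdgpu_cs_ctx_create(), an already-populated struct amdgpu_cs_request, and a hypothetical helper name submit_with_syncobjs():

    static int submit_with_syncobjs(amdgpu_device_handle dev,
                                    amdgpu_context_handle ctx,
                                    struct amdgpu_cs_request *ibs_request)
    {
        struct amdgpu_cs_request_syncobj syncobj_req = { 0 };
        uint32_t wait_obj, signal_obj;
        int r;

        /* One syncobj to wait on before the IBs run, one to signal
         * when they complete. */
        r = amdgpu_cs_create_syncobj(dev, &wait_obj);
        if (r)
            return r;
        r = amdgpu_cs_create_syncobj(dev, &signal_obj);
        if (r)
            goto err_wait;

        syncobj_req.number_in_syncobj = 1;
        syncobj_req.in_syncobj = &wait_obj;
        syncobj_req.number_out_syncobj = 1;
        syncobj_req.out_syncobj = &signal_obj;

        /* One request paired with one syncobj descriptor. */
        r = amdgpu_cs_submit_syncobj(ctx, 0, ibs_request, &syncobj_req, 1);

        /* Real code would keep signal_obj alive until a consumer has
         * imported or waited on it; destroyed here only to keep the
         * sketch self-contained. */
        amdgpu_cs_destroy_syncobj(dev, signal_obj);
    err_wait:
        amdgpu_cs_destroy_syncobj(dev, wait_obj);
        return r;
    }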
diff --git a/amdgpu/amdgpu_cs.c b/amdgpu/amdgpu_cs.c
index fb5b3a8c..9d2ff8e2 100644
--- a/amdgpu/amdgpu_cs.c
+++ b/amdgpu/amdgpu_cs.c
@@ -170,7 +170,8 @@ int amdgpu_cs_query_reset_state(amdgpu_context_handle context,
* \sa amdgpu_cs_submit()
*/
static int amdgpu_cs_submit_one(amdgpu_context_handle context,
- struct amdgpu_cs_request *ibs_request)
+ struct amdgpu_cs_request *ibs_request,
+ struct amdgpu_cs_request_syncobj *syncobj_request)
{
union drm_amdgpu_cs cs;
uint64_t *chunk_array;
@@ -178,10 +179,13 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
struct drm_amdgpu_cs_chunk_data *chunk_data;
struct drm_amdgpu_cs_chunk_dep *dependencies = NULL;
struct drm_amdgpu_cs_chunk_dep *sem_dependencies = NULL;
+ struct drm_amdgpu_cs_chunk_sem *in_syncobj_dependencies = NULL;
+ struct drm_amdgpu_cs_chunk_sem *out_syncobj_dependencies = NULL;
struct list_head *sem_list;
amdgpu_semaphore_handle sem, tmp;
- uint32_t i, size, sem_count = 0;
+ uint32_t i, j, size, sem_count = 0;
bool user_fence;
+ uint32_t sem_size = 0;
int r = 0;
if (ibs_request->ip_type >= AMDGPU_HW_IP_NUM)
@@ -196,7 +200,11 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
}
user_fence = (ibs_request->fence_info.handle != NULL);
- size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1;
+ if (syncobj_request) {
+ sem_size += syncobj_request->number_in_syncobj ? 1 : 0;
+ sem_size += syncobj_request->number_out_syncobj ? 1 : 0;
+ }
+ size = ibs_request->number_of_ibs + (user_fence ? 2 : 1) + 1 + sem_size;
chunk_array = alloca(sizeof(uint64_t) * size);
chunks = alloca(sizeof(struct drm_amdgpu_cs_chunk) * size);
@@ -308,6 +316,45 @@ static int amdgpu_cs_submit_one(amdgpu_context_handle context,
chunks[i].chunk_data = (uint64_t)(uintptr_t)sem_dependencies;
}
+ if (syncobj_request) {
+ if (syncobj_request->number_in_syncobj) {
+ in_syncobj_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * syncobj_request->number_in_syncobj);
+ if (!in_syncobj_dependencies) {
+ r = -ENOMEM;
+ goto error_unlock;
+ }
+ for (j = 0; j < syncobj_request->number_in_syncobj; j++) {
+ struct drm_amdgpu_cs_chunk_sem *dep = &in_syncobj_dependencies[j];
+ dep->handle = syncobj_request->in_syncobj[j];
+ }
+ i = cs.in.num_chunks++;
+
+ /* syncobj dependencies (wait) chunk */
+ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * syncobj_request->number_in_syncobj;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)in_syncobj_dependencies;
+ }
+ if (syncobj_request->number_out_syncobj) {
+ out_syncobj_dependencies = malloc(sizeof(struct drm_amdgpu_cs_chunk_sem) * syncobj_request->number_out_syncobj);
+ if (!out_syncobj_dependencies) {
+ r = -ENOMEM;
+ goto error_unlock;
+ }
+ for (j = 0; j < syncobj_request->number_out_syncobj; j++) {
+ struct drm_amdgpu_cs_chunk_sem *dep = &out_syncobj_dependencies[j];
+ dep->handle = syncobj_request->out_syncobj[j];
+ }
+ i = cs.in.num_chunks++;
+
+ /* syncobj signal chunk */
+ chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
+ chunks[i].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_OUT;
+ chunks[i].length_dw = sizeof(struct drm_amdgpu_cs_chunk_sem) / 4 * syncobj_request->number_out_syncobj;
+ chunks[i].chunk_data = (uint64_t)(uintptr_t)out_syncobj_dependencies;
+ }
+ }
+
r = drmCommandWriteRead(context->dev->fd, DRM_AMDGPU_CS,
&cs, sizeof(cs));
if (r)
@@ -319,17 +366,20 @@ error_unlock:
pthread_mutex_unlock(&context->sequence_mutex);
free(dependencies);
free(sem_dependencies);
+ free(in_syncobj_dependencies);
+ free(out_syncobj_dependencies);
return r;
}
-int amdgpu_cs_submit(amdgpu_context_handle context,
- uint64_t flags,
- struct amdgpu_cs_request *ibs_request,
- uint32_t number_of_requests)
+int amdgpu_cs_submit_syncobj(amdgpu_context_handle context,
+ uint64_t flags,
+ struct amdgpu_cs_request *ibs_request,
+ struct amdgpu_cs_request_syncobj *ibs_syncobj,
+ uint32_t number_of_requests)
{
uint32_t i;
int r;
-
+ bool has_syncobj = (ibs_syncobj != NULL);
if (NULL == context)
return -EINVAL;
if (NULL == ibs_request)
@@ -337,15 +387,28 @@ int amdgpu_cs_submit(amdgpu_context_handle context,
r = 0;
for (i = 0; i < number_of_requests; i++) {
- r = amdgpu_cs_submit_one(context, ibs_request);
+ r = amdgpu_cs_submit_one(context, ibs_request, has_syncobj ? ibs_syncobj : NULL);
if (r)
break;
ibs_request++;
+ if (has_syncobj)
+ ibs_syncobj++;
}
return r;
}
+int amdgpu_cs_submit(amdgpu_context_handle context,
+ uint64_t flags,
+ struct amdgpu_cs_request *ibs_request,
+ uint32_t number_of_requests)
+{
+ return amdgpu_cs_submit_syncobj(context, flags,
+ ibs_request, NULL,
+ number_of_requests);
+}
+
/**
* Calculate absolute timeout.
*
@@ -542,3 +605,41 @@ int amdgpu_cs_destroy_semaphore(amdgpu_semaphore_handle sem)
{
return amdgpu_cs_unreference_sem(sem);
}
+
+int amdgpu_cs_create_syncobj(amdgpu_device_handle dev,
+ uint32_t *handle)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjCreate(dev->fd, 0, handle);
+}
+
+int amdgpu_cs_destroy_syncobj(amdgpu_device_handle dev,
+ uint32_t handle)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjDestroy(dev->fd, handle);
+}
+
+int amdgpu_cs_export_syncobj(amdgpu_device_handle dev,
+ uint32_t handle,
+ int *shared_fd)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjHandleToFD(dev->fd, handle, shared_fd);
+}
+
+int amdgpu_cs_import_syncobj(amdgpu_device_handle dev,
+ int shared_fd,
+ uint32_t *handle)
+{
+ if (NULL == dev)
+ return -EINVAL;
+
+ return drmSyncobjFDToHandle(dev->fd, shared_fd, handle);
+}
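
The export/import pair is what makes a syncobj shareable across processes: one side flattens its handle to a file descriptor, passes it over a unix socket, and the other side turns it back into a handle of its own. A hedged sketch, where sock_send_fd()/sock_recv_fd() stand in for the usual SCM_RIGHTS plumbing (both helpers are assumptions, not libdrm API):

    /* Producer: create a syncobj and hand it out as an fd. */
    uint32_t syncobj;
    int fd;
    amdgpu_cs_create_syncobj(dev, &syncobj);
    amdgpu_cs_export_syncobj(dev, syncobj, &fd);
    sock_send_fd(sock, fd);
    close(fd);          /* fd is only a transport; the local handle stays valid */

    /* Consumer: turn the received fd back into a local handle. */
    uint32_t imported;
    int fd = sock_recv_fd(sock);
    amdgpu_cs_import_syncobj(dev, fd, &imported);
    close(fd);          /* imported now refers to the same underlying syncobj */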
diff --git a/include/drm/amdgpu_drm.h b/include/drm/amdgpu_drm.h
index 516a9f28..878eb2b4 100644
--- a/include/drm/amdgpu_drm.h
+++ b/include/drm/amdgpu_drm.h
@@ -413,6 +413,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_IB 0x01
#define AMDGPU_CHUNK_ID_FENCE 0x02
#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
+#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
+#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
@@ -480,6 +482,10 @@ struct drm_amdgpu_cs_chunk_fence {
__u32 offset;
};
+struct drm_amdgpu_cs_chunk_sem {
+ __u32 handle;
+};
+
struct drm_amdgpu_cs_chunk_data {
union {
struct drm_amdgpu_cs_chunk_ib ib_data;
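
On the wire, each new chunk is simply an array of struct drm_amdgpu_cs_chunk_sem, one handle per entry. For callers driving DRM_IOCTL_AMDGPU_CS directly rather than through libdrm, a wait chunk is built the same way amdgpu_cs_submit_one() does above; in this sketch, cs, chunks, chunk_array, count and handles are assumed caller state:

    struct drm_amdgpu_cs_chunk_sem *sems;
    uint32_t i, j;

    sems = calloc(count, sizeof(*sems));
    if (!sems)
        return -ENOMEM;
    for (j = 0; j < count; j++)
        sems[j].handle = handles[j];    /* one syncobj handle per entry */

    i = cs.in.num_chunks++;
    chunk_array[i] = (uint64_t)(uintptr_t)&chunks[i];
    chunks[i].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN;
    chunks[i].length_dw = count * sizeof(struct drm_amdgpu_cs_chunk_sem) / 4;
    chunks[i].chunk_data = (uint64_t)(uintptr_t)sems;

AMDGPU_CHUNK_ID_SYNCOBJ_OUT works the same way: the kernel waits on every handle in an IN chunk before scheduling the IBs, and installs the submission's fence into every handle in an OUT chunk.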