diff options
author | Nicolai Hähnle <nicolai.haehnle@amd.com> | 2017-10-22 17:38:51 +0200 |
---|---|---|
committer | Nicolai Hähnle <nicolai.haehnle@amd.com> | 2017-11-06 11:09:47 +0100 |
commit | 221fdaa1cac855ffa032ae0b166907412911cedd (patch) | |
tree | ce3a93be304985d8ce9d1ec523127b10e55f3ff6 | |
parent | 11b7cb80b1f2378f848c619a8881491fd7702bed (diff) |
gallium/u_threaded: avoid syncs for get_query_result
Queries should still get marked as flushed when flushes are executed
asynchronously in the driver thread.
To this end, the management of the unflushed_queries list is moved into
the driver thread.
Reviewed-by: Marek Olšák <marek.olsak@amd.com>
-rw-r--r-- | src/gallium/auxiliary/util/u_threaded_context.c | 65 |
1 file changed, 48 insertions, 17 deletions
diff --git a/src/gallium/auxiliary/util/u_threaded_context.c b/src/gallium/auxiliary/util/u_threaded_context.c index 0bb645e852..4908ea8a7b 100644 --- a/src/gallium/auxiliary/util/u_threaded_context.c +++ b/src/gallium/auxiliary/util/u_threaded_context.c @@ -328,6 +328,11 @@ tc_create_batch_query(struct pipe_context *_pipe, unsigned num_queries, static void tc_call_destroy_query(struct pipe_context *pipe, union tc_payload *payload) { + struct threaded_query *tq = threaded_query(payload->query); + + if (tq->head_unflushed.next) + LIST_DEL(&tq->head_unflushed); + pipe->destroy_query(pipe, payload->query); } @@ -335,10 +340,6 @@ static void tc_destroy_query(struct pipe_context *_pipe, struct pipe_query *query) { struct threaded_context *tc = threaded_context(_pipe); - struct threaded_query *tq = threaded_query(query); - - if (tq->head_unflushed.next) - LIST_DEL(&tq->head_unflushed); tc_add_small_call(tc, TC_CALL_destroy_query)->query = query; } @@ -359,10 +360,21 @@ tc_begin_query(struct pipe_context *_pipe, struct pipe_query *query) return true; /* we don't care about the return value for this call */ } +struct tc_end_query_payload { + struct threaded_context *tc; + struct pipe_query *query; +}; + static void tc_call_end_query(struct pipe_context *pipe, union tc_payload *payload) { - pipe->end_query(pipe, payload->query); + struct tc_end_query_payload *p = (struct tc_end_query_payload *)payload; + struct threaded_query *tq = threaded_query(p->query); + + if (!tq->head_unflushed.next) + LIST_ADD(&tq->head_unflushed, &p->tc->unflushed_queries); + + pipe->end_query(pipe, p->query); } static bool @@ -370,13 +382,15 @@ tc_end_query(struct pipe_context *_pipe, struct pipe_query *query) { struct threaded_context *tc = threaded_context(_pipe); struct threaded_query *tq = threaded_query(query); - union tc_payload *payload = tc_add_small_call(tc, TC_CALL_end_query); + struct tc_end_query_payload *payload = + tc_add_struct_typed_call(tc, TC_CALL_end_query, 
tc_end_query_payload); + + tc_add_small_call(tc, TC_CALL_end_query); + payload->tc = tc; payload->query = query; tq->flushed = false; - if (!tq->head_unflushed.next) - LIST_ADD(&tq->head_unflushed, &tc->unflushed_queries); return true; /* we don't care about the return value for this call */ } @@ -397,8 +411,10 @@ tc_get_query_result(struct pipe_context *_pipe, if (success) { tq->flushed = true; - if (tq->head_unflushed.next) + if (tq->head_unflushed.next) { + /* This is safe because it can only happen after we sync'd. */ LIST_DEL(&tq->head_unflushed); + } } return success; } @@ -1813,11 +1829,27 @@ tc_create_video_buffer(struct pipe_context *_pipe, */ struct tc_flush_payload { + struct threaded_context *tc; struct pipe_fence_handle *fence; unsigned flags; }; static void +tc_flush_queries(struct threaded_context *tc) +{ + struct threaded_query *tq, *tmp; + LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) { + LIST_DEL(&tq->head_unflushed); + + /* Memory release semantics: due to a possible race with + * tc_get_query_result, we must ensure that the linked list changes + * are visible before setting tq->flushed. + */ + p_atomic_set(&tq->flushed, true); + } +} + +static void tc_call_flush(struct pipe_context *pipe, union tc_payload *payload) { struct tc_flush_payload *p = (struct tc_flush_payload *)payload; @@ -1825,6 +1857,9 @@ tc_call_flush(struct pipe_context *pipe, union tc_payload *payload) pipe->flush(pipe, p->fence ? 
&p->fence : NULL, p->flags); screen->fence_reference(screen, &p->fence, NULL); + + if (!(p->flags & PIPE_FLUSH_DEFERRED)) + tc_flush_queries(p->tc); } static void @@ -1834,7 +1869,6 @@ tc_flush(struct pipe_context *_pipe, struct pipe_fence_handle **fence, struct threaded_context *tc = threaded_context(_pipe); struct pipe_context *pipe = tc->pipe; struct pipe_screen *screen = pipe->screen; - struct threaded_query *tq, *tmp; bool async = flags & PIPE_FLUSH_DEFERRED; if (flags & PIPE_FLUSH_ASYNC) { @@ -1870,6 +1904,7 @@ tc_flush(struct pipe_context *_pipe, struct pipe_fence_handle **fence, struct tc_flush_payload *p = tc_add_struct_typed_call(tc, TC_CALL_flush, tc_flush_payload); + p->tc = tc; p->fence = fence ? *fence : NULL; p->flags = flags | TC_FLUSH_ASYNC; @@ -1879,15 +1914,11 @@ tc_flush(struct pipe_context *_pipe, struct pipe_fence_handle **fence, } out_of_memory: - if (!(flags & PIPE_FLUSH_DEFERRED)) { - LIST_FOR_EACH_ENTRY_SAFE(tq, tmp, &tc->unflushed_queries, head_unflushed) { - tq->flushed = true; - LIST_DEL(&tq->head_unflushed); - } - } - tc_sync_msg(tc, flags & PIPE_FLUSH_END_OF_FRAME ? "end of frame" : flags & PIPE_FLUSH_DEFERRED ? "deferred fence" : "normal"); + + if (!(flags & PIPE_FLUSH_DEFERRED)) + tc_flush_queries(tc); pipe->flush(pipe, fence, flags); } |