summary | refs | log | tree | commit | diff
path: root/freedreno
diff options
context:
space:
mode:
authorRob Clark <robclark@freedesktop.org>2015-07-21 11:57:00 -0400
committerRob Clark <robclark@freedesktop.org>2015-08-17 10:35:08 -0400
commit9e34ee4f75ef559ff3a3c6d4b8f285453eea1f29 (patch)
treece325f8b90e683d24f749a3f9313fca4d5c1b519 /freedreno
parent2fa58ef8f43b41a6d12396ff637f09860665072f (diff)
freedreno/msm: fix issue where same bo is on multiple rings
It should be a less common case, but it is possible for a single bo to be on multiple rings, for example when sharing a buffer across multiple pipe_context's created from the same pipe_screen. So rather than completely fall over in this case, fall back to the slow-path of looping over all bo's in the ring's bo-table (but retain the fast-path of constant-lookup for the first ring the buffer is on). Signed-off-by: Rob Clark <robclark@freedesktop.org>
Diffstat (limited to 'freedreno')
-rw-r--r--freedreno/msm/msm_bo.c4
-rw-r--r--freedreno/msm/msm_priv.h15
-rw-r--r--freedreno/msm/msm_ringbuffer.c68
3 files changed, 58 insertions, 29 deletions
diff --git a/freedreno/msm/msm_bo.c b/freedreno/msm/msm_bo.c
index 3f5b6d07..6dc3776b 100644
--- a/freedreno/msm/msm_bo.c
+++ b/freedreno/msm/msm_bo.c
@@ -129,7 +129,6 @@ drm_private struct fd_bo * msm_bo_from_handle(struct fd_device *dev,
{
struct msm_bo *msm_bo;
struct fd_bo *bo;
- unsigned i;
msm_bo = calloc(1, sizeof(*msm_bo));
if (!msm_bo)
@@ -139,8 +138,5 @@ drm_private struct fd_bo * msm_bo_from_handle(struct fd_device *dev,
bo->funcs = &funcs;
bo->fd = -1;
- for (i = 0; i < ARRAY_SIZE(msm_bo->list); i++)
- list_inithead(&msm_bo->list[i]);
-
return bo;
}
diff --git a/freedreno/msm/msm_priv.h b/freedreno/msm/msm_priv.h
index 94d23579..637cb521 100644
--- a/freedreno/msm/msm_priv.h
+++ b/freedreno/msm/msm_priv.h
@@ -71,8 +71,19 @@ struct msm_bo {
struct fd_bo base;
uint64_t offset;
uint64_t presumed;
- uint32_t indexp1[FD_PIPE_MAX]; /* index plus 1 */
- struct list_head list[FD_PIPE_MAX];
+ /* in the common case, a bo won't be referenced by more than a single
+ * (parent) ring[*]. So to avoid looping over all the bo's in the
+ * reloc table to find the idx of a bo that might already be in the
+ * table, we cache the idx in the bo. But in order to detect the
+ * slow-path where bo is ref'd in multiple rb's, we also must track
+ * the current_ring for which the idx is valid. See bo2idx().
+ *
+ * [*] in case multiple ringbuffers, ie. one toplevel and other rb(s)
+ * used for IB target(s), the toplevel rb is the parent which is
+ * tracking bo's for the submit
+ */
+ struct fd_ringbuffer *current_ring;
+ uint32_t idx;
};
static inline struct msm_bo * to_msm_bo(struct fd_bo *x)
diff --git a/freedreno/msm/msm_ringbuffer.c b/freedreno/msm/msm_ringbuffer.c
index 2798c3fd..842574ec 100644
--- a/freedreno/msm/msm_ringbuffer.c
+++ b/freedreno/msm/msm_ringbuffer.c
@@ -39,8 +39,6 @@ struct msm_ringbuffer {
struct fd_ringbuffer base;
struct fd_bo *ring_bo;
- struct list_head submit_list;
-
/* submit ioctl related tables: */
struct {
/* bo's table: */
@@ -56,11 +54,17 @@ struct msm_ringbuffer {
uint32_t nr_relocs, max_relocs;
} submit;
+ /* should have matching entries in submit.bos: */
+ struct fd_bo **bos;
+ uint32_t nr_bos, max_bos;
+
/* should have matching entries in submit.cmds: */
struct fd_ringbuffer **rings;
uint32_t nr_rings, max_rings;
};
+static pthread_mutex_t idx_lock = PTHREAD_MUTEX_INITIALIZER;
+
static void *grow(void *ptr, uint32_t nr, uint32_t *max, uint32_t sz)
{
if ((nr + 1) > *max) {
@@ -83,27 +87,47 @@ static inline struct msm_ringbuffer * to_msm_ringbuffer(struct fd_ringbuffer *x)
return (struct msm_ringbuffer *)x;
}
+static uint32_t append_bo(struct fd_ringbuffer *ring, struct fd_bo *bo)
+{
+ struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
+ uint32_t idx;
+
+ idx = APPEND(&msm_ring->submit, bos);
+ idx = APPEND(msm_ring, bos);
+
+ msm_ring->submit.bos[idx].flags = 0;
+ msm_ring->submit.bos[idx].handle = bo->handle;
+ msm_ring->submit.bos[idx].presumed = to_msm_bo(bo)->presumed;
+
+ msm_ring->bos[idx] = fd_bo_ref(bo);
+
+ return idx;
+}
+
/* add (if needed) bo, return idx: */
static uint32_t bo2idx(struct fd_ringbuffer *ring, struct fd_bo *bo, uint32_t flags)
{
struct msm_ringbuffer *msm_ring = to_msm_ringbuffer(ring);
struct msm_bo *msm_bo = to_msm_bo(bo);
- int id = ring->pipe->id;
uint32_t idx;
- if (!msm_bo->indexp1[id]) {
- struct list_head *list = &msm_bo->list[id];
- idx = APPEND(&msm_ring->submit, bos);
- msm_ring->submit.bos[idx].flags = 0;
- msm_ring->submit.bos[idx].handle = bo->handle;
- msm_ring->submit.bos[idx].presumed = msm_bo->presumed;
- msm_bo->indexp1[id] = idx + 1;
-
- assert(LIST_IS_EMPTY(list));
- fd_bo_ref(bo);
- list_addtail(list, &msm_ring->submit_list);
+ pthread_mutex_lock(&idx_lock);
+ if (!msm_bo->current_ring) {
+ idx = append_bo(ring, bo);
+ msm_bo->current_ring = ring;
+ msm_bo->idx = idx;
+ } else if (msm_bo->current_ring == ring) {
+ idx = msm_bo->idx;
} else {
- idx = msm_bo->indexp1[id] - 1;
+ /* slow-path: */
+ for (idx = 0; idx < msm_ring->nr_bos; idx++)
+ if (msm_ring->bos[idx] == bo)
+ break;
+ if (idx == msm_ring->nr_bos) {
+ /* not found */
+ idx = append_bo(ring, bo);
+ }
}
+ pthread_mutex_unlock(&idx_lock);
if (flags & FD_RELOC_READ)
msm_ring->submit.bos[idx].flags |= MSM_SUBMIT_BO_READ;
if (flags & FD_RELOC_WRITE)
@@ -193,6 +217,8 @@ static void flush_reset(struct fd_ringbuffer *ring)
msm_ring->submit.nr_relocs = 0;
msm_ring->submit.nr_cmds = 0;
msm_ring->submit.nr_bos = 0;
+ msm_ring->nr_rings = 0;
+ msm_ring->nr_bos = 0;
}
static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start)
@@ -202,9 +228,8 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
struct drm_msm_gem_submit req = {
.pipe = to_msm_pipe(ring->pipe)->pipe,
};
- struct msm_bo *msm_bo = NULL, *tmp;
uint32_t i, submit_offset, size;
- int ret, id = ring->pipe->id;
+ int ret;
submit_offset = offset_bytes(last_start, ring->start);
size = offset_bytes(ring->cur, last_start);
@@ -242,10 +267,9 @@ static int msm_ringbuffer_flush(struct fd_ringbuffer *ring, uint32_t *last_start
}
}
- LIST_FOR_EACH_ENTRY_SAFE(msm_bo, tmp, &msm_ring->submit_list, list[id]) {
- struct list_head *list = &msm_bo->list[id];
- list_delinit(list);
- msm_bo->indexp1[id] = 0;
+ for (i = 0; i < msm_ring->nr_bos; i++) {
+ struct msm_bo *msm_bo = to_msm_bo(msm_ring->bos[i]);
+ msm_bo->current_ring = NULL;
fd_bo_del(&msm_bo->base);
}
@@ -338,8 +362,6 @@ drm_private struct fd_ringbuffer * msm_ringbuffer_new(struct fd_pipe *pipe,
ring = &msm_ring->base;
ring->funcs = &funcs;
- list_inithead(&msm_ring->submit_list);
-
msm_ring->ring_bo = fd_bo_new(pipe->dev, size, 0);
if (!msm_ring->ring_bo) {
ERROR_MSG("ringbuffer allocation failed");