summaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorEric Anholt <eric@anholt.net>2007-11-27 15:03:31 -0800
committerEric Anholt <eric@anholt.net>2007-11-27 15:03:31 -0800
commit230dec40b9fb79c4b6d41d1941eeebf7120d120b (patch)
tree1ad5746e990a25a79f47796013f4b6d6611f3124 /src
parente5a28133d88e38b6fa638ad4bc2a20ff61cd8307 (diff)
[965] Bring back no-backing-store hack for classic-mode performance.
This is not the complete version of the previous hack -- most importantly, the NO_FENCE_SUBDATA is missing. However, the previous implementation of that lacked some required synchronization for the state cache. This improves openarena performance by almost 50%, to about a quarter of what it was before bufmgr conversion.
Diffstat (limited to 'src')
-rw-r--r--src/mesa/drivers/dri/common/dri_bufmgr.h4
-rw-r--r--src/mesa/drivers/dri/common/dri_bufmgr_fake.c46
-rw-r--r--src/mesa/drivers/dri/i965/brw_draw.c38
-rw-r--r--src/mesa/drivers/dri/i965/brw_state_pool.c18
4 files changed, 79 insertions, 27 deletions
diff --git a/src/mesa/drivers/dri/common/dri_bufmgr.h b/src/mesa/drivers/dri/common/dri_bufmgr.h
index f1863c7056..1290fa956b 100644
--- a/src/mesa/drivers/dri/common/dri_bufmgr.h
+++ b/src/mesa/drivers/dri/common/dri_bufmgr.h
@@ -202,6 +202,10 @@ dri_bufmgr *dri_bufmgr_fake_init(unsigned long low_offset, void *low_virtual,
int (*fence_wait)(void *private,
unsigned int cookie),
void *driver_priv);
+void dri_bo_fake_disable_backing_store(dri_bo *bo,
+ void (*invalidate_cb)(dri_bo *bo,
+ void *ptr),
+ void *ptr);
void dri_bufmgr_destroy(dri_bufmgr *bufmgr);
dri_bo *dri_ttm_bo_create_from_handle(dri_bufmgr *bufmgr, const char *name,
unsigned int handle);
diff --git a/src/mesa/drivers/dri/common/dri_bufmgr_fake.c b/src/mesa/drivers/dri/common/dri_bufmgr_fake.c
index c31bd7c6b0..9081f23e4a 100644
--- a/src/mesa/drivers/dri/common/dri_bufmgr_fake.c
+++ b/src/mesa/drivers/dri/common/dri_bufmgr_fake.c
@@ -181,7 +181,7 @@ typedef struct _dri_bo_fake {
struct block *block;
void *backing_store;
- void (*invalidate_cb)(dri_bufmgr *bufmgr, void * );
+ void (*invalidate_cb)(dri_bo *bo, void *ptr);
void *invalidate_ptr;
} dri_bo_fake;
@@ -318,9 +318,9 @@ static void
free_backing_store(dri_bo *bo)
{
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
if (bo_fake->backing_store) {
+ assert(!(bo_fake->flags & (BM_PINNED|BM_NO_BACKING_STORE)));
ALIGN_FREE(bo_fake->backing_store);
bo_fake->backing_store = NULL;
}
@@ -329,11 +329,10 @@ free_backing_store(dri_bo *bo)
static void
set_dirty(dri_bo *bo)
{
- dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
- if (bo_fake->flags & BM_NO_BACKING_STORE)
- bo_fake->invalidate_cb(&bufmgr_fake->bufmgr, bo_fake->invalidate_ptr);
+ if (bo_fake->flags & BM_NO_BACKING_STORE && bo_fake->invalidate_cb != NULL)
+ bo_fake->invalidate_cb(bo, bo_fake->invalidate_ptr);
assert(!(bo_fake->flags & BM_PINNED));
@@ -678,6 +677,43 @@ dri_fake_bo_unreference(dri_bo *bo)
}
/**
+ * Set the buffer as not requiring backing store, and instead get the callback
+ * invoked whenever it would be set dirty.
+ *
+ * Note: unlike the previous bmBufferSetInvalidateCB interface, this version
+ * takes no dont_fence_subdata flag; mapping the buffer still waits for idle.
+ */
+void dri_bo_fake_disable_backing_store(dri_bo *bo,
+ void (*invalidate_cb)(dri_bo *bo,
+ void *ptr),
+ void *ptr)
+{
+ dri_bufmgr_fake *bufmgr_fake = (dri_bufmgr_fake *)bo->bufmgr;
+ dri_bo_fake *bo_fake = (dri_bo_fake *)bo;
+
+ _glthread_LOCK_MUTEX(bufmgr_fake->mutex);
+
+ if (bo_fake->backing_store)
+ free_backing_store(bo);
+
+ bo_fake->flags |= BM_NO_BACKING_STORE;
+
+ DBG("disable_backing_store set buf %d dirty\n", bo_fake->id);
+ bo_fake->dirty = 1;
+ bo_fake->invalidate_cb = invalidate_cb;
+ bo_fake->invalidate_ptr = ptr;
+
+ /* Note that it is invalid right from the start. Also note
+ * invalidate_cb is called with the bufmgr locked, so cannot
+ * itself make bufmgr calls.
+ */
+ if (invalidate_cb != NULL)
+ invalidate_cb(bo, ptr);
+
+ _glthread_UNLOCK_MUTEX(bufmgr_fake->mutex);
+}
+
+/**
* Map a buffer into bo->virtual, allocating either card memory space (If
* BM_NO_BACKING_STORE or BM_PINNED) or backing store, as necessary.
*/
diff --git a/src/mesa/drivers/dri/i965/brw_draw.c b/src/mesa/drivers/dri/i965/brw_draw.c
index 223a1c1c9d..87e2202029 100644
--- a/src/mesa/drivers/dri/i965/brw_draw.c
+++ b/src/mesa/drivers/dri/i965/brw_draw.c
@@ -447,13 +447,6 @@ void brw_draw_prims( GLcontext *ctx,
}
}
-#if 0
-static void brw_invalidate_vbo_cb( struct intel_context *intel, void *ptr )
-{
- /* nothing to do, we don't rely on the contents being preserved */
-}
-#endif
-
void brw_draw_init( struct brw_context *brw )
{
GLcontext *ctx = &brw->intel.ctx;
@@ -469,23 +462,24 @@ void brw_draw_init( struct brw_context *brw )
for (i = 0; i < BRW_NR_UPLOAD_BUFS; i++) {
brw->vb.upload.vbo[i] = ctx->Driver.NewBufferObject(ctx, 1, GL_ARRAY_BUFFER_ARB);
-#if 0
- /* NOTE: These are set to no-backing-store.
+ ctx->Driver.BufferData(ctx,
+ GL_ARRAY_BUFFER_ARB,
+ BRW_UPLOAD_INIT_SIZE,
+ NULL,
+ GL_DYNAMIC_DRAW_ARB,
+ brw->vb.upload.vbo[i]);
+
+ /* Set the internal VBOs to no-backing-store. We only use them as a
+ * temporary within a brw_try_draw_prims while the lock is held.
*/
- bmBufferSetInvalidateCB(&brw->intel,
- intel_bufferobj_buffer(intel_buffer_object(brw->vb.upload.vbo[i])),
- brw_invalidate_vbo_cb,
- &brw->intel,
- GL_TRUE);
-#endif
- }
+ if (!brw->intel.intelScreen->ttm) {
+ struct intel_buffer_object *intel_bo =
+ intel_buffer_object(brw->vb.upload.vbo[i]);
- ctx->Driver.BufferData( ctx,
- GL_ARRAY_BUFFER_ARB,
- BRW_UPLOAD_INIT_SIZE,
- NULL,
- GL_DYNAMIC_DRAW_ARB,
- brw->vb.upload.vbo[0] );
+ dri_bo_fake_disable_backing_store(intel_bufferobj_buffer(intel_bo),
+ NULL, NULL);
+ }
+ }
}
void brw_draw_destroy( struct brw_context *brw )
diff --git a/src/mesa/drivers/dri/i965/brw_state_pool.c b/src/mesa/drivers/dri/i965/brw_state_pool.c
index 63daa9da34..0826d1ded3 100644
--- a/src/mesa/drivers/dri/i965/brw_state_pool.c
+++ b/src/mesa/drivers/dri/i965/brw_state_pool.c
@@ -70,6 +70,14 @@ void brw_invalidate_pool( struct intel_context *intel,
brw_clear_all_caches(pool->brw);
}
+static void
+brw_invalidate_pool_cb(dri_bo *bo, void *ptr)
+{
+ struct brw_mem_pool *pool = ptr;
+ struct brw_context *brw = pool->brw;
+
+ brw_invalidate_pool(&brw->intel, pool);
+}
static void brw_init_pool( struct brw_context *brw,
GLuint pool_id,
@@ -84,6 +92,16 @@ static void brw_init_pool( struct brw_context *brw,
pool->buffer = dri_bo_alloc(brw->intel.intelScreen->bufmgr,
(pool_id == BRW_GS_POOL) ? "GS pool" : "SS pool",
size, 4096, DRM_BO_FLAG_MEM_TT);
+
+ /* Disable the backing store for the state cache. It's not worth the
+ * cost of keeping a backing store copy, since we can just regenerate
+ * the contents at approximately the same cost as the memcpy, and only
+ * if the contents are lost.
+ */
+ if (!brw->intel.intelScreen->ttm) {
+ dri_bo_fake_disable_backing_store(pool->buffer, brw_invalidate_pool_cb,
+ pool);
+ }
}
static void brw_destroy_pool( struct brw_context *brw,