authorThomas Hellstrom <thomas-at-tungstengraphics-dot-com>2008-03-30 16:02:45 +0200
committerThomas Hellstrom <thomas-at-tungstengraphics-dot-com>2008-03-30 16:02:45 +0200
commit6c80f5876b0b00e6ba958e8aff9e31079cbcd681 (patch)
tree187bbdc6850964492e14a660c6f9d0b05cddb661
parenta0e133114bbde5c1f7e6ea20613b513f7fbbbdae (diff)
Replace the batchpool with a slab pool and use it also for textures.
This works around the TTM allocation granularity problem and the excessive memory usage of the old batchpool, with minimal CPU overhead. Also add a user-space fence manager to reduce the number of kernel calls needed to check for signaled fences.
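Most of the driver-side churn in this patch follows from one calling-convention change: driFenceUnReference() now takes the address of the fence pointer and clears it on return, so callers keep the flush result in a local variable first. A minimal sketch of the resulting pattern (the helper name is illustrative only, not part of the patch):

#include "intel_batchbuffer.h"
#include "ws_dri_fencemgr.h"

/* Illustrative helper: hold the flush result in a local so the new
 * driFenceUnReference(&fence) call can reset it to NULL. */
static void
flush_and_release(struct intel_batchbuffer *batch)
{
   struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);

   if (fence)
      driFenceFinish(fence, driFenceType(fence), GL_FALSE);

   driFenceUnReference(&fence);   /* NULL-safe; resets fence to NULL */
}

The same pattern replaces the old driFenceUnReference(intel_batchbuffer_flush(...)) one-liners in intelFlush(), the clear and blit paths, and the texture upload code below.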
-rw-r--r--src/mesa/drivers/dri/i915tex/Makefile3
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_batchbuffer.c17
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_batchbuffer.h8
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_blit.c9
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_buffer_objects.c2
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_buffers.c8
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_context.c33
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_pixel_copy.c10
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_pixel_draw.c6
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_pixel_read.c2
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_regions.c18
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_screen.c104
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_screen.h17
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_tex_copy.c4
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_tex_image.c5
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_tex_validate.c9
-rw-r--r--src/mesa/drivers/dri/i915tex/intel_tris.c8
-rw-r--r--src/mesa/drivers/dri/i915tex/ws_dri_batchpool.c457
-rw-r--r--src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.c118
-rw-r--r--src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.h13
-rw-r--r--src/mesa/drivers/dri/i915tex/ws_dri_bufpool.h16
-rw-r--r--src/mesa/drivers/dri/i915tex/ws_dri_fencemgr.c372
-rw-r--r--src/mesa/drivers/dri/i915tex/ws_dri_fencemgr.h115
-rw-r--r--src/mesa/drivers/dri/i915tex/ws_dri_slabpool.c916
24 files changed, 1596 insertions, 674 deletions
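For orientation before the per-file hunks: screen setup now creates a TTM fence manager, a free-slab manager and two slab pools in place of the old batch pool. A condensed sketch of that setup, assuming the prototypes declared in ws_dri_bufpool.h and ws_dri_fencemgr.h (the helper name is illustrative; the real intelInitDriver()/intelCreatePools() below check and unwind each step on failure):

#include "intel_screen.h"
#include "ws_dri_bufpool.h"
#include "ws_dri_fencemgr.h"

static GLboolean
setup_pools_sketch(intelScreenPrivate *intelScreen, __DRIscreenPrivate *sPriv)
{
   /* Fence manager and free-slab manager come first; the slab pools need them. */
   intelScreen->mgr  = driFenceMgrTTMInit(sPriv->fd);
   intelScreen->fMan = driInitFreeSlabManager(10, 10);
   intelScreen->drmPool = driDRMPoolInit(sPriv->fd);

   /* Surfaces: 6 size buckets (64..2048 bytes), one-page slabs. */
   intelScreen->surfacePool =
      driSlabPoolInit(sPriv->fd,
                      DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MEM_TT,
                      DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MEM_TT,
                      64, 6, 16, 4096, 0, intelScreen->fMan);

   /* Batch buffers: one bucket of maxBatchSize buffers, 256 kB (16*16384) slabs. */
   intelScreen->batchPool =
      driSlabPoolInit(sPriv->fd,
                      DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
                      DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
                      intelScreen->maxBatchSize, 1, 40, 16*16384, 0,
                      intelScreen->fMan);

   return (intelScreen->mgr && intelScreen->fMan && intelScreen->drmPool &&
           intelScreen->surfacePool && intelScreen->batchPool)
      ? GL_TRUE : GL_FALSE;
}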
diff --git a/src/mesa/drivers/dri/i915tex/Makefile b/src/mesa/drivers/dri/i915tex/Makefile
index 9f8d324369..0546373109 100644
--- a/src/mesa/drivers/dri/i915tex/Makefile
+++ b/src/mesa/drivers/dri/i915tex/Makefile
@@ -51,9 +51,10 @@ DRIVER_SOURCES = \
intel_tris.c \
intel_fbo.c \
intel_depthstencil.c \
- ws_dri_batchpool.c \
ws_dri_drmpool.c \
ws_dri_mallocpool.c \
+ ws_dri_slabpool.c \
+ ws_dri_fencemgr.c \
ws_dri_bufmgr.c
C_SOURCES = \
diff --git a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
index 5343aa3c11..6b54bcfeac 100644
--- a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
+++ b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.c
@@ -70,9 +70,9 @@ intel_batchbuffer_reset(struct intel_batchbuffer *batch)
* Add the batchbuffer to the validate list.
*/
- driBOAddListItem(batch->list, batch->buffer,
- DRM_BO_FLAG_WRITE | DRM_BO_FLAG_MEM_TT,
- DRM_BO_FLAG_WRITE | DRM_BO_MASK_MEM,
+ driBOAddListItem(batch->list, batch->buffer,
+ DRM_BO_FLAG_EXE | DRM_BO_FLAG_MEM_TT,
+ DRM_BO_FLAG_EXE | DRM_BO_MASK_MEM,
&batch->dest_location, &batch->node);
req = &batch->node->bo_arg.d.req.bo_req;
@@ -138,8 +138,7 @@ intel_batchbuffer_free(struct intel_batchbuffer *batch)
if (batch->last_fence) {
driFenceFinish(batch->last_fence,
DRM_FENCE_TYPE_EXE, GL_FALSE);
- driFenceUnReference(batch->last_fence);
- batch->last_fence = NULL;
+ driFenceUnReference(&batch->last_fence);
}
if (batch->map) {
driBOUnmap(batch->buffer);
@@ -325,7 +324,7 @@ do_flush_locked(struct intel_batchbuffer *batch,
*/
if (batch->last_fence)
- driFenceUnReference(batch->last_fence);
+ driFenceUnReference(&batch->last_fence);
batch->last_fence = NULL;
return NULL;
}
@@ -336,12 +335,12 @@ do_flush_locked(struct intel_batchbuffer *batch,
fence.flags = ea.fence_arg.flags;
fence.signaled = ea.fence_arg.signaled;
- fo = driBOFenceUserList(batch->intel->driFd, batch->list,
+ fo = driBOFenceUserList(batch->intel->intelScreen->mgr, batch->list,
"SuperFence", &fence);
if (driFenceType(fo) & DRM_I915_FENCE_TYPE_RW) {
if (batch->last_fence)
- driFenceUnReference(batch->last_fence);
+ driFenceUnReference(&batch->last_fence);
/*
* FIXME: Context last fence??
*/
@@ -406,7 +405,7 @@ intel_batchbuffer_finish(struct intel_batchbuffer *batch)
{
struct _DriFenceObject *fence = intel_batchbuffer_flush(batch);
driFenceFinish(fence, driFenceType(fence), GL_FALSE);
- driFenceUnReference(fence);
+ driFenceUnReference(&fence);
}
void
diff --git a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.h b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.h
index b437e94645..9e4b8043bf 100644
--- a/src/mesa/drivers/dri/i915tex/intel_batchbuffer.h
+++ b/src/mesa/drivers/dri/i915tex/intel_batchbuffer.h
@@ -100,10 +100,14 @@ static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
GLuint sz, GLuint flags)
{
+ struct _DriFenceObject *fence;
+
assert(sz < batch->size - 8);
if (intel_batchbuffer_space(batch) < sz ||
- (batch->flags != 0 && flags != 0 && batch->flags != flags))
- driFenceUnReference(intel_batchbuffer_flush(batch));
+ (batch->flags != 0 && flags != 0 && batch->flags != flags)) {
+ fence = intel_batchbuffer_flush(batch);
+ driFenceUnReference(&fence);
+ }
batch->flags |= flags;
}
diff --git a/src/mesa/drivers/dri/i915tex/intel_blit.c b/src/mesa/drivers/dri/i915tex/intel_blit.c
index 0401449137..3ae51a8fb8 100644
--- a/src/mesa/drivers/dri/i915tex/intel_blit.c
+++ b/src/mesa/drivers/dri/i915tex/intel_blit.c
@@ -68,8 +68,7 @@ intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
if (intel->last_swap_fence) {
driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
- driFenceUnReference(intel->last_swap_fence);
- intel->last_swap_fence = NULL;
+ driFenceUnReference(&intel->last_swap_fence);
}
intel->last_swap_fence = intel->second_swap_fence;
intel->second_swap_fence = intel->first_swap_fence;
@@ -154,7 +153,7 @@ intelCopyBuffer(const __DRIdrawablePrivate * dPriv,
}
if (intel->first_swap_fence)
- driFenceUnReference(intel->first_swap_fence);
+ driFenceUnReference(&intel->first_swap_fence);
intel->first_swap_fence = intel_batchbuffer_flush(intel->batch);
}
@@ -380,6 +379,7 @@ intelClearWithBlit(GLcontext * ctx, GLbitfield mask)
GLint cx, cy, cw, ch;
drm_clip_rect_t clear;
int i;
+ struct _DriFenceObject *fence;
/* Get clear bounds after locking */
cx = fb->_Xmin;
@@ -509,7 +509,8 @@ intelClearWithBlit(GLcontext * ctx, GLbitfield mask)
}
}
}
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
}
UNLOCK_HARDWARE(intel);
diff --git a/src/mesa/drivers/dri/i915tex/intel_buffer_objects.c b/src/mesa/drivers/dri/i915tex/intel_buffer_objects.c
index e59ffe5493..7aa19b5a4e 100644
--- a/src/mesa/drivers/dri/i915tex/intel_buffer_objects.c
+++ b/src/mesa/drivers/dri/i915tex/intel_buffer_objects.c
@@ -57,7 +57,7 @@ intel_bufferobj_select(struct intel_context *intel,
/*
* Pixel buffer objects.
*/
- obj->pool = intel->intelScreen->regionPool;
+ obj->pool = intel->intelScreen->drmPool;
switch(usage) {
/*
* Enable when performance-tested more thoroughly, and / or when
diff --git a/src/mesa/drivers/dri/i915tex/intel_buffers.c b/src/mesa/drivers/dri/i915tex/intel_buffers.c
index 2e4726537f..fc902e53b4 100644
--- a/src/mesa/drivers/dri/i915tex/intel_buffers.c
+++ b/src/mesa/drivers/dri/i915tex/intel_buffers.c
@@ -375,6 +375,7 @@ intelClearWithTris(struct intel_context *intel, GLbitfield mask)
GLcontext *ctx = &intel->ctx;
struct gl_framebuffer *fb = ctx->DrawBuffer;
drm_clip_rect_t clear;
+ struct _DriFenceObject *fence;
if (INTEL_DEBUG & DEBUG_BLIT)
_mesa_printf("%s 0x%x\n", __FUNCTION__, mask);
@@ -467,7 +468,8 @@ intelClearWithTris(struct intel_context *intel, GLbitfield mask)
}
intel->vtbl.leave_meta_state(intel);
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
}
UNLOCK_HARDWARE(intel);
}
@@ -684,6 +686,7 @@ intelScheduleSwap(const __DRIdrawablePrivate * dPriv, GLboolean *missed_target)
unsigned int target;
drm_i915_vblank_swap_t swap;
GLboolean ret;
+ struct _DriFenceObject *fence;
if (!intel_fb->vblank_flags ||
(intel_fb->vblank_flags & VBLANK_FLAG_NO_IRQ) ||
@@ -707,7 +710,8 @@ intelScheduleSwap(const __DRIdrawablePrivate * dPriv, GLboolean *missed_target)
LOCK_HARDWARE(intel);
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
if ( intel_fb->pf_active ) {
swap.seqtype |= DRM_VBLANK_FLIP;
diff --git a/src/mesa/drivers/dri/i915tex/intel_context.c b/src/mesa/drivers/dri/i915tex/intel_context.c
index 301c5d5811..0f842f582d 100644
--- a/src/mesa/drivers/dri/i915tex/intel_context.c
+++ b/src/mesa/drivers/dri/i915tex/intel_context.c
@@ -264,14 +264,17 @@ void
intelFlush(GLcontext * ctx)
{
struct intel_context *intel = intel_context(ctx);
+ struct _DriFenceObject *fence;
if (intel->Fallback)
_swrast_flush(ctx);
INTEL_FIREVERTICES(intel);
- if (intel->batch->map != intel->batch->ptr)
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ if (intel->batch->map != intel->batch->ptr) {
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
+ }
/* XXX: Need to do an MI_FLUSH here.
*/
@@ -310,10 +313,9 @@ intelFinish(GLcontext * ctx)
struct intel_context *intel = intel_context(ctx);
intelFlush(ctx);
if (intel->batch->last_fence) {
- driFenceFinish(intel->batch->last_fence,
+ driFenceFinish(intel->batch->last_fence,
driFenceType(intel->batch->last_fence), GL_FALSE);
- driFenceUnReference(intel->batch->last_fence);
- intel->batch->last_fence = NULL;
+ driFenceUnReference(&intel->batch->last_fence);
}
intelCheckFrontRotate(ctx);
}
@@ -354,15 +356,7 @@ intelInitContext(struct intel_context *intel,
drmI830Sarea *saPriv = (drmI830Sarea *)
(((GLubyte *) sPriv->pSAREA) + intelScreen->sarea_priv_offset);
int fthrottle_mode;
- GLboolean havePools;
-
- DRM_LIGHT_LOCK(sPriv->fd, &sPriv->pSAREA->lock, driContextPriv->hHWContext);
- havePools = intelCreatePools(intelScreen);
- DRM_UNLOCK(sPriv->fd, &sPriv->pSAREA->lock, driContextPriv->hHWContext);
- if (!havePools)
- return GL_FALSE;
-
if (!_mesa_initialize_context(&intel->ctx,
mesaVis, shareCtx,
functions, (void *) intel))
@@ -530,13 +524,11 @@ intelDestroyContext(__DRIcontextPrivate * driContextPriv)
if (intel->last_swap_fence) {
driFenceFinish(intel->last_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
- driFenceUnReference(intel->last_swap_fence);
- intel->last_swap_fence = NULL;
+ driFenceUnReference(&intel->last_swap_fence);
}
if (intel->first_swap_fence) {
driFenceFinish(intel->first_swap_fence, DRM_FENCE_TYPE_EXE, GL_TRUE);
- driFenceUnReference(intel->first_swap_fence);
- intel->first_swap_fence = NULL;
+ driFenceUnReference(&intel->first_swap_fence);
}
@@ -677,6 +669,7 @@ intelContendedLock(struct intel_context *intel, GLuint flags)
sarea->height != intel->height ||
sarea->rotation != intel->current_rotation) {
int numClipRects = intel->numClipRects;
+ struct _DriFenceObject *fence;
/*
* FIXME: Really only need to do this when drawing to a
@@ -693,8 +686,10 @@ intelContendedLock(struct intel_context *intel, GLuint flags)
INTEL_FIREVERTICES(intel);
- if (intel->batch->map != intel->batch->ptr)
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ if (intel->batch->map != intel->batch->ptr) {
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
+ }
intel->numClipRects = numClipRects;
diff --git a/src/mesa/drivers/dri/i915tex/intel_pixel_copy.c b/src/mesa/drivers/dri/i915tex/intel_pixel_copy.c
index 16db625435..867857ef46 100644
--- a/src/mesa/drivers/dri/i915tex/intel_pixel_copy.c
+++ b/src/mesa/drivers/dri/i915tex/intel_pixel_copy.c
@@ -114,8 +114,9 @@ do_texture_copypixels(GLcontext * ctx,
struct intel_region *src = copypix_src_region(intel, type);
GLenum src_format;
GLenum src_type;
+ struct _DriFenceObject *fence;
- DBG("%s %d,%d %dx%d --> %d,%d\n", __FUNCTION__,
+ DBG("%s %d,%d %dx%d --> %d,%d\n", __FUNCTION__,
srcx, srcy, width, height, dstx, dsty);
if (!src || !dst || type != GL_COLOR)
@@ -230,7 +231,8 @@ do_texture_copypixels(GLcontext * ctx,
out:
intel->vtbl.leave_meta_state(intel);
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
}
UNLOCK_HARDWARE(intel);
@@ -254,6 +256,7 @@ do_blit_copypixels(GLcontext * ctx,
struct intel_context *intel = intel_context(ctx);
struct intel_region *dst = intel_drawbuf_region(intel);
struct intel_region *src = copypix_src_region(intel, type);
+ struct _DriFenceObject *fence;
/* Copypixels can be more than a straight copy. Ensure all the
* extra operations are disabled:
@@ -353,7 +356,8 @@ do_blit_copypixels(GLcontext * ctx,
}
out:
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
}
UNLOCK_HARDWARE(intel);
diff --git a/src/mesa/drivers/dri/i915tex/intel_pixel_draw.c b/src/mesa/drivers/dri/i915tex/intel_pixel_draw.c
index 959498f4f8..65c1442fcd 100644
--- a/src/mesa/drivers/dri/i915tex/intel_pixel_draw.c
+++ b/src/mesa/drivers/dri/i915tex/intel_pixel_draw.c
@@ -59,6 +59,7 @@ do_texture_drawpixels(GLcontext * ctx,
struct intel_buffer_object *src = intel_buffer_object(unpack->BufferObj);
GLuint rowLength = unpack->RowLength ? unpack->RowLength : width;
GLuint src_offset;
+ struct _DriFenceObject *fence;
if (INTEL_DEBUG & DEBUG_PIXEL)
fprintf(stderr, "%s\n", __FUNCTION__);
@@ -179,7 +180,8 @@ do_texture_drawpixels(GLcontext * ctx,
srcx, srcx + width, srcy + height, srcy);
out:
intel->vtbl.leave_meta_state(intel);
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
}
UNLOCK_HARDWARE(intel);
return GL_TRUE;
@@ -330,7 +332,7 @@ do_blit_drawpixels(GLcontext * ctx,
if (fence) {
driFenceFinish(fence, driFenceType(fence), GL_FALSE);
- driFenceUnReference(fence);
+ driFenceUnReference(&fence);
}
if (INTEL_DEBUG & DEBUG_PIXEL)
diff --git a/src/mesa/drivers/dri/i915tex/intel_pixel_read.c b/src/mesa/drivers/dri/i915tex/intel_pixel_read.c
index 8ebe93a433..28a7a6c4cc 100644
--- a/src/mesa/drivers/dri/i915tex/intel_pixel_read.c
+++ b/src/mesa/drivers/dri/i915tex/intel_pixel_read.c
@@ -282,7 +282,7 @@ do_blit_readpixels(GLcontext * ctx,
if (fence) {
driFenceFinish(fence, driFenceType(fence), GL_FALSE);
- driFenceUnReference(fence);
+ driFenceUnReference(&fence);
}
if (INTEL_DEBUG & DEBUG_PIXEL)
diff --git a/src/mesa/drivers/dri/i915tex/intel_regions.c b/src/mesa/drivers/dri/i915tex/intel_regions.c
index 29a33a002f..fc580bd410 100644
--- a/src/mesa/drivers/dri/i915tex/intel_regions.c
+++ b/src/mesa/drivers/dri/i915tex/intel_regions.c
@@ -98,8 +98,8 @@ intel_region_alloc(intelScreenPrivate *intelScreen,
region->height = height; /* needed? */
region->refcount = 1;
- driGenBuffers(intelScreen->regionPool,
- "region", 1, &region->buffer, 64, 0, 0);
+ driGenBuffers(intelScreen->surfacePool,
+ "surface", 1, &region->buffer, 64, 0, 0);
driBOData(region->buffer, pitch * cpp * height, NULL, NULL, 0);
return region;
}
@@ -164,7 +164,7 @@ intel_region_alloc_by_ref(intelScreenPrivate *intelScreen,
region->pitch = pitch;
region->height = height;
- driGenBuffers(intelScreen->regionPool,
+ driGenBuffers(intelScreen->drmPool,
name, 1, &region->buffer, 64,
0, 0);
driBOSetReferenced(region->buffer, handle);
@@ -369,7 +369,7 @@ intel_region_release_pbo(intelScreenPrivate *intelScreen,
driBOUnReference(region->buffer);
region->buffer = NULL;
- driGenBuffers(intelScreen->regionPool,
+ driGenBuffers(intelScreen->surfacePool,
"region", 1, &region->buffer, 64, 0, 0);
driBOData(region->buffer,
@@ -384,6 +384,7 @@ intel_region_cow(intelScreenPrivate *intelScreen, struct intel_region *region)
{
struct intel_context *intel = intelScreenContext(intelScreen);
struct intel_buffer_object *pbo = region->pbo;
+ struct _DriFenceObject *fence;
if (intel == NULL)
return;
@@ -397,7 +398,8 @@ intel_region_cow(intelScreenPrivate *intelScreen, struct intel_region *region)
/* Now blit from the texture buffer to the new buffer:
*/
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
if (!intel->locked) {
LOCK_HARDWARE(intel);
@@ -411,7 +413,8 @@ intel_region_cow(intelScreenPrivate *intelScreen, struct intel_region *region)
region->pitch, region->height,
GL_COPY);
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
UNLOCK_HARDWARE(intel);
}
else {
@@ -425,7 +428,8 @@ intel_region_cow(intelScreenPrivate *intelScreen, struct intel_region *region)
region->pitch, region->height,
GL_COPY);
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
}
}
diff --git a/src/mesa/drivers/dri/i915tex/intel_screen.c b/src/mesa/drivers/dri/i915tex/intel_screen.c
index 13c7b97a34..8e16f8dd39 100644
--- a/src/mesa/drivers/dri/i915tex/intel_screen.c
+++ b/src/mesa/drivers/dri/i915tex/intel_screen.c
@@ -330,45 +330,76 @@ intelUpdateScreenFromSAREA(intelScreenPrivate * intelScreen,
intelPrintSAREA(sarea);
}
-GLboolean
+static GLboolean
intelCreatePools(intelScreenPrivate *intelScreen)
{
- unsigned batchPoolSize = 1024*1024;
__DRIscreenPrivate * sPriv = intelScreen->driScrnPriv;
- if (intelScreen->havePools)
- return GL_TRUE;
-
- batchPoolSize /= intelScreen->maxBatchSize;
- intelScreen->regionPool = driDRMPoolInit(sPriv->fd);
+ intelScreen->drmPool = driDRMPoolInit(sPriv->fd);
+ if (!intelScreen->drmPool) {
+ _mesa_printf("Failed creating DRM buffer object pool.\n");
+ return GL_FALSE;
+ }
- if (!intelScreen->regionPool)
- return GL_FALSE;
+ /*
+ * Create 6 size buckets with sizes
+ * 64, 128, 256, 512, 1024, 2048 bytes.
+ * Make slabs one page in size. Larger buffers will have a
+ * unique drmBO of their own.
+ */
- intelScreen->texPool = intelScreen->regionPool;
+ intelScreen->surfacePool = driSlabPoolInit(sPriv->fd,
+ DRM_BO_FLAG_READ |
+ DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_MEM_TT,
+ DRM_BO_FLAG_READ |
+ DRM_BO_FLAG_WRITE |
+ DRM_BO_FLAG_MEM_TT,
+ 64, 6, 16, 4096, 0,
+ intelScreen->fMan);
+ if (!intelScreen->surfacePool) {
+ _mesa_printf("Failed creating Surface buffer object pool.\n");
+ goto out_err0;
+ }
+
+ /*
+ * Create a single size bucket. Buffer sizes are
+ * intelScreen->maxBatchSize. Slab sizes are 256 kB. This will
+ * waste on average 128kB per application, which should be rather OK.
+ * Smaller sizes will have a CPU impact due to more frequent
+ * slab allocation and freeing. Could be retested with linux
+ * 2.6.25 and upwards.
+ */
- intelScreen->batchPool = driBatchPoolInit(sPriv->fd,
- DRM_BO_FLAG_EXE |
- DRM_BO_FLAG_MEM_TT |
- DRM_BO_FLAG_MEM_LOCAL,
- intelScreen->maxBatchSize,
- batchPoolSize, 5, 0,
- "Batch Pool");
+ intelScreen->batchPool = driSlabPoolInit(sPriv->fd,
+ DRM_BO_FLAG_EXE |
+ DRM_BO_FLAG_MEM_TT,
+ DRM_BO_FLAG_EXE |
+ DRM_BO_FLAG_MEM_TT,
+ intelScreen->maxBatchSize,
+ 1, 40, 16*16384, 0,
+ intelScreen->fMan);
if (!intelScreen->batchPool) {
- fprintf(stderr, "Failed to initialize batch pool.\n");
- return GL_FALSE;
+ _mesa_printf("Failed creating Batch buffer object pool.\n");
+ goto out_err1;
}
intelScreen->mallocPool = driMallocPoolInit();
if (!intelScreen->mallocPool) {
- fprintf(stderr, "Failed to initialize malloc pool.\n");
- return GL_FALSE;
+ _mesa_printf("Failed creating Vertex buffer object pool.\n");
+ goto out_err2;
}
-
- intel_recreate_static_regions(intelScreen);
- intelScreen->havePools = GL_TRUE;
+ intel_recreate_static_regions(intelScreen);
return GL_TRUE;
+
+ out_err2:
+ driPoolTakeDown(intelScreen->batchPool);
+ out_err1:
+ driPoolTakeDown(intelScreen->surfacePool);
+ out_err0:
+ driPoolTakeDown(intelScreen->drmPool);
+ return GL_FALSE;
}
@@ -483,6 +514,20 @@ intelInitDriver(__DRIscreenPrivate * sPriv)
(*glx_enable_extension) (psc, "GLX_SGI_make_current_read");
}
+ intelScreen->mgr = driFenceMgrTTMInit(sPriv->fd);
+ if (!intelScreen->mgr) {
+ fprintf(stderr, "Failed to create fence manager.\n");
+ return GL_FALSE;
+ }
+ intelScreen->fMan = driInitFreeSlabManager(10, 10);
+ if (!intelScreen->fMan) {
+ fprintf(stderr, "Failed to create free slab manager.\n");
+ return GL_FALSE;
+ }
+
+ if (!intelCreatePools(intelScreen))
+ return GL_FALSE;
+
return GL_TRUE;
}
@@ -494,10 +539,13 @@ intelDestroyScreen(__DRIscreenPrivate * sPriv)
intelUnmapScreenRegions(intelScreen);
- if (intelScreen->havePools) {
- driPoolTakeDown(intelScreen->regionPool);
- driPoolTakeDown(intelScreen->batchPool);
- }
+ driPoolTakeDown(intelScreen->mallocPool);
+ driPoolTakeDown(intelScreen->batchPool);
+ driPoolTakeDown(intelScreen->surfacePool);
+ driPoolTakeDown(intelScreen->drmPool);
+
+ driFinishFreeSlabManager(intelScreen->fMan);
+ driFenceMgrUnReference(&intelScreen->mgr);
FREE(intelScreen);
sPriv->private = NULL;
}
diff --git a/src/mesa/drivers/dri/i915tex/intel_screen.h b/src/mesa/drivers/dri/i915tex/intel_screen.h
index 182091cb20..c2971a2918 100644
--- a/src/mesa/drivers/dri/i915tex/intel_screen.h
+++ b/src/mesa/drivers/dri/i915tex/intel_screen.h
@@ -84,12 +84,14 @@ typedef struct
driOptionCache optionCache;
struct _DriBufferPool *batchPool;
- struct _DriBufferPool *texPool;
- struct _DriBufferPool *regionPool;
+ struct _DriBufferPool *surfacePool;
+ struct _DriBufferPool *drmPool;
struct _DriBufferPool *mallocPool;
unsigned int maxBatchSize;
- GLboolean havePools;
unsigned batch_id;
+
+ struct _DriFenceMgr *mgr;
+ struct _DriFreeSlabManager *fMan;
} intelScreenPrivate;
@@ -116,18 +118,9 @@ extern void intelSwapBuffers(__DRIdrawablePrivate * dPriv);
extern void
intelCopySubBuffer(__DRIdrawablePrivate * dPriv, int x, int y, int w, int h);
-extern struct _DriBufferPool *driBatchPoolInit(int fd, uint64_t flags,
- unsigned long bufSize,
- unsigned numBufs,
- unsigned checkDelayed,
- unsigned pageAlignment,
- const char *name);
-extern struct _DriBufferPool *driMallocPoolInit(void);
extern struct intel_context *intelScreenContext(intelScreenPrivate *intelScreen);
extern void
intelUpdateScreenRotation(__DRIscreenPrivate * sPriv, drmI830Sarea * sarea);
-extern GLboolean
-intelCreatePools(intelScreenPrivate *intelScreen);
#endif
diff --git a/src/mesa/drivers/dri/i915tex/intel_tex_copy.c b/src/mesa/drivers/dri/i915tex/intel_tex_copy.c
index fbd5823c2b..0ca716e1e3 100644
--- a/src/mesa/drivers/dri/i915tex/intel_tex_copy.c
+++ b/src/mesa/drivers/dri/i915tex/intel_tex_copy.c
@@ -109,6 +109,7 @@ do_copy_texsubimage(struct intel_context *intel,
const GLint orig_x = x;
const GLint orig_y = y;
const struct gl_framebuffer *fb = ctx->DrawBuffer;
+ struct _DriFenceObject *fence;
if (_mesa_clip_to_region(fb->_Xmin, fb->_Ymin, fb->_Xmax, fb->_Ymax,
&x, &y, &width, &height)) {
@@ -148,7 +149,8 @@ do_copy_texsubimage(struct intel_context *intel,
x, y + height, dstx, dsty, width, height,
GL_COPY); /* ? */
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
}
}
diff --git a/src/mesa/drivers/dri/i915tex/intel_tex_image.c b/src/mesa/drivers/dri/i915tex/intel_tex_image.c
index 306ac8e9fb..0109de790f 100644
--- a/src/mesa/drivers/dri/i915tex/intel_tex_image.c
+++ b/src/mesa/drivers/dri/i915tex/intel_tex_image.c
@@ -226,7 +226,7 @@ try_pbo_upload(struct intel_context *intel,
struct _DriBufferObject *dst_buffer =
intel_region_buffer(intel->intelScreen, intelImage->mt->region,
INTEL_WRITE_FULL);
-
+ struct _DriFenceObject *fence;
intelEmitCopyBlit(intel,
intelImage->mt->cpp,
@@ -235,7 +235,8 @@ try_pbo_upload(struct intel_context *intel,
0, 0, 0, 0, width, height,
GL_COPY);
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
}
UNLOCK_HARDWARE(intel);
diff --git a/src/mesa/drivers/dri/i915tex/intel_tex_validate.c b/src/mesa/drivers/dri/i915tex/intel_tex_validate.c
index 261a29bd96..1eaad2b608 100644
--- a/src/mesa/drivers/dri/i915tex/intel_tex_validate.c
+++ b/src/mesa/drivers/dri/i915tex/intel_tex_validate.c
@@ -111,10 +111,9 @@ intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
GLuint face, i;
GLuint nr_faces = 0;
struct intel_texture_image *firstImage;
-
GLboolean need_flush = GL_FALSE;
- /* We know/require this is true by now:
+ /* We know/require this is true by now:
*/
assert(intelObj->base.Complete);
@@ -211,8 +210,10 @@ intel_finalize_mipmap_tree(struct intel_context *intel, GLuint unit)
}
}
- if (need_flush)
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ if (need_flush) {
+ struct _DriFenceObject *fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
+ }
return GL_TRUE;
}
diff --git a/src/mesa/drivers/dri/i915tex/intel_tris.c b/src/mesa/drivers/dri/i915tex/intel_tris.c
index 7fdea90ec5..738170fd33 100644
--- a/src/mesa/drivers/dri/i915tex/intel_tris.c
+++ b/src/mesa/drivers/dri/i915tex/intel_tris.c
@@ -96,7 +96,9 @@ intelStartInlinePrimitive(struct intel_context *intel,
* preamble.
*/
if (intel_batchbuffer_space(intel->batch) < 100) {
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ struct _DriFenceObject *fence;
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
intel->vtbl.emit_state(intel);
}
@@ -129,9 +131,11 @@ intelWrapInlinePrimitive(struct intel_context *intel)
{
GLuint prim = intel->prim.primitive;
GLuint batchflags = intel->batch->flags;
+ struct _DriFenceObject *fence;
intel_flush_inline_primitive(intel);
- driFenceUnReference(intel_batchbuffer_flush(intel->batch));
+ fence = intel_batchbuffer_flush(intel->batch);
+ driFenceUnReference(&fence);
intelStartInlinePrimitive(intel, prim, batchflags); /* ??? */
}
diff --git a/src/mesa/drivers/dri/i915tex/ws_dri_batchpool.c b/src/mesa/drivers/dri/i915tex/ws_dri_batchpool.c
deleted file mode 100644
index 03e9e93462..0000000000
--- a/src/mesa/drivers/dri/i915tex/ws_dri_batchpool.c
+++ /dev/null
@@ -1,457 +0,0 @@
-/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
- */
-
-#include <xf86drm.h>
-#include <stdlib.h>
-#include <errno.h>
-#include "imports.h"
-#include "glthread.h"
-#include "ws_dri_bufpool.h"
-#include "ws_dri_bufmgr.h"
-#include "intel_screen.h"
-
-typedef struct
-{
- drmMMListHead head;
- struct _BPool *parent;
- struct _DriFenceObject *fence;
- unsigned long start;
- int unfenced;
- int mapped;
-} BBuf;
-
-typedef struct _BPool
-{
- _glthread_Mutex mutex;
- unsigned long bufSize;
- unsigned poolSize;
- unsigned numFree;
- unsigned numTot;
- unsigned numDelayed;
- unsigned checkDelayed;
- unsigned fenceType;
- drmMMListHead free;
- drmMMListHead delayed;
- drmMMListHead head;
- drmBO kernelBO;
- void *virtual;
- BBuf *bufs;
- const char *name;
-} BPool;
-
-
-static BPool *
-createBPool(int fd, unsigned long bufSize, unsigned numBufs, uint64_t flags,
- unsigned checkDelayed, unsigned pageAlignment, const char *name)
-{
- BPool *p = (BPool *) malloc(sizeof(*p));
- BBuf *buf;
- int i;
-
- if (!p)
- return NULL;
-
- p->bufs = (BBuf *) malloc(numBufs * sizeof(*p->bufs));
- if (!p->bufs) {
- free(p);
- return NULL;
- }
-
- DRMINITLISTHEAD(&p->free);
- DRMINITLISTHEAD(&p->head);
- DRMINITLISTHEAD(&p->delayed);
-
- p->numTot = numBufs;
- p->numFree = numBufs;
- p->bufSize = bufSize;
- p->numDelayed = 0;
- p->checkDelayed = checkDelayed;
- p->name = name;
-
- _glthread_INIT_MUTEX(p->mutex);
-
- if (drmBOCreate(fd, numBufs * bufSize, pageAlignment, NULL,
- flags, DRM_BO_HINT_DONT_FENCE, &p->kernelBO)) {
- free(p->bufs);
- free(p);
- return NULL;
- }
- if (drmBOMap(fd, &p->kernelBO, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0,
- &p->virtual)) {
- drmBOUnreference(fd, &p->kernelBO);
- free(p->bufs);
- free(p);
- return NULL;
- }
-
- /*
- * We unmap the buffer so that we can validate it later. Note that this is
- * just a synchronizing operation. The buffer will have a virtual mapping
- * until it is destroyed.
- */
-
- drmBOUnmap(fd, &p->kernelBO);
- p->fenceType = p->kernelBO.fenceFlags;
-
- buf = p->bufs;
- for (i = 0; i < numBufs; ++i) {
- buf->parent = p;
- buf->fence = NULL;
- buf->start = i * bufSize;
- buf->mapped = 0;
- buf->unfenced = 0;
- DRMLISTADDTAIL(&buf->head, &p->free);
- buf++;
- }
-
- return p;
-}
-
-
-static void
-pool_checkFree(BPool * p, int wait)
-{
- drmMMListHead *list, *prev;
- BBuf *buf;
- int signaled = 0;
- int i;
-
- list = p->delayed.next;
-
- /* Only examine the oldest 1/3 of delayed buffers:
- */
- if (p->numDelayed > 3) {
- for (i = 0; i < p->numDelayed; i += 3) {
- list = list->next;
- }
- }
-
- prev = list->prev;
- for (; list != &p->delayed; list = prev, prev = list->prev) {
-
- buf = DRMLISTENTRY(BBuf, list, head);
-
- if (!signaled) {
- if (wait) {
- driFenceFinish(buf->fence, p->kernelBO.fenceFlags, 0);
- signaled = 1;
- }
- else {
- signaled = driFenceSignaled(buf->fence, p->kernelBO.fenceFlags);
- }
- }
-
- if (!signaled)
- break;
-
- driFenceUnReference(buf->fence);
- buf->fence = NULL;
- DRMLISTDEL(list);
- p->numDelayed--;
- DRMLISTADD(list, &p->free);
- p->numFree++;
- }
-}
-
-static void *
-pool_create(struct _DriBufferPool *pool,
- unsigned long size, uint64_t flags, unsigned hint,
- unsigned alignment)
-{
- BPool *p = (BPool *) pool->data;
-
- drmMMListHead *item;
-
- if (alignment && (alignment != 4096))
- return NULL;
-
- _glthread_LOCK_MUTEX(p->mutex);
-
- if (size > p->bufSize) {
- _mesa_printf( "Requested size %lu, but fixed buffer size is %lu.\n",
- size, p->bufSize);
- _glthread_UNLOCK_MUTEX(p->mutex);
- return NULL;
- }
-
- if (p->numFree == 0) {
- pool_checkFree(p, GL_TRUE);
- }
-
- if (p->numFree == 0) {
- _mesa_printf( "Out of fixed size buffer objects: %s\n", p->name);
- _glthread_UNLOCK_MUTEX(p->mutex);
- return NULL;
- }
-
- item = p->free.next;
-
- if (item == &p->free) {
- _mesa_printf( "Fixed size buffer pool corruption\n");
- abort();
- }
-
-
- DRMLISTDEL(item);
- --p->numFree;
-
- _glthread_UNLOCK_MUTEX(p->mutex);
- return (void *) DRMLISTENTRY(BBuf, item, head);
-}
-
-
-static int
-pool_destroy(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
-
- _glthread_LOCK_MUTEX(p->mutex);
-
- if (buf->fence) {
- DRMLISTADDTAIL(&buf->head, &p->delayed);
- p->numDelayed++;
- }
- else {
- buf->unfenced = 0;
- DRMLISTADD(&buf->head, &p->free);
- p->numFree++;
- }
-
- if ((p->numDelayed % p->checkDelayed) == 0)
- pool_checkFree(p, 0);
-
- _glthread_UNLOCK_MUTEX(p->mutex);
- return 0;
-}
-
-static int
-pool_waitIdle(struct _DriBufferPool *pool, void *private, int lazy)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = (BPool *) pool->data;
-
- driFenceFinish(buf->fence, p->kernelBO.fenceFlags, lazy);
- driFenceUnReference(buf->fence);
- buf->fence = NULL;
-
- return 0;
-}
-
-static int
-pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
- int hint, void **virtual)
-{
-
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
-
-
- /*
- * Currently Mesa doesn't have any condition variables to resolve this
- * cleanly in a multithreading environment.
- * We bail out instead.
- */
-
- if (buf->mapped) {
- _mesa_printf( "Trying to map already mapped buffer object\n");
- BM_CKFATAL(-EINVAL);
- }
-
- if (buf->fence) {
- if (hint & DRM_BO_HINT_DONT_BLOCK)
- return -EBUSY;
- else
- pool_waitIdle(pool, private, 0);
- }
-
- buf->mapped = GL_TRUE;
- _glthread_LOCK_MUTEX(p->mutex);
- *virtual = (unsigned char *) p->virtual + buf->start;
- _glthread_UNLOCK_MUTEX(p->mutex);
- return 0;
-}
-
-static int
-pool_unmap(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
-
- buf->mapped = 0;
- return 0;
-}
-
-static unsigned long
-pool_offset(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
- unsigned long offset;
-
- driReadLockKernelBO();
- assert(p->kernelBO.flags & DRM_BO_FLAG_NO_MOVE);
- offset = p->kernelBO.offset + buf->start;
- driReadUnlockKernelBO();
-
- return offset;
-}
-
-static unsigned long
-pool_poolOffset(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
-
- return buf->start;
-}
-
-static uint64_t
-pool_flags(struct _DriBufferPool *pool, void *private)
-{
- BPool *p = (BPool *) pool->data;
- uint64_t flags;
-
- driReadLockKernelBO();
- flags = p->kernelBO.flags;
- driReadUnlockKernelBO();
-
- return flags;
-}
-
-static unsigned long
-pool_size(struct _DriBufferPool *pool, void *private)
-{
- BPool *p = (BPool *) pool->data;
-
- return p->bufSize;
-}
-
-
-static int
-pool_fence(struct _DriBufferPool *pool, void *private,
- struct _DriFenceObject *fence)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
-
- _glthread_LOCK_MUTEX(p->mutex);
- if (buf->fence) {
- driFenceUnReference(buf->fence);
- }
- buf->fence = fence;
- buf->unfenced = 0;
- driFenceReference(buf->fence);
- _glthread_UNLOCK_MUTEX(p->mutex);
-
- return 0;
-}
-
-static drmBO *
-pool_kernel(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
-
- return &p->kernelBO;
-}
-
-static int
-pool_validate(struct _DriBufferPool *pool, void *private)
-{
- BBuf *buf = (BBuf *) private;
- BPool *p = buf->parent;
- _glthread_LOCK_MUTEX(p->mutex);
- buf->unfenced = GL_TRUE;
- _glthread_UNLOCK_MUTEX(p->mutex);
- return 0;
-}
-
-static void
-pool_takedown(struct _DriBufferPool *pool)
-{
- BPool *p = (BPool *) pool->data;
-
- /*
- * Wait on outstanding fences.
- */
-
- _glthread_LOCK_MUTEX(p->mutex);
- while ((p->numFree < p->numTot) && p->numDelayed) {
- _glthread_UNLOCK_MUTEX(p->mutex);
- sched_yield();
- pool_checkFree(p, GL_TRUE);
- _glthread_LOCK_MUTEX(p->mutex);
- }
-
- driReadLockKernelBO();
- drmBOUnreference(pool->fd, &p->kernelBO);
- driReadUnlockKernelBO();
-
- free(p->bufs);
- _glthread_UNLOCK_MUTEX(p->mutex);
- free(p);
- free(pool);
-}
-
-
-struct _DriBufferPool *
-driBatchPoolInit(int fd, uint64_t flags,
- unsigned long bufSize,
- unsigned numBufs, unsigned checkDelayed,
- unsigned pageAlignment,
- const char *name)
-{
- struct _DriBufferPool *pool;
-
- pool = (struct _DriBufferPool *) malloc(sizeof(*pool));
- if (!pool)
- return NULL;
-
- pool->data = createBPool(fd, bufSize, numBufs, flags, checkDelayed,
- pageAlignment, name);
- if (!pool->data)
- return NULL;
-
- pool->fd = fd;
- pool->map = &pool_map;
- pool->unmap = &pool_unmap;
- pool->destroy = &pool_destroy;
- pool->offset = &pool_offset;
- pool->poolOffset = &pool_poolOffset;
- pool->flags = &pool_flags;
- pool->size = &pool_size;
- pool->create = &pool_create;
- pool->fence = &pool_fence;
- pool->kernel = &pool_kernel;
- pool->validate = &pool_validate;
- pool->waitIdle = &pool_waitIdle;
- pool->takeDown = &pool_takedown;
- return pool;
-}
diff --git a/src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.c b/src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.c
index 1826b37d0e..97d7f3b7c7 100644
--- a/src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.c
+++ b/src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.c
@@ -38,6 +38,7 @@
#include "string.h"
#include "imports.h"
#include "ws_dri_bufpool.h"
+#include "ws_dri_fencemgr.h"
/*
* This lock is here to protect drmBO structs changing underneath us during a
@@ -275,15 +276,6 @@ void driReadUnlockKernelBO(void)
* buffer object pools.
*/
-typedef struct _DriFenceObject
-{
- int fd;
- _glthread_Mutex mutex;
- int refCount;
- const char *name;
- drmFence fence;
-} DriFenceObject;
-
typedef struct _DriBufferObject
{
DriBufferPool *pool;
@@ -318,92 +310,19 @@ bmError(int val, const char *file, const char *function, int line)
#endif
}
-unsigned
-driFenceType(DriFenceObject * fence)
-{
- unsigned ret;
-
- _glthread_LOCK_MUTEX(fence->mutex);
- ret = fence->fence.type;
- _glthread_UNLOCK_MUTEX(fence->mutex);
-
- return ret;
-}
-
-
-DriFenceObject *
-driFenceReference(DriFenceObject * fence)
-{
- _glthread_LOCK_MUTEX(fence->mutex);
- ++fence->refCount;
-
- _glthread_UNLOCK_MUTEX(fence->mutex);
-
- return fence;
-}
-
-void
-driFenceUnReference(DriFenceObject * fence)
-{
- if (!fence)
- return;
-
- _glthread_LOCK_MUTEX(fence->mutex);
- if (--fence->refCount == 0) {
-
- /*
- * At this point nothing can be waiting on the fence mutex.
- * However, unlock it before destroying.
- */
-
- _glthread_UNLOCK_MUTEX(fence->mutex);
- drmFenceUnreference(fence->fd, &fence->fence);
- free(fence);
- return;
- }
- _glthread_UNLOCK_MUTEX(fence->mutex);
-}
-
-int
-driFenceFinish(DriFenceObject * fence, unsigned type, int lazy)
-{
- int ret;
- unsigned flags = (lazy) ? DRM_FENCE_FLAG_WAIT_LAZY : 0;
-
- _glthread_LOCK_MUTEX(fence->mutex);
- ret = drmFenceWait(fence->fd, flags, &fence->fence, type);
- _glthread_UNLOCK_MUTEX(fence->mutex);
- return ret;
-}
-
-int
-driFenceSignaled(DriFenceObject * fence, unsigned type)
-{
- int signaled;
- int ret;
-
- if (fence == NULL)
- return GL_TRUE;
-
- _glthread_LOCK_MUTEX(fence->mutex);
- ret = drmFenceSignaled(fence->fd, &fence->fence, type, &signaled);
- _glthread_UNLOCK_MUTEX(fence->mutex);
- BM_CKFATAL(ret);
- return signaled;
-}
-
-
extern drmBO *
driBOKernel(struct _DriBufferObject *buf)
{
drmBO *ret;
+ driReadLockKernelBO();
_glthread_LOCK_MUTEX(buf->mutex);
assert(buf->private != NULL);
ret = buf->pool->kernel(buf->pool, buf->private);
if (!ret)
BM_CKFATAL(-EINVAL);
_glthread_UNLOCK_MUTEX(buf->mutex);
+ driReadUnlockKernelBO();
return ret;
}
@@ -428,11 +347,6 @@ driBOMap(struct _DriBufferObject *buf, unsigned flags, unsigned hint)
void *virtual;
int retval;
- /*
- * This function may block. Is it sane to keep the mutex held during
- * that time??
- */
-
_glthread_LOCK_MUTEX(buf->mutex);
assert(buf->private != NULL);
retval = buf->pool->map(buf->pool, buf->private, flags, hint, &virtual);
@@ -483,9 +397,11 @@ driBOFlags(struct _DriBufferObject *buf)
assert(buf->private != NULL);
+ driReadLockKernelBO();
_glthread_LOCK_MUTEX(buf->mutex);
ret = buf->pool->flags(buf->pool, buf->private);
_glthread_UNLOCK_MUTEX(buf->mutex);
+ driReadUnlockKernelBO();
return ret;
}
@@ -572,10 +488,9 @@ driBOData(struct _DriBufferObject *buf,
if (buf->private)
buf->pool->destroy(buf->pool, buf->private);
- buf->pool = newPool;
pool = newPool;
-
- buf->private = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
+ buf->pool = newPool;
+ buf->private = pool->create(pool, size, flags, DRM_BO_HINT_DONT_FENCE,
buf->alignment);
if (!buf->private)
retval = -ENOMEM;
@@ -918,24 +833,17 @@ driBOUnrefUserList(struct _DriBufferList *list)
}
struct _DriFenceObject *
-driBOFenceUserList(int fd, struct _DriBufferList *list, const char *name,
+driBOFenceUserList(struct _DriFenceMgr *mgr,
+ struct _DriBufferList *list, const char *name,
drmFence *kFence)
{
- DriFenceObject *fence = (DriFenceObject *) malloc(sizeof(*fence));
+ struct _DriFenceObject *fence;
struct _DriBufferObject *buf;
void *curBuf;
- if (!fence)
- BM_CKFATAL(-EINVAL);
-
- fence->refCount = 1;
- fence->name = name;
- fence->fd = fd;
- fence->fence = *kFence;
-
- _glthread_INIT_MUTEX(fence->mutex);
-
- curBuf = drmBOListIterator(&list->driBuffers);
+ fence = driFenceCreate(mgr, kFence->fence_class, kFence->type,
+ kFence, sizeof(*kFence));
+ curBuf = drmBOListIterator(&list->driBuffers);
/*
* User-space fencing callbacks.
diff --git a/src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.h b/src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.h
index 2dba177263..0941c11cea 100644
--- a/src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.h
+++ b/src/mesa/drivers/dri/i915tex/ws_dri_bufmgr.h
@@ -34,6 +34,7 @@
#define _PSB_BUFMGR_H_
#include <xf86mm.h>
#include "i915_drm.h"
+#include "ws_dri_fencemgr.h"
typedef struct _drmBONode
{
@@ -58,16 +59,6 @@ struct _DriBufferObject;
struct _DriBufferPool;
struct _DriBufferList;
-extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
-
-extern void driFenceUnReference(struct _DriFenceObject *fence);
-
-extern int
-driFenceFinish(struct _DriFenceObject *fence, unsigned type, int lazy);
-
-extern int driFenceSignaled(struct _DriFenceObject *fence, unsigned type);
-extern unsigned driFenceType(struct _DriFenceObject *fence);
-
/*
* Return a pointer to the libdrm buffer object this DriBufferObject
* uses.
@@ -110,7 +101,7 @@ extern void driBOAddListItem(struct _DriBufferList * list,
extern void driBOValidateList(int fd, struct _DriBufferList * list);
extern void driBOFreeList(struct _DriBufferList * list);
-extern struct _DriFenceObject *driBOFenceUserList(int fd,
+extern struct _DriFenceObject *driBOFenceUserList(struct _DriFenceMgr *mgr,
struct _DriBufferList *list,
const char *name,
drmFence *kFence);
diff --git a/src/mesa/drivers/dri/i915tex/ws_dri_bufpool.h b/src/mesa/drivers/dri/i915tex/ws_dri_bufpool.h
index 07b3cd2939..0b8744febf 100644
--- a/src/mesa/drivers/dri/i915tex/ws_dri_bufpool.h
+++ b/src/mesa/drivers/dri/i915tex/ws_dri_bufpool.h
@@ -81,6 +81,20 @@ extern void bmError(int val, const char *file, const char *function,
*/
extern struct _DriBufferPool *driDRMPoolInit(int fd);
-extern struct _DriBufferPool *driDRMStaticPoolInit(int fd);
+extern struct _DriBufferPool *driMallocPoolInit(void);
+
+struct _DriFreeSlabManager;
+extern struct _DriBufferPool * driSlabPoolInit(int fd, uint64_t flags,
+ uint64_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _DriFreeSlabManager *fMan);
+extern void driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan);
+extern struct _DriFreeSlabManager *
+driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec);
+
#endif
diff --git a/src/mesa/drivers/dri/i915tex/ws_dri_fencemgr.c b/src/mesa/drivers/dri/i915tex/ws_dri_fencemgr.c
new file mode 100644
index 0000000000..8aaef1c620
--- /dev/null
+++ b/src/mesa/drivers/dri/i915tex/ws_dri_fencemgr.c
@@ -0,0 +1,372 @@
+#include "ws_dri_fencemgr.h"
+#include "glthread.h"
+#include <xf86mm.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Note: Locking order is
+ * _DriFenceObject::mutex
+ * _DriFenceMgr::mutex
+ */
+
+struct _DriFenceMgr {
+ /*
+ * Constant members. Need no mutex protection.
+ */
+ struct _DriFenceMgrCreateInfo info;
+ void *private;
+
+ /*
+ * These members are protected by this->mutex
+ */
+ _glthread_Mutex mutex;
+ int refCount;
+ drmMMListHead *heads;
+};
+
+struct _DriFenceObject {
+
+ /*
+ * These members are constant and need no mutex protection.
+ */
+ struct _DriFenceMgr *mgr;
+ uint32_t fence_class;
+ uint32_t fence_type;
+
+ /*
+ * These members are protected by mgr->mutex.
+ */
+ drmMMListHead head;
+ int refCount;
+
+ /*
+ * These members are protected by this->mutex.
+ */
+ _glthread_Mutex mutex;
+ uint32_t signaled_type;
+ void *private;
+};
+
+uint32_t
+driFenceType(struct _DriFenceObject *fence)
+{
+ return fence->fence_type;
+}
+
+struct _DriFenceMgr *
+driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info)
+{
+ struct _DriFenceMgr *tmp;
+ uint32_t i;
+
+ tmp = calloc(1, sizeof(*tmp));
+ if (!tmp)
+ return NULL;
+
+ _glthread_INIT_MUTEX(tmp->mutex);
+ _glthread_LOCK_MUTEX(tmp->mutex);
+ tmp->refCount = 1;
+ tmp->info = *info;
+ tmp->heads = calloc(tmp->info.num_classes, sizeof(*tmp->heads));
+ if (!tmp->heads)
+ goto out_err;
+
+ for (i=0; i<tmp->info.num_classes; ++i) {
+ DRMINITLISTHEAD(&tmp->heads[i]);
+ }
+ _glthread_UNLOCK_MUTEX(tmp->mutex);
+ return tmp;
+
+ out_err:
+ if (tmp)
+ free(tmp);
+ return NULL;
+}
+
+static void
+driFenceMgrUnrefUnlock(struct _DriFenceMgr **pMgr)
+{
+ struct _DriFenceMgr *mgr = *pMgr;
+
+ *pMgr = NULL;
+ if (--mgr->refCount == 0)
+ free(mgr);
+ else
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+}
+
+void
+driFenceMgrUnReference(struct _DriFenceMgr **pMgr)
+{
+ _glthread_LOCK_MUTEX((*pMgr)->mutex);
+ driFenceMgrUnrefUnlock(pMgr);
+}
+
+static void
+driFenceUnReferenceLocked(struct _DriFenceObject **pFence)
+{
+ struct _DriFenceObject *fence = *pFence;
+ struct _DriFenceMgr *mgr = fence->mgr;
+
+ *pFence = NULL;
+ if (--fence->refCount == 0) {
+ DRMLISTDELINIT(&fence->head);
+ if (fence->private)
+ mgr->info.unreference(mgr, &fence->private);
+ fence->mgr = NULL;
+ --mgr->refCount;
+ free(fence);
+ }
+}
+
+
+static void
+driSignalPreviousFencesLocked(struct _DriFenceMgr *mgr,
+ drmMMListHead *list,
+ uint32_t fence_class,
+ uint32_t fence_type)
+{
+ struct _DriFenceObject *entry;
+ drmMMListHead *prev;
+
+ while(list != &mgr->heads[fence_class]) {
+ entry = DRMLISTENTRY(struct _DriFenceObject, list, head);
+
+ /*
+ * Up refcount so that entry doesn't disappear from under us
+ * when we unlock-relock mgr to get the correct locking order.
+ */
+
+ ++entry->refCount;
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ _glthread_LOCK_MUTEX(entry->mutex);
+ _glthread_LOCK_MUTEX(mgr->mutex);
+
+ prev = list->prev;
+
+
+
+ if (list->prev == list) {
+
+ /*
+ * Somebody else removed the entry from the list.
+ */
+
+ _glthread_UNLOCK_MUTEX(entry->mutex);
+ driFenceUnReferenceLocked(&entry);
+ return;
+ }
+
+ entry->signaled_type |= (fence_type & entry->fence_type);
+ if (entry->signaled_type == entry->fence_type) {
+ DRMLISTDELINIT(list);
+ mgr->info.unreference(mgr, &entry->private);
+ }
+ _glthread_UNLOCK_MUTEX(entry->mutex);
+ driFenceUnReferenceLocked(&entry);
+ list = prev;
+ }
+}
+
+
+int
+driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
+ int lazy_hint)
+{
+ struct _DriFenceMgr *mgr = fence->mgr;
+ int ret = 0;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+
+ if ((fence->signaled_type & fence_type) == fence_type)
+ goto out0;
+
+ ret = mgr->info.finish(mgr, fence->private, fence_type, lazy_hint);
+ if (ret)
+ goto out0;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+
+ driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
+ fence_type);
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ return 0;
+
+ out0:
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ return ret;
+}
+
+uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence)
+{
+ uint32_t ret;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ ret = fence->signaled_type;
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+
+ return ret;
+}
+
+int
+driFenceSignaledType(struct _DriFenceObject *fence, uint32_t flush_type,
+ uint32_t *signaled)
+{
+ int ret = 0;
+ struct _DriFenceMgr *mgr;
+
+ _glthread_LOCK_MUTEX(fence->mutex);
+ mgr = fence->mgr;
+ *signaled = fence->signaled_type;
+ if ((fence->signaled_type & flush_type) == flush_type)
+ goto out0;
+
+ ret = mgr->info.signaled(mgr, fence->private, flush_type, signaled);
+ if (ret) {
+ *signaled = fence->signaled_type;
+ goto out0;
+ }
+
+ if ((fence->signaled_type | *signaled) == fence->signaled_type)
+ goto out0;
+
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+
+ driSignalPreviousFencesLocked(mgr, &fence->head, fence->fence_class,
+ *signaled);
+
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ return 0;
+ out0:
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ return ret;
+}
+
+struct _DriFenceObject *
+driFenceReference(struct _DriFenceObject *fence)
+{
+ _glthread_LOCK_MUTEX(fence->mgr->mutex);
+ ++fence->refCount;
+ _glthread_UNLOCK_MUTEX(fence->mgr->mutex);
+ return fence;
+}
+
+void
+driFenceUnReference(struct _DriFenceObject **pFence)
+{
+ struct _DriFenceMgr *mgr;
+
+ if (*pFence == NULL)
+ return;
+
+ mgr = (*pFence)->mgr;
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ ++mgr->refCount;
+ driFenceUnReferenceLocked(pFence);
+ driFenceMgrUnrefUnlock(&mgr);
+}
+
+struct _DriFenceObject
+*driFenceCreate(struct _DriFenceMgr *mgr, uint32_t fence_class,
+ uint32_t fence_type, void *private, size_t private_size)
+{
+ struct _DriFenceObject *fence;
+ size_t fence_size = sizeof(*fence);
+
+ if (private_size)
+ fence_size = ((fence_size + 15) & ~15);
+
+ fence = calloc(1, fence_size + private_size);
+
+ if (!fence) {
+ int ret = mgr->info.finish(mgr, private, fence_type, 0);
+
+ if (ret)
+ usleep(10000000);
+
+ return NULL;
+ }
+
+ _glthread_INIT_MUTEX(fence->mutex);
+ _glthread_LOCK_MUTEX(fence->mutex);
+ _glthread_LOCK_MUTEX(mgr->mutex);
+ fence->refCount = 1;
+ DRMLISTADDTAIL(&fence->head, &mgr->heads[fence_class]);
+ fence->mgr = mgr;
+ ++mgr->refCount;
+ _glthread_UNLOCK_MUTEX(mgr->mutex);
+ fence->fence_class = fence_class;
+ fence->fence_type = fence_type;
+ fence->signaled_type = 0;
+ fence->private = private;
+ if (private_size) {
+ fence->private = (void *)(((uint8_t *) fence) + fence_size);
+ memcpy(fence->private, private, private_size);
+ }
+
+ _glthread_UNLOCK_MUTEX(fence->mutex);
+ return fence;
+}
+
+
+static int
+tSignaled(struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
+ uint32_t *signaled_type)
+{
+ long fd = (long) mgr->private;
+ int dummy;
+ drmFence *fence = (drmFence *) private;
+ int ret;
+
+ *signaled_type = 0;
+ ret = drmFenceSignaled((int) fd, fence, flush_type, &dummy);
+ if (ret)
+ return ret;
+
+ *signaled_type = fence->signaled;
+
+ return 0;
+}
+
+static int
+tFinish(struct _DriFenceMgr *mgr, void *private, uint32_t fence_type,
+ int lazy_hint)
+{
+ long fd = (long) mgr->private;
+ unsigned flags = lazy_hint ? DRM_FENCE_FLAG_WAIT_LAZY : 0;
+
+ return drmFenceWait((int)fd, flags, (drmFence *) private, fence_type);
+}
+
+static int
+tUnref(struct _DriFenceMgr *mgr, void **private)
+{
+ long fd = (long) mgr->private;
+ drmFence *fence = (drmFence *) *private;
+ *private = NULL;
+
+ return drmFenceUnreference(fd, fence);
+}
+
+struct _DriFenceMgr *driFenceMgrTTMInit(int fd)
+{
+ struct _DriFenceMgrCreateInfo info;
+ struct _DriFenceMgr *mgr;
+
+ info.flags = DRI_FENCE_CLASS_ORDERED;
+ info.num_classes = 4;
+ info.signaled = tSignaled;
+ info.finish = tFinish;
+ info.unreference = tUnref;
+
+ mgr = driFenceMgrCreate(&info);
+ if (mgr == NULL)
+ return NULL;
+
+ mgr->private = (void *) (long) fd;
+ return mgr;
+}
+
diff --git a/src/mesa/drivers/dri/i915tex/ws_dri_fencemgr.h b/src/mesa/drivers/dri/i915tex/ws_dri_fencemgr.h
new file mode 100644
index 0000000000..4ea58dfe18
--- /dev/null
+++ b/src/mesa/drivers/dri/i915tex/ws_dri_fencemgr.h
@@ -0,0 +1,115 @@
+#ifndef DRI_FENCEMGR_H
+#define DRI_FENCEMGR_H
+
+#include <stdint.h>
+#include <stdlib.h>
+
+struct _DriFenceObject;
+struct _DriFenceMgr;
+
+/*
+ * Do a quick check to see if the fence manager has registered the fence
+ * object as signaled. Note that this function may return a false negative
+ * answer.
+ */
+extern uint32_t driFenceSignaledTypeCached(struct _DriFenceObject *fence);
+
+/*
+ * Check if the fence object is signaled. This function can be substantially
+ * more expensive to call than the above function, but will not return a false
+ * negative answer. The argument "flush_type" sets the types that the
+ * underlying mechanism must make sure will eventually signal.
+ */
+extern int driFenceSignaledType(struct _DriFenceObject *fence,
+ uint32_t flush_type, uint32_t *signaled);
+
+/*
+ * Convenience functions.
+ */
+
+static inline int driFenceSignaled(struct _DriFenceObject *fence,
+ uint32_t flush_type)
+{
+ uint32_t signaled_types;
+ int ret = driFenceSignaledType(fence, flush_type, &signaled_types);
+ if (ret)
+ return 0;
+ return ((signaled_types & flush_type) == flush_type);
+}
+
+static inline int driFenceSignaledCached(struct _DriFenceObject *fence,
+ uint32_t flush_type)
+{
+ uint32_t signaled_types =
+ driFenceSignaledTypeCached(fence);
+
+ return ((signaled_types & flush_type) == flush_type);
+}
+
+/*
+ * Reference a fence object.
+ */
+extern struct _DriFenceObject *driFenceReference(struct _DriFenceObject *fence);
+
+/*
+ * Unreference a fence object. The fence object pointer will be reset to NULL.
+ */
+
+extern void driFenceUnReference(struct _DriFenceObject **pFence);
+
+
+/*
+ * Wait for a fence to signal the indicated fence_type.
+ * If "lazy_hint" is true, it indicates that the wait may sleep to avoid
+ * busy-wait polling.
+ */
+extern int driFenceFinish(struct _DriFenceObject *fence, uint32_t fence_type,
+ int lazy_hint);
+
+/*
+ * Create a DriFenceObject for manager "mgr".
+ *
+ * "private" is a pointer that should be used for the callbacks in
+ * struct _DriFenceMgrCreateInfo.
+ *
+ * if private_size is nonzero, then the info stored at *private, with size
+ * private_size will be copied and the fence manager will instead use a
+ * pointer to the copied data for the callbacks in
+ * struct _DriFenceMgrCreateInfo. In that case, the object pointed to by
+ * "private" may be destroyed after the call to driFenceCreate.
+ */
+extern struct _DriFenceObject *driFenceCreate(struct _DriFenceMgr *mgr,
+ uint32_t fence_class,
+ uint32_t fence_type,
+ void *private,
+ size_t private_size);
+
+extern uint32_t driFenceType(struct _DriFenceObject *fence);
+
+/*
+ * Fence creations are ordered. If a fence signals a fence_type,
+ * it is safe to assume that all fences of the same class that were
+ * created before that fence have signaled the same type.
+ */
+
+#define DRI_FENCE_CLASS_ORDERED (1 << 0)
+
+struct _DriFenceMgrCreateInfo {
+ uint32_t flags;
+ uint32_t num_classes;
+ int (*signaled) (struct _DriFenceMgr *mgr, void *private, uint32_t flush_type,
+ uint32_t *signaled_type);
+ int (*finish) (struct _DriFenceMgr *mgr, void *private, uint32_t fence_type, int lazy_hint);
+ int (*unreference) (struct _DriFenceMgr *mgr, void **private);
+};
+
+extern struct _DriFenceMgr *
+driFenceMgrCreate(const struct _DriFenceMgrCreateInfo *info);
+
+void
+driFenceMgrUnReference(struct _DriFenceMgr **pMgr);
+
+extern struct _DriFenceMgr *
+driFenceMgrTTMInit(int fd);
+
+#endif
diff --git a/src/mesa/drivers/dri/i915tex/ws_dri_slabpool.c b/src/mesa/drivers/dri/i915tex/ws_dri_slabpool.c
new file mode 100644
index 0000000000..c92b62a052
--- /dev/null
+++ b/src/mesa/drivers/dri/i915tex/ws_dri_slabpool.c
@@ -0,0 +1,916 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include <stdint.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <assert.h>
+#include "ws_dri_bufpool.h"
+#include "ws_dri_fencemgr.h"
+#include "ws_dri_bufmgr.h"
+#include "glthread.h"
+
+#define DRI_SLABPOOL_ALLOC_RETRIES 100
+
+struct _DriSlab;
+
+struct _DriSlabBuffer {
+ int isSlabBuffer;
+ drmBO *bo;
+ struct _DriFenceObject *fence;
+ struct _DriSlab *parent;
+ drmMMListHead head;
+ uint32_t mapCount;
+ uint32_t start;
+ uint32_t fenceType;
+};
+
+struct _DriKernelBO {
+ int fd;
+ drmBO bo;
+ drmMMListHead timeoutHead;
+ drmMMListHead head;
+ struct timeval timeFreed;
+ uint32_t pageAlignment;
+ void *virtual;
+};
+
+struct _DriSlab{
+ drmMMListHead head;
+ drmMMListHead freeBuffers;
+ uint32_t numBuffers;
+ uint32_t numFree;
+ struct _DriSlabBuffer *buffers;
+ struct _DriSlabSizeHeader *header;
+ struct _DriKernelBO *kbo;
+};
+
+
+struct _DriSlabSizeHeader {
+ drmMMListHead slabs;
+ drmMMListHead freeSlabs;
+ drmMMListHead delayedBuffers;
+ uint32_t numDelayed;
+ struct _DriSlabPool *slabPool;
+ uint32_t bufSize;
+ _glthread_Mutex mutex;
+};
+
+struct _DriFreeSlabManager {
+ struct timeval slabTimeout;
+ struct timeval checkInterval;
+ struct timeval nextCheck;
+ drmMMListHead timeoutList;
+ drmMMListHead unCached;
+ drmMMListHead cached;
+ _glthread_Mutex mutex;
+};
+
+
+struct _DriSlabPool {
+
+ /*
+ * The data of this structure remains constant after
+ * initialization and thus needs no mutex protection.
+ */
+
+ struct _DriFreeSlabManager *fMan;
+ uint64_t proposedFlags;
+ uint64_t validMask;
+ uint32_t *bucketSizes;
+ uint32_t numBuckets;
+ uint32_t pageSize;
+ int fd;
+ int pageAlignment;
+ int maxSlabSize;
+ int desiredNumBuffers;
+ struct _DriSlabSizeHeader *headers;
+};
+
+/*
+ * FIXME: Perhaps arrange timed-out slabs in size buckets for fast
+ * retrieval?
+ */
+
+
+/* Return nonzero iff *arg1 is at or after *arg2. */
+static inline int
+driTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
+{
+   return ((arg1->tv_sec > arg2->tv_sec) ||
+           ((arg1->tv_sec == arg2->tv_sec) &&
+            (arg1->tv_usec >= arg2->tv_usec)));
+}
+
+static inline void
+driTimeAdd(struct timeval *arg, struct timeval *add)
+{
+ unsigned int sec;
+
+ arg->tv_sec += add->tv_sec;
+ arg->tv_usec += add->tv_usec;
+ sec = arg->tv_usec / 1000000;
+ arg->tv_sec += sec;
+ arg->tv_usec -= sec*1000000;
+}
+
+static void
+driFreeKernelBO(struct _DriKernelBO *kbo)
+{
+ if (!kbo)
+ return;
+
+ (void) drmBOUnreference(kbo->fd, &kbo->bo);
+ free(kbo);
+}
+
+
+static void
+driFreeTimeoutKBOsLocked(struct _DriFreeSlabManager *fMan,
+ struct timeval *time)
+{
+ drmMMListHead *list, *next;
+ struct _DriKernelBO *kbo;
+
+ if (!driTimeAfterEq(time, &fMan->nextCheck))
+ return;
+
+ for (list = fMan->timeoutList.next, next = list->next;
+ list != &fMan->timeoutList;
+ list = next, next = list->next) {
+
+ kbo = DRMLISTENTRY(struct _DriKernelBO, list, timeoutHead);
+
+ if (!driTimeAfterEq(time, &kbo->timeFreed))
+ break;
+
+ DRMLISTDELINIT(&kbo->timeoutHead);
+ DRMLISTDELINIT(&kbo->head);
+ driFreeKernelBO(kbo);
+ }
+
+ fMan->nextCheck = *time;
+ driTimeAdd(&fMan->nextCheck, &fMan->checkInterval);
+}
+
+
+/*
+ * Add a _DriKernelBO to the free slab manager.
+ * This means that it is available for reuse, but if it's not
+ * reused in a while, it will be freed.
+ */
+
+static void
+driSetKernelBOFree(struct _DriFreeSlabManager *fMan,
+ struct _DriKernelBO *kbo)
+{
+ struct timeval time;
+
+ _glthread_LOCK_MUTEX(fMan->mutex);
+ gettimeofday(&time, NULL);
+ driTimeAdd(&time, &fMan->slabTimeout);
+
+ kbo->timeFreed = time;
+
+ if (kbo->bo.flags & DRM_BO_FLAG_CACHED)
+ DRMLISTADD(&kbo->head, &fMan->cached);
+ else
+ DRMLISTADD(&kbo->head, &fMan->unCached);
+
+ DRMLISTADDTAIL(&kbo->timeoutHead, &fMan->timeoutList);
+ driFreeTimeoutKBOsLocked(fMan, &time);
+
+ _glthread_UNLOCK_MUTEX(fMan->mutex);
+}
+
+/*
+ * Get a _DriKernelBO for us to use as storage for a slab.
+ *
+ */
+
+static struct _DriKernelBO *
+driAllocKernelBO(struct _DriSlabSizeHeader *header)
+{
+ struct _DriSlabPool *slabPool = header->slabPool;
+ struct _DriFreeSlabManager *fMan = slabPool->fMan;
+ drmMMListHead *list, *next, *head;
+ uint32_t size = header->bufSize * slabPool->desiredNumBuffers;
+ struct _DriKernelBO *kbo;
+ int ret;
+
+   /*
+    * FIXME: We should perhaps allow some variation in slab size in order
+    * to reuse slabs more efficiently.
+    */
+
+   /* Clamp to maxSlabSize and round up to a whole number of pages. */
+   size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
+   size = (size + slabPool->pageSize - 1) & ~(slabPool->pageSize - 1);
+ _glthread_LOCK_MUTEX(fMan->mutex);
+
+ kbo = NULL;
+
+ retry:
+ head = (slabPool->proposedFlags & DRM_BO_FLAG_CACHED) ?
+ &fMan->cached : &fMan->unCached;
+
+ for (list = head->next, next = list->next;
+ list != head;
+ list = next, next = list->next) {
+
+ kbo = DRMLISTENTRY(struct _DriKernelBO, list, head);
+
+ if ((kbo->bo.size == size) &&
+ (slabPool->pageAlignment == 0 ||
+ (kbo->pageAlignment % slabPool->pageAlignment) == 0)) {
+
+ DRMLISTDELINIT(&kbo->head);
+ DRMLISTDELINIT(&kbo->timeoutHead);
+ break;
+ }
+
+ kbo = NULL;
+ }
+
+ _glthread_UNLOCK_MUTEX(fMan->mutex);
+
+ if (kbo) {
+      ret = drmBOSetStatus(kbo->fd, &kbo->bo, slabPool->proposedFlags,
+ (slabPool->proposedFlags ^ kbo->bo.flags),
+ DRM_BO_HINT_DONT_FENCE, 0, 0);
+ if (ret == 0)
+ return kbo;
+
+ driFreeKernelBO(kbo);
+ kbo = NULL;
+ goto retry;
+ }
+
+ kbo = calloc(1, sizeof(struct _DriKernelBO));
+ if (!kbo)
+ return NULL;
+
+ kbo->fd = slabPool->fd;
+ DRMINITLISTHEAD(&kbo->head);
+ DRMINITLISTHEAD(&kbo->timeoutHead);
+
+ ret = drmBOCreate(kbo->fd, size, slabPool->pageAlignment, NULL,
+ slabPool->proposedFlags,
+ DRM_BO_HINT_DONT_FENCE, &kbo->bo);
+ if (ret)
+ goto out_err0;
+
+ ret = drmBOMap(kbo->fd, &kbo->bo,
+ DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+ 0, &kbo->virtual);
+
+ if (ret)
+ goto out_err1;
+
+ ret = drmBOUnmap(kbo->fd, &kbo->bo);
+ if (ret)
+ goto out_err1;
+
+ return kbo;
+
+ out_err1:
+ drmBOUnreference(kbo->fd, &kbo->bo);
+ out_err0:
+ free(kbo);
+ return NULL;
+}
+
+
+static int
+driAllocSlab(struct _DriSlabSizeHeader *header)
+{
+ struct _DriSlab *slab;
+ struct _DriSlabBuffer *buf;
+ uint32_t numBuffers;
+ int ret;
+ int i;
+
+ slab = calloc(1, sizeof(*slab));
+ if (!slab)
+ return -ENOMEM;
+
+ slab->kbo = driAllocKernelBO(header);
+ if (!slab->kbo) {
+ ret = -ENOMEM;
+ goto out_err0;
+ }
+
+ numBuffers = slab->kbo->bo.size / header->bufSize;
+
+ slab->buffers = calloc(numBuffers, sizeof(*slab->buffers));
+ if (!slab->buffers) {
+ ret = -ENOMEM;
+ goto out_err1;
+ }
+
+ DRMINITLISTHEAD(&slab->head);
+ DRMINITLISTHEAD(&slab->freeBuffers);
+ slab->numBuffers = numBuffers;
+ slab->numFree = 0;
+ slab->header = header;
+
+ buf = slab->buffers;
+ for (i=0; i < numBuffers; ++i) {
+ buf->parent = slab;
+      buf->start = i * header->bufSize;
+ buf->mapCount = 0;
+ buf->isSlabBuffer = 1;
+ DRMLISTADDTAIL(&buf->head, &slab->freeBuffers);
+ slab->numFree++;
+ buf++;
+ }
+
+ DRMLISTADDTAIL(&slab->head, &header->slabs);
+
+ return 0;
+
+ out_err1:
+ driSetKernelBOFree(header->slabPool->fMan, slab->kbo);
+ free(slab->buffers);
+ out_err0:
+ free(slab);
+ return ret;
+}
+
+/*
+ * Delete a buffer from the slab header delayed list and put
+ * it on the slab free list.
+ */
+
+static void
+driSlabFreeBufferLocked(struct _DriSlabBuffer *buf)
+{
+ struct _DriSlab *slab = buf->parent;
+ struct _DriSlabSizeHeader *header = slab->header;
+ drmMMListHead *list = &buf->head;
+
+ DRMLISTDEL(list);
+ DRMLISTADDTAIL(list, &slab->freeBuffers);
+ slab->numFree++;
+
+   /*
+    * The slab was unlinked from the partially-free list when its last
+    * buffer was handed out; re-add it now that it has a free buffer again.
+    */
+   if (slab->head.next == &slab->head)
+      DRMLISTADDTAIL(&slab->head, &header->slabs);
+
+   /* The slab is now completely idle; move it to the free-slab list. */
+   if (slab->numFree == slab->numBuffers) {
+      list = &slab->head;
+      DRMLISTDEL(list);
+      DRMLISTADDTAIL(list, &header->freeSlabs);
+   }
+
+   /*
+    * Hand completely idle slabs back to the free slab manager, unless this
+    * slab is entirely free and other slabs with free buffers remain cached.
+    */
+   if (header->slabs.next == &header->slabs ||
+       slab->numFree != slab->numBuffers) {
+
+ drmMMListHead *next;
+ struct _DriFreeSlabManager *fMan = header->slabPool->fMan;
+
+ for (list = header->freeSlabs.next, next = list->next;
+ list != &header->freeSlabs;
+ list = next, next = list->next) {
+
+ slab = DRMLISTENTRY(struct _DriSlab, list, head);
+
+ DRMLISTDELINIT(list);
+ driSetKernelBOFree(fMan, slab->kbo);
+ free(slab->buffers);
+ free(slab);
+ }
+ }
+}
+
+static void
+driSlabCheckFreeLocked(struct _DriSlabSizeHeader *header, int wait)
+{
+ drmMMListHead *list, *prev;
+ struct _DriSlabBuffer *buf;
+ struct _DriSlab *slab;
+
+ int signaled = 0;
+ int i;
+ int ret;
+
+ list = header->delayedBuffers.next;
+
+ /* Only examine the oldest 1/3 of delayed buffers:
+ */
+ if (header->numDelayed > 3) {
+ for (i = 0; i < header->numDelayed; i += 3) {
+ list = list->next;
+ }
+ }
+
+ prev = list->prev;
+ for (; list != &header->delayedBuffers; list = prev, prev = list->prev) {
+ buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
+ slab = buf->parent;
+
+ if (!signaled) {
+ if (wait) {
+ ret = driFenceFinish(buf->fence, buf->fenceType, 0);
+ if (ret)
+ break;
+ signaled = 1;
+ } else {
+ signaled = driFenceSignaled(buf->fence, buf->fenceType);
+ }
+ if (signaled) {
+ driFenceUnReference(&buf->fence);
+ header->numDelayed--;
+ driSlabFreeBufferLocked(buf);
+ }
+ } else if (driFenceSignaledCached(buf->fence, buf->fenceType)) {
+ driFenceUnReference(&buf->fence);
+ header->numDelayed--;
+ driSlabFreeBufferLocked(buf);
+ }
+ }
+}
+
+
+static struct _DriSlabBuffer *
+driSlabAllocBuffer(struct _DriSlabSizeHeader *header)
+{
+   struct _DriSlabBuffer *buf;
+ struct _DriSlab *slab;
+ drmMMListHead *list;
+ int count = DRI_SLABPOOL_ALLOC_RETRIES;
+
+ _glthread_LOCK_MUTEX(header->mutex);
+ while(header->slabs.next == &header->slabs && count > 0) {
+ driSlabCheckFreeLocked(header, 0);
+ if (header->slabs.next != &header->slabs)
+ break;
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ if (count != DRI_SLABPOOL_ALLOC_RETRIES)
+ usleep(1);
+ (void) driAllocSlab(header);
+ _glthread_LOCK_MUTEX(header->mutex);
+ count--;
+ }
+
+ list = header->slabs.next;
+ if (list == &header->slabs) {
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ return NULL;
+ }
+ slab = DRMLISTENTRY(struct _DriSlab, list, head);
+ if (--slab->numFree == 0)
+ DRMLISTDELINIT(list);
+
+ list = slab->freeBuffers.next;
+ DRMLISTDELINIT(list);
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ buf = DRMLISTENTRY(struct _DriSlabBuffer, list, head);
+ return buf;
+}
+
+static void *
+pool_create(struct _DriBufferPool *driPool, unsigned long size,
+ uint64_t flags, unsigned hint, unsigned alignment)
+{
+ struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
+ struct _DriSlabSizeHeader *header;
+ struct _DriSlabBuffer *buf;
+ void *dummy;
+ int i;
+ int ret;
+
+ /*
+ * FIXME: Check for compatibility.
+ */
+
+   /*
+    * Pick the smallest bucket that can hold the request; bucket i holds
+    * buffers of size (smallestSize << i), see driSlabPoolInit().
+    */
+   header = pool->headers;
+ for (i=0; i<pool->numBuckets; ++i) {
+ if (header->bufSize >= size)
+ break;
+ header++;
+ }
+
+ if (i < pool->numBuckets)
+ return driSlabAllocBuffer(header);
+
+
+   /*
+    * Fall back to allocating a buffer object directly from DRM
+    * and wrapping it in a driBO structure.
+    */
+
+
+ buf = calloc(1, sizeof(*buf));
+
+ if (!buf)
+ return NULL;
+
+ buf->bo = calloc(1, sizeof(*buf->bo));
+ if (!buf->bo)
+ goto out_err0;
+
+ if (alignment) {
+ if ((alignment < pool->pageSize) && (pool->pageSize % alignment))
+ goto out_err1;
+ if ((alignment > pool->pageSize) && (alignment % pool->pageSize))
+ goto out_err1;
+ }
+
+ ret = drmBOCreate(pool->fd, size, alignment / pool->pageSize, NULL,
+ flags, hint, buf->bo);
+ if (ret)
+ goto out_err1;
+
+ ret = drmBOMap(pool->fd, buf->bo, DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE,
+ 0, &dummy);
+ if (ret)
+ goto out_err2;
+
+ ret = drmBOUnmap(pool->fd, buf->bo);
+ if (ret)
+ goto out_err2;
+
+ return buf;
+ out_err2:
+ drmBOUnreference(pool->fd, buf->bo);
+ out_err1:
+ free(buf->bo);
+ out_err0:
+ free(buf);
+ return NULL;
+}
+
+static int
+pool_destroy(struct _DriBufferPool *driPool, void *private)
+{
+ struct _DriSlabBuffer *buf =
+ (struct _DriSlabBuffer *) private;
+ struct _DriSlab *slab;
+ struct _DriSlabSizeHeader *header;
+
+ if (!buf->isSlabBuffer) {
+ struct _DriSlabPool *pool = (struct _DriSlabPool *) driPool->data;
+ int ret;
+
+ ret = drmBOUnreference(pool->fd, buf->bo);
+ free(buf->bo);
+ free(buf);
+ return ret;
+ }
+
+ slab = buf->parent;
+ header = slab->header;
+
+ _glthread_LOCK_MUTEX(header->mutex);
+
+ if (buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType)) {
+ DRMLISTADDTAIL(&buf->head, &header->delayedBuffers);
+ header->numDelayed++;
+ } else {
+ driSlabFreeBufferLocked(buf);
+ }
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+ return 0;
+}
+
+static int
+pool_waitIdle(struct _DriBufferPool *driPool, void *private, int lazy)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ if (!buf->fence)
+ return 0;
+ driFenceFinish(buf->fence, buf->fenceType, lazy);
+ driFenceUnReference(&buf->fence);
+
+ return 0;
+}
+
+static int
+pool_map(struct _DriBufferPool *pool, void *private, unsigned flags,
+ int hint, void **virtual)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ int busy;
+
+ if (buf->isSlabBuffer)
+ busy = buf->fence && !driFenceSignaledCached(buf->fence, buf->fenceType);
+ else
+ busy = buf->fence && !driFenceSignaled(buf->fence, buf->fenceType);
+
+
+ if (busy) {
+ if (hint & DRM_BO_HINT_DONT_BLOCK)
+ return -EBUSY;
+ else
+ (void) pool_waitIdle(pool, private, 0);
+ }
+
+ ++buf->mapCount;
+ *virtual = (buf->isSlabBuffer) ?
+ (void *) ((uint8_t *) buf->parent->kbo->virtual + buf->start) :
+ (void *) buf->bo->virtual;
+
+ return 0;
+}
+
+static int
+pool_unmap(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ --buf->mapCount;
+ return 0;
+}
+
+static unsigned long
+pool_offset(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ struct _DriSlab *slab;
+ struct _DriSlabSizeHeader *header;
+
+ if (!buf->isSlabBuffer) {
+ assert(buf->bo->proposedFlags & DRM_BO_FLAG_NO_MOVE);
+ return buf->bo->offset;
+ }
+
+ slab = buf->parent;
+ header = slab->header;
+
+   (void) header; /* Only used by the assert below; silences warnings with NDEBUG. */
+ assert(header->slabPool->proposedFlags & DRM_BO_FLAG_NO_MOVE);
+ return slab->kbo->bo.offset + buf->start;
+}
+
+static unsigned long
+pool_poolOffset(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ return buf->start;
+}
+
+static uint64_t
+pool_flags(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ if (!buf->isSlabBuffer)
+ return buf->bo->flags;
+
+ return buf->parent->kbo->bo.flags;
+}
+
+static unsigned long
+pool_size(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ if (!buf->isSlabBuffer)
+ return buf->bo->size;
+
+ return buf->parent->header->bufSize;
+}
+
+static int
+pool_fence(struct _DriBufferPool *pool, void *private,
+ struct _DriFenceObject *fence)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+ drmBO *bo;
+
+ if (buf->fence)
+ driFenceUnReference(&buf->fence);
+
+ buf->fence = driFenceReference(fence);
+ bo = (buf->isSlabBuffer) ?
+ &buf->parent->kbo->bo:
+ buf->bo;
+ buf->fenceType = bo->fenceFlags;
+
+ return 0;
+}
+
+static drmBO *
+pool_kernel(struct _DriBufferPool *pool, void *private)
+{
+ struct _DriSlabBuffer *buf = (struct _DriSlabBuffer *) private;
+
+ return (buf->isSlabBuffer) ? &buf->parent->kbo->bo : buf->bo;
+}
+
+static int
+pool_validate(struct _DriBufferPool *pool, void *private)
+{
+ return 0;
+}
+
+
+struct _DriFreeSlabManager *
+driInitFreeSlabManager(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
+{
+ struct _DriFreeSlabManager *tmp;
+
+ tmp = calloc(1, sizeof(*tmp));
+ if (!tmp)
+ return NULL;
+
+ _glthread_INIT_MUTEX(tmp->mutex);
+ _glthread_LOCK_MUTEX(tmp->mutex);
+ tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
+ tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
+ tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
+
+ tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
+ tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
+ tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;
+
+ gettimeofday(&tmp->nextCheck, NULL);
+ driTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
+ DRMINITLISTHEAD(&tmp->timeoutList);
+ DRMINITLISTHEAD(&tmp->unCached);
+ DRMINITLISTHEAD(&tmp->cached);
+ _glthread_UNLOCK_MUTEX(tmp->mutex);
+
+ return tmp;
+}
+
+void
+driFinishFreeSlabManager(struct _DriFreeSlabManager *fMan)
+{
+ struct timeval time;
+
+ time = fMan->nextCheck;
+ driTimeAdd(&time, &fMan->checkInterval);
+
+ _glthread_LOCK_MUTEX(fMan->mutex);
+ driFreeTimeoutKBOsLocked(fMan, &time);
+ _glthread_UNLOCK_MUTEX(fMan->mutex);
+
+ assert(fMan->timeoutList.next == &fMan->timeoutList);
+ assert(fMan->unCached.next == &fMan->unCached);
+ assert(fMan->cached.next == &fMan->cached);
+
+ free(fMan);
+}
+
+static void
+driInitSizeHeader(struct _DriSlabPool *pool, uint32_t size,
+ struct _DriSlabSizeHeader *header)
+{
+ _glthread_INIT_MUTEX(header->mutex);
+ _glthread_LOCK_MUTEX(header->mutex);
+
+ DRMINITLISTHEAD(&header->slabs);
+ DRMINITLISTHEAD(&header->freeSlabs);
+ DRMINITLISTHEAD(&header->delayedBuffers);
+
+ header->numDelayed = 0;
+ header->slabPool = pool;
+ header->bufSize = size;
+
+ _glthread_UNLOCK_MUTEX(header->mutex);
+}
+
+static void
+driFinishSizeHeader(struct _DriSlabSizeHeader *header)
+{
+ drmMMListHead *list, *next;
+ struct _DriSlabBuffer *buf;
+
+ _glthread_LOCK_MUTEX(header->mutex);
+ for (list = header->delayedBuffers.next, next = list->next;
+ list != &header->delayedBuffers;
+ list = next, next = list->next) {
+
+ buf = DRMLISTENTRY(struct _DriSlabBuffer, list , head);
+ if (buf->fence) {
+ (void) driFenceFinish(buf->fence, buf->fenceType, 0);
+ driFenceUnReference(&buf->fence);
+ }
+ header->numDelayed--;
+ driSlabFreeBufferLocked(buf);
+ }
+ _glthread_UNLOCK_MUTEX(header->mutex);
+}
+
+static void
+pool_takedown(struct _DriBufferPool *driPool)
+{
+ struct _DriSlabPool *pool = driPool->data;
+ int i;
+
+ for (i=0; i<pool->numBuckets; ++i) {
+ driFinishSizeHeader(&pool->headers[i]);
+ }
+
+ free(pool->headers);
+ free(pool->bucketSizes);
+ free(pool);
+ free(driPool);
+}
+
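+/*
+ * Illustrative call only; the actual flags, bucket layout and tuning values
+ * are chosen by the caller (e.g. the screen initialization code), not
+ * mandated here:
+ *
+ *    struct _DriFreeSlabManager *fMan =
+ *       driInitFreeSlabManager(250, 1000);   (check every 250 ms, free
+ *                                             slabs idle for ~1 s)
+ *    struct _DriBufferPool *pool =
+ *       driSlabPoolInit(fd,
+ *                       DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE |
+ *                       DRM_BO_FLAG_MEM_TT,
+ *                       DRM_BO_MASK_MEM,
+ *                       4096,      (smallest bucket: one page)
+ *                       4,         (buckets of 4k, 8k, 16k and 32k)
+ *                       32,        (aim for ~32 buffers per slab)
+ *                       1 << 20,   (cap slab size at 1 MiB)
+ *                       0,         (no extra page alignment)
+ *                       fMan);
+ */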
+struct _DriBufferPool *
+driSlabPoolInit(int fd, uint64_t flags,
+ uint64_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _DriFreeSlabManager *fMan)
+{
+ struct _DriBufferPool *driPool;
+ struct _DriSlabPool *pool;
+ uint32_t i;
+
+ driPool = calloc(1, sizeof(*driPool));
+ if (!driPool)
+ return NULL;
+
+ pool = calloc(1, sizeof(*pool));
+ if (!pool)
+ goto out_err0;
+
+ pool->bucketSizes = calloc(numSizes, sizeof(*pool->bucketSizes));
+ if (!pool->bucketSizes)
+ goto out_err1;
+
+ pool->headers = calloc(numSizes, sizeof(*pool->headers));
+ if (!pool->headers)
+ goto out_err2;
+
+ pool->fMan = fMan;
+ pool->proposedFlags = flags;
+ pool->validMask = validMask;
+ pool->numBuckets = numSizes;
+ pool->pageSize = getpagesize();
+ pool->fd = fd;
+ pool->pageAlignment = pageAlignment;
+ pool->maxSlabSize = maxSlabSize;
+ pool->desiredNumBuffers = desiredNumBuffers;
+
+ for (i=0; i<pool->numBuckets; ++i) {
+ pool->bucketSizes[i] = (smallestSize << i);
+ driInitSizeHeader(pool, pool->bucketSizes[i],
+ &pool->headers[i]);
+ }
+
+ driPool->data = (void *) pool;
+ driPool->map = &pool_map;
+ driPool->unmap = &pool_unmap;
+ driPool->destroy = &pool_destroy;
+ driPool->offset = &pool_offset;
+ driPool->poolOffset = &pool_poolOffset;
+ driPool->flags = &pool_flags;
+ driPool->size = &pool_size;
+ driPool->create = &pool_create;
+ driPool->fence = &pool_fence;
+ driPool->kernel = &pool_kernel;
+ driPool->validate = &pool_validate;
+ driPool->waitIdle = &pool_waitIdle;
+ driPool->takeDown = &pool_takedown;
+
+ return driPool;
+
+ out_err2:
+ free(pool->bucketSizes);
+ out_err1:
+ free(pool);
+ out_err0:
+ free(driPool);
+
+ return NULL;
+}