author     Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>   2009-01-13 12:12:19 +0100
committer  Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>   2009-01-13 12:12:19 +0100
commit     8ab0ceedb21cfe6de2a5fc8138bd750e98e9eeb1 (patch)
tree       ade18844025bbe636c0f8121bf95a0413d90f2bc
parent     22e14e744f8a740e9382ec282d858e6af7e47826 (diff)
Run indent and cleanfile.
-rw-r--r--  src/wsbm_atomic.h     |  59
-rw-r--r--  src/wsbm_driver.c     |  22
-rw-r--r--  src/wsbm_driver.h     |  12
-rw-r--r--  src/wsbm_fencemgr.c   |  72
-rw-r--r--  src/wsbm_fencemgr.h   |  27
-rw-r--r--  src/wsbm_mallocpool.c |   6
-rw-r--r--  src/wsbm_manager.c    | 132
-rw-r--r--  src/wsbm_manager.h    |  82
-rw-r--r--  src/wsbm_mm.h         |   3
-rw-r--r--  src/wsbm_pool.h       |  70
-rw-r--r--  src/wsbm_slabpool.c   | 319
-rw-r--r--  src/wsbm_ttmpool.c    |  29
-rw-r--r--  src/wsbm_userpool.c   |  27
13 files changed, 426 insertions(+), 434 deletions(-)
diff --git a/src/wsbm_atomic.h b/src/wsbm_atomic.h
index 71da6bb..7be2da9 100644
--- a/src/wsbm_atomic.h
+++ b/src/wsbm_atomic.h
@@ -8,7 +8,8 @@
#include <stdint.h>
-struct _WsbmAtomic {
+struct _WsbmAtomic
+{
int32_t count;
};
@@ -16,14 +17,13 @@ struct _WsbmAtomic {
#define wsbmAtomicSet(_v, _i) (((_v)->count) = (_i))
#define wsbmAtomicRead(_v) ((_v)->count)
-static inline int
+static inline int
wsbmAtomicIncZero(struct _WsbmAtomic *v)
{
unsigned char c;
- __asm__ __volatile__(
- "lock; incl %0; sete %1"
- :"+m" (v->count), "=qm" (c)
- : : "memory");
+ __asm__ __volatile__("lock; incl %0; sete %1":"+m"(v->count), "=qm"(c)
+ ::"memory");
+
return c != 0;
}
@@ -32,52 +32,45 @@ wsbmAtomicDecNegative(struct _WsbmAtomic *v)
{
unsigned char c;
int i = -1;
- __asm__ __volatile__(
- "lock; addl %2,%0; sets %1"
- :"+m" (v->count), "=qm" (c)
- :"ir" (i) : "memory");
+ __asm__ __volatile__("lock; addl %2,%0; sets %1":"+m"(v->count), "=qm"(c)
+ :"ir"(i):"memory");
+
return c;
}
-static inline int
+static inline int
wsbmAtomicDecZero(struct _WsbmAtomic *v)
{
unsigned char c;
-
- __asm__ __volatile__(
- "lock; decl %0; sete %1"
- :"+m" (v->count), "=qm" (c)
- : : "memory");
+
+ __asm__ __volatile__("lock; decl %0; sete %1":"+m"(v->count), "=qm"(c)
+ ::"memory");
+
return c != 0;
}
-static inline void wsbmAtomicInc(struct _WsbmAtomic *v)
+static inline void
+wsbmAtomicInc(struct _WsbmAtomic *v)
{
- __asm__ __volatile__(
- "lock; incl %0"
- :"+m" (v->count));
+ __asm__ __volatile__("lock; incl %0":"+m"(v->count));
}
-static inline void wsbmAtomicDec(struct _WsbmAtomic *v)
+static inline void
+wsbmAtomicDec(struct _WsbmAtomic *v)
{
- __asm__ __volatile__(
- "lock; decl %0"
- :"+m" (v->count));
+ __asm__ __volatile__("lock; decl %0":"+m"(v->count));
}
-static inline int32_t wsbmAtomicCmpXchg(volatile struct _WsbmAtomic *v, int32_t old,
- int32_t new)
+static inline int32_t
+wsbmAtomicCmpXchg(volatile struct _WsbmAtomic *v, int32_t old, int32_t new)
{
int32_t previous;
- __asm__ __volatile__(
- "lock; cmpxchgl %k1,%2"
- : "=a" (previous)
- : "r" (new), "m" (v->count), "0" (old)
- : "memory");
+ __asm__ __volatile__("lock; cmpxchgl %k1,%2":"=a"(previous)
+ :"r"(new), "m"(v->count), "0"(old)
+ :"memory");
+
return previous;
}
-
-
#endif
diff --git a/src/wsbm_driver.c b/src/wsbm_driver.c
index c044142..98f846d 100644
--- a/src/wsbm_driver.c
+++ b/src/wsbm_driver.c
@@ -103,25 +103,26 @@ wsbmNullThreadFuncs(void)
* pthreads implementation:
*/
-
-struct _WsbmPMutex {
+struct _WsbmPMutex
+{
struct _WsbmThreadFuncs *func;
pthread_mutex_t mutex;
};
-struct _WsbmPCond {
+struct _WsbmPCond
+{
struct _WsbmThreadFuncs *func;
pthread_cond_t cond;
};
-
static inline struct _WsbmPMutex *
pMutexConvert(struct _WsbmMutex *m)
{
- union _PMutexConverter {
+ union _PMutexConverter
+ {
struct _WsbmMutex wm;
struct _WsbmPMutex pm;
- } *um = containerOf(m, union _PMutexConverter, wm);
+ } *um = containerOf(m, union _PMutexConverter, wm);
return &um->pm;
}
@@ -129,15 +130,15 @@ pMutexConvert(struct _WsbmMutex *m)
static inline struct _WsbmPCond *
pCondConvert(struct _WsbmCond *c)
{
- union _PCondConverter {
+ union _PCondConverter
+ {
struct _WsbmCond wc;
struct _WsbmPCond pc;
- } *uc = containerOf(c, union _PCondConverter, wc);
+ } *uc = containerOf(c, union _PCondConverter, wc);
return &uc->pc;
}
-
static int
p_mutexInit(struct _WsbmMutex *mutex, struct _WsbmThreadFuncs *func)
{
@@ -146,7 +147,7 @@ p_mutexInit(struct _WsbmMutex *mutex, struct _WsbmThreadFuncs *func)
if (sizeof(struct _WsbmMutex) < sizeof(struct _WsbmPMutex))
return -EINVAL;
- pMutex->func = func;
+ pMutex->func = func;
pthread_mutex_init(&pMutex->mutex, NULL);
return 0;
}
@@ -155,6 +156,7 @@ static void
p_mutexFree(struct _WsbmMutex *mutex)
{
struct _WsbmPMutex *pMutex = pMutexConvert(mutex);
+
pthread_mutex_destroy(&pMutex->mutex);
}
diff --git a/src/wsbm_driver.h b/src/wsbm_driver.h
index d5058ed..d7da44e 100644
--- a/src/wsbm_driver.h
+++ b/src/wsbm_driver.h
@@ -39,25 +39,25 @@
#define WSBM_MUTEX_SPACE 16
#define WSBM_COND_SPACE 16
-struct _WsbmMutex {
+struct _WsbmMutex
+{
struct _WsbmThreadFuncs *func;
unsigned long storage[WSBM_MUTEX_SPACE];
};
-struct _WsbmCond {
+struct _WsbmCond
+{
struct _WsbmThreadFuncs *func;
unsigned long storage[WSBM_COND_SPACE];
};
struct _WsbmThreadFuncs
{
- int (*mutexInit) (struct _WsbmMutex *,
- struct _WsbmThreadFuncs *);
+ int (*mutexInit) (struct _WsbmMutex *, struct _WsbmThreadFuncs *);
void (*mutexFree) (struct _WsbmMutex *);
void (*mutexLock) (struct _WsbmMutex *);
void (*mutexUnlock) (struct _WsbmMutex *);
- int (*condInit) (struct _WsbmCond *,
- struct _WsbmThreadFuncs *);
+ int (*condInit) (struct _WsbmCond *, struct _WsbmThreadFuncs *);
void (*condFree) (struct _WsbmCond *);
void (*condWait) (struct _WsbmCond *, struct _WsbmMutex *);
void (*condBroadcast) (struct _WsbmCond *);
diff --git a/src/wsbm_fencemgr.c b/src/wsbm_fencemgr.c
index 7a7577d..047eb81 100644
--- a/src/wsbm_fencemgr.c
+++ b/src/wsbm_fencemgr.c
@@ -42,7 +42,8 @@
#include <string.h>
#include <unistd.h>
-struct _WsbmFenceClass {
+struct _WsbmFenceClass
+{
struct _WsbmListHead head;
struct _WsbmMutex mutex;
struct _WsbmMutex cmd_mutex;
@@ -115,7 +116,7 @@ struct _WsbmFenceMgr *
wsbmFenceMgrCreate(const struct _WsbmFenceMgrCreateInfo *info)
{
struct _WsbmFenceMgr *tmp;
- uint32_t i,j;
+ uint32_t i, j;
int ret;
tmp = calloc(1, sizeof(*tmp));
@@ -129,6 +130,7 @@ wsbmFenceMgrCreate(const struct _WsbmFenceMgrCreateInfo *info)
for (i = 0; i < tmp->info.num_classes; ++i) {
struct _WsbmFenceClass *fc = &tmp->classes[i];
+
WSBMINITLISTHEAD(&fc->head);
ret = WSBM_MUTEX_INIT(&fc->mutex);
if (ret)
@@ -143,8 +145,8 @@ wsbmFenceMgrCreate(const struct _WsbmFenceMgrCreateInfo *info)
return tmp;
- out_err1:
- for (j=0; j<i; ++j) {
+ out_err1:
+ for (j = 0; j < i; ++j) {
WSBM_MUTEX_FREE(&tmp->classes[j].mutex);
WSBM_MUTEX_FREE(&tmp->classes[j].cmd_mutex);
}
@@ -168,9 +170,10 @@ wsbmFenceUnreference(struct _WsbmFenceObject **pFence)
mgr = fence->mgr;
if (wsbmAtomicDecZero(&fence->refCount)) {
struct _WsbmFenceClass *fc = &mgr->classes[fence->fence_class];
- WSBM_MUTEX_LOCK(&fc->mutex);
+
+ WSBM_MUTEX_LOCK(&fc->mutex);
WSBMLISTDELINIT(&fence->head);
- WSBM_MUTEX_UNLOCK(&fc->mutex);
+ WSBM_MUTEX_UNLOCK(&fc->mutex);
if (fence->private)
mgr->info.unreference(mgr, &fence->private);
fence->mgr = NULL;
@@ -193,21 +196,24 @@ wsbmSignalPreviousFences(struct _WsbmFenceMgr *mgr,
WSBM_MUTEX_LOCK(&fc->mutex);
while (list != &fc->head && list->next != list) {
entry = WSBMLISTENTRY(list, struct _WsbmFenceObject, head);
+
prev = list->prev;
do {
old_signaled_types = wsbmAtomicRead(&entry->signaled_types);
- signaled_types = old_signaled_types | (signaled_types & entry->fence_type);
+ signaled_types =
+ old_signaled_types | (signaled_types & entry->fence_type);
if (signaled_types == old_signaled_types)
break;
- ret_st = wsbmAtomicCmpXchg(&entry->signaled_types, old_signaled_types,
- signaled_types);
- } while(ret_st != old_signaled_types);
+ ret_st =
+ wsbmAtomicCmpXchg(&entry->signaled_types, old_signaled_types,
+ signaled_types);
+ } while (ret_st != old_signaled_types);
if (signaled_types == entry->fence_type)
WSBMLISTDELINIT(list);
-
+
list = prev;
}
WSBM_MUTEX_UNLOCK(&fc->mutex);
@@ -215,12 +221,11 @@ wsbmSignalPreviousFences(struct _WsbmFenceMgr *mgr,
int
wsbmFenceFinish(struct _WsbmFenceObject *fence, uint32_t fence_type,
- int lazy_hint)
+ int lazy_hint)
{
struct _WsbmFenceMgr *mgr = fence->mgr;
int ret = 0;
-
if ((wsbmAtomicRead(&fence->signaled_types) & fence_type) == fence_type)
goto out;
@@ -230,7 +235,7 @@ wsbmFenceFinish(struct _WsbmFenceObject *fence, uint32_t fence_type,
wsbmSignalPreviousFences(mgr, &fence->head, fence->fence_class,
fence_type);
- out:
+ out:
return ret;
}
@@ -242,7 +247,7 @@ wsbmFenceSignaledTypeCached(struct _WsbmFenceObject * fence)
int
wsbmFenceSignaledType(struct _WsbmFenceObject *fence, uint32_t flush_type,
- uint32_t * signaled)
+ uint32_t * signaled)
{
int ret = 0;
struct _WsbmFenceMgr *mgr;
@@ -271,8 +276,8 @@ wsbmFenceSignaledType(struct _WsbmFenceObject *fence, uint32_t flush_type,
signaled_types);
if (old_signaled_types == ret_st)
wsbmSignalPreviousFences(mgr, &fence->head, fence->fence_class,
- *signaled);
- } while(old_signaled_types != ret_st);
+ *signaled);
+ } while (old_signaled_types != ret_st);
return 0;
out0:
@@ -304,11 +309,11 @@ wsbmFenceCreate(struct _WsbmFenceMgr *mgr, uint32_t fence_class,
if (!fence)
goto out_err;
- wsbmAtomicSet(&fence->refCount,1);
+ wsbmAtomicSet(&fence->refCount, 1);
fence->mgr = mgr;
fence->fence_class = fence_class;
fence->fence_type = fence_type;
- wsbmAtomicSet(&fence->signaled_types,0);
+ wsbmAtomicSet(&fence->signaled_types, 0);
fence->private = private;
if (private_size) {
fence->private = (void *)(((uint8_t *) fence) + fence_size);
@@ -370,13 +375,11 @@ tFinish(struct _WsbmFenceMgr *mgr, void *private, uint32_t fence_type,
{
struct _WsbmTTMFenceMgrPriv *priv =
(struct _WsbmTTMFenceMgrPriv *)mgr->private;
- union ttm_fence_finish_arg arg =
- {.req =
- {.handle = (unsigned long)private,
- .fence_type = fence_type,
- .mode = (lazy_hint) ? TTM_FENCE_FINISH_MODE_LAZY : 0
- }
- };
+ union ttm_fence_finish_arg arg =
+ {.req = {.handle = (unsigned long)private,
+ .fence_type = fence_type,
+ .mode = (lazy_hint) ? TTM_FENCE_FINISH_MODE_LAZY : 0}
+ };
int ret;
do {
@@ -392,7 +395,7 @@ tUnref(struct _WsbmFenceMgr *mgr, void **private)
{
struct _WsbmTTMFenceMgrPriv *priv =
(struct _WsbmTTMFenceMgrPriv *)mgr->private;
- struct ttm_fence_unref_arg arg = {.handle = (unsigned long) *private };
+ struct ttm_fence_unref_arg arg = {.handle = (unsigned long)*private };
*private = NULL;
@@ -429,18 +432,17 @@ wsbmFenceMgrTTMInit(int fd, unsigned int numClass, unsigned int devOffset)
return mgr;
}
-void wsbmFenceCmdLock(struct _WsbmFenceMgr *mgr,
- uint32_t fence_class)
+void
+wsbmFenceCmdLock(struct _WsbmFenceMgr *mgr, uint32_t fence_class)
{
WSBM_MUTEX_LOCK(&mgr->classes[fence_class].cmd_mutex);
-}
+}
-void wsbmFenceCmdUnlock(struct _WsbmFenceMgr *mgr,
- uint32_t fence_class)
+void
+wsbmFenceCmdUnlock(struct _WsbmFenceMgr *mgr, uint32_t fence_class)
{
WSBM_MUTEX_UNLOCK(&mgr->classes[fence_class].cmd_mutex);
-}
-
+}
void
wsbmFenceMgrTTMTakedown(struct _WsbmFenceMgr *mgr)
@@ -453,7 +455,7 @@ wsbmFenceMgrTTMTakedown(struct _WsbmFenceMgr *mgr)
if (mgr->private)
free(mgr->private);
- for (i=0; i<mgr->info.num_classes; ++i) {
+ for (i = 0; i < mgr->info.num_classes; ++i) {
WSBM_MUTEX_FREE(&mgr->classes[i].mutex);
WSBM_MUTEX_FREE(&mgr->classes[i].cmd_mutex);
}
diff --git a/src/wsbm_fencemgr.h b/src/wsbm_fencemgr.h
index 58bd7c8..0cae2f3 100644
--- a/src/wsbm_fencemgr.h
+++ b/src/wsbm_fencemgr.h
@@ -53,7 +53,7 @@ extern uint32_t wsbmFenceSignaledTypeCached(struct _WsbmFenceObject *fence);
* underlying mechanism must make sure will eventually signal.
*/
extern int wsbmFenceSignaledType(struct _WsbmFenceObject *fence,
- uint32_t flush_type, uint32_t * signaled);
+ uint32_t flush_type, uint32_t * signaled);
/*
* Convenience functions.
@@ -82,7 +82,7 @@ wsbmFenceSignaledCached(struct _WsbmFenceObject *fence, uint32_t flush_type)
* Reference a fence object.
*/
extern struct _WsbmFenceObject *wsbmFenceReference(struct _WsbmFenceObject
- *fence);
+ *fence);
/*
* Unreference a fence object. The fence object pointer will be reset to NULL.
@@ -95,8 +95,8 @@ extern void wsbmFenceUnreference(struct _WsbmFenceObject **pFence);
* If "lazy_hint" is true, it indicates that the wait may sleep to avoid
* busy-wait polling.
*/
-extern int wsbmFenceFinish(struct _WsbmFenceObject *fence, uint32_t fence_type,
- int lazy_hint);
+extern int wsbmFenceFinish(struct _WsbmFenceObject *fence,
+ uint32_t fence_type, int lazy_hint);
/*
* Create a WsbmFenceObject for manager "mgr".
@@ -111,10 +111,10 @@ extern int wsbmFenceFinish(struct _WsbmFenceObject *fence, uint32_t fence_type,
* "private" may be destroyed after the call to wsbmFenceCreate.
*/
extern struct _WsbmFenceObject *wsbmFenceCreate(struct _WsbmFenceMgr *mgr,
- uint32_t fence_class,
- uint32_t fence_type,
- void *private,
- size_t private_size);
+ uint32_t fence_class,
+ uint32_t fence_type,
+ void *private,
+ size_t private_size);
extern uint32_t wsbmFenceType(struct _WsbmFenceObject *fence);
@@ -138,16 +138,17 @@ struct _WsbmFenceMgrCreateInfo
};
extern struct _WsbmFenceMgr *wsbmFenceMgrCreate(const struct
- _WsbmFenceMgrCreateInfo *info);
-extern void wsbmFenceCmdLock(struct _WsbmFenceMgr *mgr,
- uint32_t fence_class);
+ _WsbmFenceMgrCreateInfo
+ *info);
+extern void wsbmFenceCmdLock(struct _WsbmFenceMgr *mgr, uint32_t fence_class);
extern void wsbmFenceCmdUnlock(struct _WsbmFenceMgr *mgr,
uint32_t fence_class);
/*
* Builtin drivers.
*/
-extern struct _WsbmFenceMgr *wsbmFenceMgrTTMInit(int fd, unsigned int numClass,
- unsigned int devOffset);
+extern struct _WsbmFenceMgr *wsbmFenceMgrTTMInit(int fd,
+ unsigned int numClass,
+ unsigned int devOffset);
extern void wsbmFenceMgrTTMTakedown(struct _WsbmFenceMgr *mgr);
#endif
diff --git a/src/wsbm_mallocpool.c b/src/wsbm_mallocpool.c
index c627c34..b5252d6 100644
--- a/src/wsbm_mallocpool.c
+++ b/src/wsbm_mallocpool.c
@@ -97,13 +97,13 @@ pool_unmap(struct _WsbmBufStorage *buf)
}
static int
-pool_syncforcpu (struct _WsbmBufStorage *buf, unsigned mode)
+pool_syncforcpu(struct _WsbmBufStorage *buf, unsigned mode)
{
return 0;
}
-
+
static void
-pool_releasefromcpu (struct _WsbmBufStorage *buf, unsigned mode)
+pool_releasefromcpu(struct _WsbmBufStorage *buf, unsigned mode)
{
;
}
diff --git a/src/wsbm_manager.c b/src/wsbm_manager.c
index 5e7854e..5cfebf0 100644
--- a/src/wsbm_manager.c
+++ b/src/wsbm_manager.c
@@ -79,13 +79,12 @@ struct _WsbmBufferObject
struct _WsbmBufferPool *pool;
};
-
struct _WsbmBufferList
{
int hasKernelBuffers;
- struct _ValidateList kernelBuffers; /* List of kernel buffers needing validation */
- struct _ValidateList userBuffers; /* List of user-space buffers needing validation */
+ struct _ValidateList kernelBuffers; /* List of kernel buffers needing validation */
+ struct _ValidateList userBuffers; /* List of user-space buffers needing validation */
};
static struct _WsbmMutex bmMutex;
@@ -112,22 +111,25 @@ wsbmInit(struct _WsbmThreadFuncs *tf, struct _WsbmVNodeFuncs *vf)
WSBM_MUTEX_FREE(&bmMutex);
return -ENOMEM;
}
-
+
initialized = 1;
return 0;
}
-void wsbmCommonDataSet(void *d)
+void
+wsbmCommonDataSet(void *d)
{
commonData = d;
}
-void *wsbmCommonDataGet(void)
+void *
+wsbmCommonDataGet(void)
{
return commonData;
}
-int wsbmIsInitialized(void)
+int
+wsbmIsInitialized(void)
{
return initialized;
}
@@ -394,6 +396,7 @@ uint32_t
wsbmBOPlacementHint(struct _WsbmBufferObject * buf)
{
struct _WsbmBufStorage *storage = buf->storage;
+
assert(buf->storage != NULL);
return storage->pool->placement(storage);
@@ -412,7 +415,7 @@ wsbmBOReference(struct _WsbmBufferObject *buf)
int
wsbmBOSetStatus(struct _WsbmBufferObject *buf,
- uint32_t setFlags, uint32_t clrFlags)
+ uint32_t setFlags, uint32_t clrFlags)
{
struct _WsbmBufStorage *storage = buf->storage;
@@ -429,13 +432,14 @@ void
wsbmBOUnreference(struct _WsbmBufferObject **p_buf)
{
struct _WsbmBufferObject *buf = *p_buf;
+
*p_buf = NULL;
if (!buf)
return;
if (buf->bufferType == WSBM_BUFFER_SIMPLE) {
- struct _WsbmBufStorage *dummy = buf->storage;
+ struct _WsbmBufStorage *dummy = buf->storage;
wsbmBufStorageUnref(&dummy);
return;
@@ -449,8 +453,8 @@ wsbmBOUnreference(struct _WsbmBufferObject **p_buf)
int
wsbmBOData(struct _WsbmBufferObject *buf,
- unsigned size, const void *data,
- struct _WsbmBufferPool * newPool, uint32_t placement)
+ unsigned size, const void *data,
+ struct _WsbmBufferPool *newPool, uint32_t placement)
{
void *virtual = NULL;
int newBuffer;
@@ -467,19 +471,20 @@ wsbmBOData(struct _WsbmBufferObject *buf,
if (newPool == NULL)
newPool = buf->pool;
-
+
if (newPool == NULL)
return -EINVAL;
newBuffer = (!storage || storage->pool != newPool ||
storage->pool->size(storage) < size ||
- storage->pool->size(storage) > size + WSBM_BODATA_SIZE_ACCEPT);
+ storage->pool->size(storage) >
+ size + WSBM_BODATA_SIZE_ACCEPT);
if (!placement)
placement = buf->placement;
if (newBuffer) {
- if (buf->bufferType == WSBM_BUFFER_REF)
+ if (buf->bufferType == WSBM_BUFFER_REF)
return -EINVAL;
wsbmBufStorageUnref(&buf->storage);
@@ -491,7 +496,8 @@ wsbmBOData(struct _WsbmBufferObject *buf,
goto out;
}
- buf->storage = newPool->create(newPool, size, placement, buf->alignment);
+ buf->storage =
+ newPool->create(newPool, size, placement, buf->alignment);
if (!buf->storage) {
retval = -ENOMEM;
goto out;
@@ -507,9 +513,11 @@ wsbmBOData(struct _WsbmBufferObject *buf,
*/
struct _WsbmBufStorage *tmp_storage;
+
curPool = storage->pool;
- tmp_storage = curPool->create(curPool, size, placement, buf->alignment);
+ tmp_storage =
+ curPool->create(curPool, size, placement, buf->alignment);
if (tmp_storage) {
wsbmBufStorageUnref(&buf->storage);
@@ -521,7 +529,7 @@ wsbmBOData(struct _WsbmBufferObject *buf,
goto out;
synced = 1;
}
- } else
+ } else
synced = 1;
placement_diff = placement ^ buf->placement;
@@ -529,7 +537,7 @@ wsbmBOData(struct _WsbmBufferObject *buf,
/*
* We might need to change buffer placement.
*/
-
+
storage = buf->storage;
curPool = storage->pool;
@@ -537,22 +545,22 @@ wsbmBOData(struct _WsbmBufferObject *buf,
assert(curPool->setStatus != NULL);
curPool->releasefromcpu(storage, WSBM_SYNCCPU_WRITE);
retval = curPool->setStatus(storage,
- placement_diff & placement,
- placement_diff & ~placement);
+ placement_diff & placement,
+ placement_diff & ~placement);
if (retval)
goto out;
-
+
buf->placement = placement;
}
if (!synced) {
retval = curPool->syncforcpu(buf->storage, WSBM_SYNCCPU_WRITE);
-
+
if (retval)
goto out;
synced = 1;
- }
+ }
storage = buf->storage;
curPool = storage->pool;
@@ -564,7 +572,7 @@ wsbmBOData(struct _WsbmBufferObject *buf,
memcpy(virtual, data, size);
curPool->unmap(storage);
}
-
+
out:
if (synced)
@@ -576,17 +584,17 @@ wsbmBOData(struct _WsbmBufferObject *buf,
static struct _WsbmBufStorage *
wsbmStorageClone(struct _WsbmBufferObject *buf)
{
- struct _WsbmBufStorage *storage = buf->storage;
- struct _WsbmBufferPool *pool = storage->pool;
-
- return pool->create(pool, pool->size(storage), buf->placement,
- buf->alignment);
+ struct _WsbmBufStorage *storage = buf->storage;
+ struct _WsbmBufferPool *pool = storage->pool;
+
+ return pool->create(pool, pool->size(storage), buf->placement,
+ buf->alignment);
}
struct _WsbmBufferObject *
-wsbmBOClone(struct _WsbmBufferObject *buf,
- int (*accelCopy) (struct _WsbmBufferObject *,
- struct _WsbmBufferObject *))
+wsbmBOClone(struct _WsbmBufferObject *buf,
+ int (*accelCopy) (struct _WsbmBufferObject *,
+ struct _WsbmBufferObject *))
{
struct _WsbmBufferObject *newBuf;
int ret;
@@ -594,7 +602,7 @@ wsbmBOClone(struct _WsbmBufferObject *buf,
newBuf = malloc(sizeof(*newBuf));
if (!newBuf)
return NULL;
-
+
*newBuf = *buf;
newBuf->storage = wsbmStorageClone(buf);
if (!newBuf->storage)
@@ -623,7 +631,7 @@ wsbmBOClone(struct _WsbmBufferObject *buf,
pool->unmap(newBuf->storage);
pool->unmap(buf->storage);
pool->releasefromcpu(storage, WSBM_SYNCCPU_READ);
- }
+ }
return newBuf;
out_err3:
@@ -637,11 +645,10 @@ wsbmBOClone(struct _WsbmBufferObject *buf,
return 0;
}
-
int
wsbmBOSubData(struct _WsbmBufferObject *buf,
unsigned long offset, unsigned long size, const void *data,
- int (*accelCopy) (struct _WsbmBufferObject *,
+ int (*accelCopy) (struct _WsbmBufferObject *,
struct _WsbmBufferObject *))
{
int ret = 0;
@@ -686,11 +693,11 @@ wsbmBOSubData(struct _WsbmBufferObject *buf,
wsbmBOUnreference(&newBuf);
pool = storage->pool;
}
-
+
ret = pool->syncforcpu(storage, WSBM_SYNCCPU_WRITE);
if (ret)
goto out;
- }
+ }
ret = pool->map(storage, WSBM_ACCESS_WRITE, &virtual);
if (ret) {
@@ -699,7 +706,7 @@ wsbmBOSubData(struct _WsbmBufferObject *buf,
}
memcpy((unsigned char *)virtual + offset, data, size);
- pool->unmap(storage);
+ pool->unmap(storage);
pool->releasefromcpu(storage, WSBM_SYNCCPU_WRITE);
}
out:
@@ -708,7 +715,7 @@ wsbmBOSubData(struct _WsbmBufferObject *buf,
int
wsbmBOGetSubData(struct _WsbmBufferObject *buf,
- unsigned long offset, unsigned long size, void *data)
+ unsigned long offset, unsigned long size, void *data)
{
int ret = 0;
@@ -753,23 +760,22 @@ wsbmBOSetReferenced(struct _WsbmBufferObject *buf, unsigned long handle)
return ret;
}
-void wsbmBOFreeSimple(void *ptr)
-{
+void
+wsbmBOFreeSimple(void *ptr)
+{
free(ptr);
-}
+}
struct _WsbmBufferObject *
wsbmBOCreateSimple(struct _WsbmBufferPool *pool,
unsigned long size,
uint32_t placement,
- unsigned alignment,
- size_t extra_size,
- size_t *offset)
+ unsigned alignment, size_t extra_size, size_t * offset)
{
struct _WsbmBufferObject *buf;
struct _WsbmBufStorage *storage;
-
- *offset = (sizeof(*buf) + 15) & ~15;
+
+ *offset = (sizeof(*buf) + 15) & ~15;
if (extra_size) {
extra_size += *offset - sizeof(*buf);
@@ -798,19 +804,17 @@ wsbmBOCreateSimple(struct _WsbmBufferPool *pool,
free(buf);
return NULL;
}
-
-
int
wsbmGenBuffers(struct _WsbmBufferPool *pool,
- unsigned n,
- struct _WsbmBufferObject *buffers[],
- unsigned alignment, uint32_t placement)
+ unsigned n,
+ struct _WsbmBufferObject *buffers[],
+ unsigned alignment, uint32_t placement)
{
struct _WsbmBufferObject *buf;
int i;
- placement = (placement) ? placement :
+ placement = (placement) ? placement :
WSBM_PL_FLAG_SYSTEM | WSBM_PL_FLAG_CACHED;
for (i = 0; i < n; ++i) {
@@ -889,8 +893,8 @@ wsbmBOFreeList(struct _WsbmBufferList *list)
static int
wsbmAddValidateItem(struct _ValidateList *list, void *buf, uint64_t flags,
- uint64_t mask, int *itemLoc,
- struct _ValidateNode **pnode, int *newItem)
+ uint64_t mask, int *itemLoc,
+ struct _ValidateNode **pnode, int *newItem)
{
struct _ValidateNode *node, *cur;
struct _WsbmListHead *l;
@@ -942,12 +946,12 @@ wsbmAddValidateItem(struct _ValidateList *list, void *buf, uint64_t flags,
}
*itemLoc = cur->listItem;
if (pnode)
- *pnode = cur;
+ *pnode = cur;
return 0;
}
int
-wsbmBOAddListItem(struct _WsbmBufferList *list,
+wsbmBOAddListItem(struct _WsbmBufferList *list,
struct _WsbmBufferObject *buf,
uint64_t flags, uint64_t mask, int *itemLoc,
struct _ValidateNode **node)
@@ -970,7 +974,7 @@ wsbmBOAddListItem(struct _WsbmBufferList *list,
}
ret = wsbmAddValidateItem(&list->userBuffers, storage,
- flags, mask, &dummy, &dummyNode, &newItem);
+ flags, mask, &dummy, &dummyNode, &newItem);
if (ret)
goto out_unlock;
@@ -979,7 +983,7 @@ wsbmBOAddListItem(struct _WsbmBufferList *list,
wsbmAtomicInc(&storage->onList);
}
- out_unlock:
+ out_unlock:
return ret;
}
@@ -994,7 +998,7 @@ wsbmBOFence(struct _WsbmBufferObject *buf, struct _WsbmFenceObject *fence)
}
-int
+int
wsbmBOOnList(const struct _WsbmBufferObject *buf)
{
if (buf->storage == NULL)
@@ -1002,7 +1006,6 @@ wsbmBOOnList(const struct _WsbmBufferObject *buf)
return wsbmAtomicRead(&buf->storage->onList);
}
-
int
wsbmBOUnrefUserList(struct _WsbmBufferList *list)
{
@@ -1074,7 +1077,6 @@ wsbmBOValidateUserList(struct _WsbmBufferList *list)
return 0;
}
-
int
wsbmBOUnvalidateUserList(struct _WsbmBufferList *list)
{
@@ -1172,8 +1174,8 @@ wsbmKBufHandle(const struct _WsbmKernelBuf * kBuf)
extern void
wsbmUpdateKBuf(struct _WsbmKernelBuf *kBuf,
- uint64_t gpuOffset, uint32_t placement,
- uint32_t fence_type_mask)
+ uint64_t gpuOffset, uint32_t placement,
+ uint32_t fence_type_mask)
{
kBuf->gpuOffset = gpuOffset;
kBuf->placement = placement;
diff --git a/src/wsbm_manager.h b/src/wsbm_manager.h
index 02b9b0a..ee0636f 100644
--- a/src/wsbm_manager.h
+++ b/src/wsbm_manager.h
@@ -52,7 +52,7 @@ struct _WsbmBufferList;
* replicate them here, and if there is a discrepancy,
* that needs to be resolved in the buffer pool using
* the TTM flags.
- */
+ */
#define WSBM_PL_MASK_MEM 0x0000FFFF
@@ -77,8 +77,7 @@ struct _WsbmBufferList;
extern void *wsbmBOMap(struct _WsbmBufferObject *buf, unsigned mode);
extern void wsbmBOUnmap(struct _WsbmBufferObject *buf);
-extern int wsbmBOSyncForCpu(struct _WsbmBufferObject *buf,
- unsigned mode);
+extern int wsbmBOSyncForCpu(struct _WsbmBufferObject *buf, unsigned mode);
extern void wsbmBOReleaseFromCpu(struct _WsbmBufferObject *buf,
unsigned mode);
@@ -86,49 +85,52 @@ extern unsigned long wsbmBOOffsetHint(struct _WsbmBufferObject *buf);
extern unsigned long wsbmBOPoolOffset(struct _WsbmBufferObject *buf);
extern uint32_t wsbmBOPlacementHint(struct _WsbmBufferObject *buf);
-extern struct _WsbmBufferObject *wsbmBOReference(struct _WsbmBufferObject *buf);
+extern struct _WsbmBufferObject *wsbmBOReference(struct _WsbmBufferObject
+ *buf);
extern void wsbmBOUnreference(struct _WsbmBufferObject **p_buf);
extern int wsbmBOData(struct _WsbmBufferObject *r_buf,
- unsigned size, const void *data,
- struct _WsbmBufferPool *pool, uint32_t placement);
+ unsigned size, const void *data,
+ struct _WsbmBufferPool *pool, uint32_t placement);
extern int wsbmBOSetStatus(struct _WsbmBufferObject *buf,
- uint32_t setPlacement,
- uint32_t clrPlacement);
+ uint32_t setPlacement, uint32_t clrPlacement);
extern int wsbmBOSubData(struct _WsbmBufferObject *buf,
unsigned long offset, unsigned long size,
- const void *data,
- int (*accelCopy) (struct _WsbmBufferObject *,
+ const void *data,
+ int (*accelCopy) (struct _WsbmBufferObject *,
struct _WsbmBufferObject *));
-extern struct _WsbmBufferObject *
-wsbmBOClone(struct _WsbmBufferObject *buf,
- int (*accelCopy) (struct _WsbmBufferObject *,
- struct _WsbmBufferObject *));
+extern struct _WsbmBufferObject *wsbmBOClone(struct _WsbmBufferObject *buf,
+ int (*accelCopy) (struct
+ _WsbmBufferObject
+ *,
+ struct
+ _WsbmBufferObject
+ *));
extern int wsbmBOGetSubData(struct _WsbmBufferObject *buf,
- unsigned long offset, unsigned long size,
- void *data);
+ unsigned long offset, unsigned long size,
+ void *data);
extern int wsbmGenBuffers(struct _WsbmBufferPool *pool,
- unsigned n,
- struct _WsbmBufferObject *buffers[],
- unsigned alignment, uint32_t placement);
-
-struct _WsbmBufferObject *
-wsbmBOCreateSimple(struct _WsbmBufferPool *pool,
- unsigned long size,
- uint32_t placement,
- unsigned alignment,
- size_t extra_size,
- size_t *offset);
-
-extern void wsbmDeleteBuffers(unsigned n, struct _WsbmBufferObject *buffers[]);
-extern struct _WsbmBufferList *wsbmBOCreateList(int target,
+ unsigned n,
+ struct _WsbmBufferObject *buffers[],
+ unsigned alignment, uint32_t placement);
+
+struct _WsbmBufferObject *wsbmBOCreateSimple(struct _WsbmBufferPool *pool,
+ unsigned long size,
+ uint32_t placement,
+ unsigned alignment,
+ size_t extra_size,
+ size_t * offset);
+
+extern void wsbmDeleteBuffers(unsigned n,
+ struct _WsbmBufferObject *buffers[]);
+extern struct _WsbmBufferList *wsbmBOCreateList(int target,
int hasKernelBuffers);
extern int wsbmBOResetList(struct _WsbmBufferList *list);
extern int wsbmBOAddListItem(struct _WsbmBufferList *list,
- struct _WsbmBufferObject *buf,
- uint64_t flags, uint64_t mask, int *itemLoc,
- struct _ValidateNode **node);
+ struct _WsbmBufferObject *buf,
+ uint64_t flags, uint64_t mask, int *itemLoc,
+ struct _ValidateNode **node);
extern void wsbmBOFreeList(struct _WsbmBufferList *list);
extern int wsbmBOFenceUserList(struct _WsbmBufferList *list,
@@ -139,11 +141,11 @@ extern int wsbmBOValidateUserList(struct _WsbmBufferList *list);
extern int wsbmBOUnvalidateUserList(struct _WsbmBufferList *list);
extern void wsbmBOFence(struct _WsbmBufferObject *buf,
- struct _WsbmFenceObject *fence);
+ struct _WsbmFenceObject *fence);
extern void wsbmPoolTakeDown(struct _WsbmBufferPool *pool);
extern int wsbmBOSetReferenced(struct _WsbmBufferObject *buf,
- unsigned long handle);
+ unsigned long handle);
unsigned long wsbmBOSize(struct _WsbmBufferObject *buf);
extern void wsbmBOWaitIdle(struct _WsbmBufferObject *buf, int lazy);
extern int wsbmBOOnList(const struct _WsbmBufferObject *buf);
@@ -155,15 +157,12 @@ extern void wsbmReadUnlockKernelBO(void);
extern void wsbmWriteLockKernelBO(void);
extern void wsbmWriteUnlockKernelBO(void);
-extern int wsbmInit(struct _WsbmThreadFuncs *tf,
- struct _WsbmVNodeFuncs *vf);
+extern int wsbmInit(struct _WsbmThreadFuncs *tf, struct _WsbmVNodeFuncs *vf);
extern void wsbmTakedown(void);
extern int wsbmIsInitialized(void);
extern void wsbmCommonDataSet(void *d);
extern void *wsbmCommonDataGet(void);
-
-
extern struct _ValidateList *wsbmGetKernelValidateList(struct _WsbmBufferList
*list);
extern struct _ValidateList *wsbmGetUserValidateList(struct _WsbmBufferList
@@ -175,9 +174,8 @@ extern void *validateListNext(struct _ValidateList *list, void *iterator);
extern uint32_t wsbmKBufHandle(const struct _WsbmKernelBuf *);
extern void wsbmUpdateKBuf(struct _WsbmKernelBuf *,
- uint64_t gpuOffset,
- uint32_t placement,
- uint32_t fence_flags);
+ uint64_t gpuOffset,
+ uint32_t placement, uint32_t fence_flags);
extern struct _WsbmKernelBuf *wsbmKBuf(const struct _WsbmBufferObject *buf);
diff --git a/src/wsbm_mm.h b/src/wsbm_mm.h
index e76b119..d3bbe94 100644
--- a/src/wsbm_mm.h
+++ b/src/wsbm_mm.h
@@ -69,6 +69,7 @@ extern struct _WsbmMMNode *wsbmMMGetBlock(struct _WsbmMMNode *parent,
unsigned alignment);
extern void wsbmMMPutBlock(struct _WsbmMMNode *cur);
extern void wsbmMMtakedown(struct _WsbmMM *mm);
-extern int wsbmMMinit(struct _WsbmMM *mm, unsigned long start, unsigned long size);
+extern int wsbmMMinit(struct _WsbmMM *mm, unsigned long start,
+ unsigned long size);
extern int wsbmMMclean(struct _WsbmMM *mm);
#endif
diff --git a/src/wsbm_pool.h b/src/wsbm_pool.h
index df24475..1d07523 100644
--- a/src/wsbm_pool.h
+++ b/src/wsbm_pool.h
@@ -46,7 +46,7 @@ struct _WsbmBufStorage
struct _WsbmMutex mutex;
struct _WsbmAtomic refCount;
struct _WsbmAtomic onList;
- void * destroyArg;
+ void *destroyArg;
void (*destroyContainer) (void *);
};
@@ -57,35 +57,37 @@ struct _WsbmBufferPool
int fd;
int (*map) (struct _WsbmBufStorage * buf, unsigned mode, void **virtual);
void (*unmap) (struct _WsbmBufStorage * buf);
- int (*syncforcpu) (struct _WsbmBufStorage *buf, unsigned mode);
- void (*releasefromcpu) (struct _WsbmBufStorage *buf, unsigned mode);
+ int (*syncforcpu) (struct _WsbmBufStorage * buf, unsigned mode);
+ void (*releasefromcpu) (struct _WsbmBufStorage * buf, unsigned mode);
void (*destroy) (struct _WsbmBufStorage ** buf);
unsigned long (*offset) (struct _WsbmBufStorage * buf);
unsigned long (*poolOffset) (struct _WsbmBufStorage * buf);
- uint32_t(*placement) (struct _WsbmBufStorage * buf);
+ uint32_t(*placement) (struct _WsbmBufStorage * buf);
unsigned long (*size) (struct _WsbmBufStorage * buf);
struct _WsbmKernelBuf *(*kernel) (struct _WsbmBufStorage * buf);
struct _WsbmBufStorage *(*create) (struct _WsbmBufferPool * pool,
- unsigned long size,
- uint32_t placement, unsigned alignment);
- struct _WsbmBufStorage *(*createByReference) (struct _WsbmBufferPool * pool,
- uint32_t handle);
+ unsigned long size,
+ uint32_t placement,
+ unsigned alignment);
+ struct _WsbmBufStorage *(*createByReference) (struct _WsbmBufferPool *
+ pool, uint32_t handle);
void (*fence) (struct _WsbmBufStorage * buf,
- struct _WsbmFenceObject * fence);
+ struct _WsbmFenceObject * fence);
void (*unvalidate) (struct _WsbmBufStorage * buf);
int (*validate) (struct _WsbmBufStorage * buf, uint64_t set_flags,
uint64_t clr_flags);
int (*waitIdle) (struct _WsbmBufStorage * buf, int lazy);
- int (*setStatus) (struct _WsbmBufStorage * buf,
- uint32_t set_placement,
- uint32_t clr_placement);
+ int (*setStatus) (struct _WsbmBufStorage * buf,
+ uint32_t set_placement, uint32_t clr_placement);
void (*takeDown) (struct _WsbmBufferPool * pool);
};
static inline int
-wsbmBufStorageInit(struct _WsbmBufStorage *storage, struct _WsbmBufferPool *pool)
+wsbmBufStorageInit(struct _WsbmBufStorage *storage,
+ struct _WsbmBufferPool *pool)
{
int ret = WSBM_MUTEX_INIT(&storage->mutex);
+
if (ret)
return -ENOMEM;
storage->pool = pool;
@@ -126,32 +128,34 @@ wsbmBufStorageUnref(struct _WsbmBufStorage **pStorage)
* Kernel buffer objects. Size in multiples of page size. Page size aligned.
*/
-extern struct _WsbmBufferPool *wsbmTTMPoolInit(int fd, unsigned int devOffset);
+extern struct _WsbmBufferPool *wsbmTTMPoolInit(int fd,
+ unsigned int devOffset);
extern struct _WsbmBufferPool *wsbmMallocPoolInit(void);
struct _WsbmSlabCache;
-extern struct _WsbmBufferPool * wsbmSlabPoolInit(int fd, uint32_t devOffset,
- uint32_t placement,
- uint32_t validMask,
- uint32_t smallestSize,
- uint32_t numSizes,
- uint32_t desiredNumBuffers,
- uint32_t maxSlabSize,
- uint32_t pageAlignment,
- struct _WsbmSlabCache *cache);
-extern struct _WsbmSlabCache *
-wsbmSlabCacheInit(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec);
+extern struct _WsbmBufferPool *wsbmSlabPoolInit(int fd, uint32_t devOffset,
+ uint32_t placement,
+ uint32_t validMask,
+ uint32_t smallestSize,
+ uint32_t numSizes,
+ uint32_t desiredNumBuffers,
+ uint32_t maxSlabSize,
+ uint32_t pageAlignment,
+ struct _WsbmSlabCache *cache);
+extern struct _WsbmSlabCache *wsbmSlabCacheInit(uint32_t checkIntervalMsec,
+ uint32_t slabTimeoutMsec);
extern void wsbmSlabCacheFinish(struct _WsbmSlabCache *cache);
-extern struct _WsbmBufferPool *
-wsbmUserPoolInit(void *vramAddr,
- unsigned long vramStart, unsigned long vramSize,
- void *agpAddr, unsigned long agpStart,
- unsigned long agpSize,
- uint32_t (*fenceTypes) (uint64_t set_flags));
+extern struct _WsbmBufferPool *wsbmUserPoolInit(void *vramAddr,
+ unsigned long vramStart,
+ unsigned long vramSize,
+ void *agpAddr,
+ unsigned long agpStart,
+ unsigned long agpSize,
+ uint32_t(*fenceTypes)
+ (uint64_t set_flags));
extern void wsbmUserPoolClean(struct _WsbmBufferPool *pool,
- int cleanVram,
- int cleanAgp);
+ int cleanVram, int cleanAgp);
#endif
diff --git a/src/wsbm_slabpool.c b/src/wsbm_slabpool.c
index 4dbc507..b007d1e 100644
--- a/src/wsbm_slabpool.c
+++ b/src/wsbm_slabpool.c
@@ -67,7 +67,8 @@ static int fencesignaled = 0;
struct _WsbmSlab;
-struct _WsbmSlabBuffer {
+struct _WsbmSlabBuffer
+{
struct _WsbmKernelBuf kBuf;
struct _WsbmBufStorage storage;
struct _WsbmCond event;
@@ -95,12 +96,13 @@ struct _WsbmSlabBuffer {
struct _WsbmFenceObject *fence;
uint32_t fenceType;
- struct _WsbmAtomic writers; /* (Only upping) */
+ struct _WsbmAtomic writers; /* (Only upping) */
int unFenced;
};
struct _WsbmSlabPool;
-struct _WsbmSlabKernelBO {
+struct _WsbmSlabKernelBO
+{
/*
* Constant at creation
@@ -123,7 +125,8 @@ struct _WsbmSlabKernelBO {
struct timeval timeFreed;
};
-struct _WsbmSlab{
+struct _WsbmSlab
+{
struct _WsbmListHead head;
struct _WsbmListHead freeBuffers;
uint32_t numBuffers;
@@ -133,8 +136,8 @@ struct _WsbmSlab{
struct _WsbmSlabKernelBO *kbo;
};
-
-struct _WsbmSlabSizeHeader {
+struct _WsbmSlabSizeHeader
+{
/*
* Constant at creation.
*/
@@ -152,7 +155,8 @@ struct _WsbmSlabSizeHeader {
struct _WsbmMutex mutex;
};
-struct _WsbmSlabCache {
+struct _WsbmSlabCache
+{
struct timeval slabTimeout;
struct timeval checkInterval;
struct timeval nextCheck;
@@ -162,8 +166,8 @@ struct _WsbmSlabCache {
struct _WsbmMutex mutex;
};
-
-struct _WsbmSlabPool {
+struct _WsbmSlabPool
+{
struct _WsbmBufferPool pool;
/*
@@ -173,7 +177,7 @@ struct _WsbmSlabPool {
unsigned int devOffset;
struct _WsbmSlabCache *cache;
- uint32_t proposedPlacement;
+ uint32_t proposedPlacement;
uint32_t validMask;
uint32_t *bucketSizes;
uint32_t numBuckets;
@@ -187,7 +191,7 @@ struct _WsbmSlabPool {
static inline struct _WsbmSlabPool *
slabPoolFromPool(struct _WsbmBufferPool *pool)
{
- return containerOf(pool, struct _WsbmSlabPool , pool);
+ return containerOf(pool, struct _WsbmSlabPool, pool);
}
static inline struct _WsbmSlabPool *
@@ -202,13 +206,11 @@ slabBuffer(struct _WsbmBufStorage *buf)
return containerOf(buf, struct _WsbmSlabBuffer, storage);
}
-
/*
* FIXME: Perhaps arrange timeout slabs in size buckets for fast
* retreival??
*/
-
static inline int
wsbmTimeAfterEq(struct timeval *arg1, struct timeval *arg2)
{
@@ -226,7 +228,7 @@ wsbmTimeAdd(struct timeval *arg, struct timeval *add)
arg->tv_usec += add->tv_usec;
sec = arg->tv_usec / 1000000;
arg->tv_sec += sec;
- arg->tv_usec -= sec*1000000;
+ arg->tv_usec -= sec * 1000000;
}
static void
@@ -240,16 +242,15 @@ wsbmFreeKernelBO(struct _WsbmSlabKernelBO *kbo)
slabPool = kbo->slabPool;
arg.handle = kbo->kBuf.handle;
- (void) munmap(kbo->virtual, kbo->actualSize);
- (void) drmCommandWrite(slabPool->pool.fd, slabPool->devOffset + TTM_PL_UNREF,
- &arg, sizeof(arg));
+ (void)munmap(kbo->virtual, kbo->actualSize);
+ (void)drmCommandWrite(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_UNREF, &arg,
+ sizeof(arg));
free(kbo);
}
-
static void
-wsbmFreeTimeoutKBOsLocked(struct _WsbmSlabCache *cache,
- struct timeval *time)
+wsbmFreeTimeoutKBOsLocked(struct _WsbmSlabCache *cache, struct timeval *time)
{
struct _WsbmListHead *list, *next;
struct _WsbmSlabKernelBO *kbo;
@@ -272,7 +273,6 @@ wsbmFreeTimeoutKBOsLocked(struct _WsbmSlabCache *cache,
wsbmTimeAdd(&cache->nextCheck, &cache->checkInterval);
}
-
/*
* Add a _SlabKernelBO to the free slab manager.
* This means that it is available for reuse, but if it's not
@@ -307,10 +307,8 @@ wsbmSetKernelBOFree(struct _WsbmSlabCache *cache,
* Get a _SlabKernelBO for us to use as storage for a slab.
*/
-
static struct _WsbmSlabKernelBO *
wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
-
{
struct _WsbmSlabPool *slabPool = header->slabPool;
struct _WsbmSlabCache *cache = slabPool->cache;
@@ -325,7 +323,6 @@ wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
* to efficiently reuse slabs.
*/
-
size = (size <= slabPool->maxSlabSize) ? size : slabPool->maxSlabSize;
if (size < header->bufSize)
size = header->bufSize;
@@ -340,10 +337,11 @@ wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
WSBMLISTFOREACH(list, head) {
kboTmp = WSBMLISTENTRY(list, struct _WsbmSlabKernelBO, head);
+
if ((kboTmp->actualSize == size) &&
(slabPool->pageAlignment == 0 ||
(kboTmp->pageAlignment % slabPool->pageAlignment) == 0)) {
-
+
if (!kbo)
kbo = kboTmp;
@@ -352,7 +350,7 @@ wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
}
}
-
+
if (kbo) {
WSBMLISTDELINIT(&kbo->head);
WSBMLISTDELINIT(&kbo->timeoutHead);
@@ -361,14 +359,15 @@ wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
WSBM_MUTEX_UNLOCK(&cache->mutex);
if (kbo) {
- uint32_t new_mask = kbo->proposedPlacement ^ slabPool->proposedPlacement;
+ uint32_t new_mask =
+ kbo->proposedPlacement ^ slabPool->proposedPlacement;
ret = 0;
if (new_mask) {
union ttm_pl_setstatus_arg arg;
struct ttm_pl_setstatus_req *req = &arg.req;
struct ttm_pl_rep *rep = &arg.rep;
-
+
req->handle = kbo->kBuf.handle;
req->set_placement = slabPool->proposedPlacement & new_mask;
req->clr_placement = ~slabPool->proposedPlacement & new_mask;
@@ -381,7 +380,7 @@ wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
}
kbo->proposedPlacement = slabPool->proposedPlacement;
}
-
+
if (ret == 0)
return kbo;
@@ -400,27 +399,27 @@ wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
kbo->slabPool = slabPool;
WSBMINITLISTHEAD(&kbo->head);
WSBMINITLISTHEAD(&kbo->timeoutHead);
-
+
arg.req.size = size;
arg.req.placement = slabPool->proposedPlacement;
arg.req.page_alignment = slabPool->pageAlignment;
- DRMRESTARTCOMMANDWRITEREAD(slabPool->pool.fd,
+ DRMRESTARTCOMMANDWRITEREAD(slabPool->pool.fd,
slabPool->devOffset + TTM_PL_CREATE,
arg, ret);
if (ret)
goto out_err0;
-
+
kbo->kBuf.gpuOffset = arg.rep.gpu_offset;
kbo->kBuf.placement = arg.rep.placement;
kbo->kBuf.handle = arg.rep.handle;
-
+
kbo->actualSize = arg.rep.bo_size;
kbo->mapHandle = arg.rep.map_handle;
kbo->proposedPlacement = slabPool->proposedPlacement;
}
- kbo->virtual = mmap(0, kbo->actualSize,
+ kbo->virtual = mmap(0, kbo->actualSize,
PROT_READ | PROT_WRITE, MAP_SHARED,
slabPool->pool.fd, kbo->mapHandle);
@@ -433,19 +432,17 @@ wsbmAllocKernelBO(struct _WsbmSlabSizeHeader *header)
out_err1:
{
- struct ttm_pl_reference_req arg =
- {.handle = kbo->kBuf.handle};
+ struct ttm_pl_reference_req arg = {.handle = kbo->kBuf.handle };
- (void) drmCommandWrite(slabPool->pool.fd,
- slabPool->devOffset + TTM_PL_UNREF,
- &arg, sizeof(arg));
+ (void)drmCommandWrite(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
}
out_err0:
free(kbo);
return NULL;
}
-
static int
wsbmAllocSlab(struct _WsbmSlabSizeHeader *header)
{
@@ -480,13 +477,13 @@ wsbmAllocSlab(struct _WsbmSlabSizeHeader *header)
slab->header = header;
sBuf = slab->buffers;
- for (i=0; i < numBuffers; ++i) {
+ for (i = 0; i < numBuffers; ++i) {
ret = wsbmBufStorageInit(&sBuf->storage, &header->slabPool->pool);
if (ret)
goto out_err2;
sBuf->parent = slab;
- sBuf->start = i* header->bufSize;
- sBuf->virtual = (void *) ((uint8_t *) slab->kbo->virtual +
+ sBuf->start = i * header->bufSize;
+ sBuf->virtual = (void *)((uint8_t *) slab->kbo->virtual +
sBuf->start);
wsbmAtomicSet(&sBuf->writers, 0);
sBuf->isSlabBuffer = 1;
@@ -502,7 +499,7 @@ wsbmAllocSlab(struct _WsbmSlabSizeHeader *header)
out_err2:
sBuf = slab->buffers;
- for (i=0; i < numBuffers; ++i) {
+ for (i = 0; i < numBuffers; ++i) {
if (sBuf->parent == slab) {
WSBM_COND_FREE(&sBuf->event);
wsbmBufStorageTakedown(&sBuf->storage);
@@ -551,12 +548,13 @@ wsbmSlabFreeBufferLocked(struct _WsbmSlabBuffer *buf)
WSBMLISTFOREACHSAFE(list, next, &header->freeSlabs) {
int i;
struct _WsbmSlabBuffer *sBuf;
-
+
slab = WSBMLISTENTRY(list, struct _WsbmSlab, head);
+
WSBMLISTDELINIT(list);
sBuf = slab->buffers;
- for (i=0; i < slab->numBuffers; ++i) {
+ for (i = 0; i < slab->numBuffers; ++i) {
if (sBuf->parent == slab) {
WSBM_COND_FREE(&sBuf->event);
wsbmBufStorageTakedown(&sBuf->storage);
@@ -581,70 +579,70 @@ wsbmSlabCheckFreeLocked(struct _WsbmSlabSizeHeader *header, int wait)
int i;
int ret;
- /*
- * Rerun the freeing test if the youngest tested buffer
- * was signaled, since there might be more idle buffers
- * in the delay list.
- */
-
- while (firstWasSignaled) {
- firstWasSignaled = 0;
- signaled = 0;
- first = header->delayedBuffers.next;
-
- /* Only examine the oldest 1/3 of delayed buffers:
- */
- if (header->numDelayed > 3) {
- for (i = 0; i < header->numDelayed; i += 3) {
- first = first->next;
- }
- }
-
- /*
- * No need to take the buffer mutex for each buffer we loop
- * through since we're currently the only user.
- */
-
-
- WSBMLISTFOREACHPREVSAFE(list, prev, first->next) {
-
- if (list == &header->delayedBuffers)
- break;
-
- sBuf = WSBMLISTENTRY(list, struct _WsbmSlabBuffer, head);
- slab = sBuf->parent;
-
- if (!signaled) {
- if (wait) {
- ret = wsbmFenceFinish(sBuf->fence, sBuf->fenceType, 0);
- if (ret)
- break;
- signaled = 1;
- wait = 0;
- } else {
- signaled = wsbmFenceSignaled(sBuf->fence, sBuf->fenceType);
+ /*
+ * Rerun the freeing test if the youngest tested buffer
+ * was signaled, since there might be more idle buffers
+ * in the delay list.
+ */
+
+ while (firstWasSignaled) {
+ firstWasSignaled = 0;
+ signaled = 0;
+ first = header->delayedBuffers.next;
+
+ /* Only examine the oldest 1/3 of delayed buffers:
+ */
+ if (header->numDelayed > 3) {
+ for (i = 0; i < header->numDelayed; i += 3) {
+ first = first->next;
+ }
+ }
+
+ /*
+ * No need to take the buffer mutex for each buffer we loop
+ * through since we're currently the only user.
+ */
+
+ WSBMLISTFOREACHPREVSAFE(list, prev, first->next) {
+
+ if (list == &header->delayedBuffers)
+ break;
+
+ sBuf = WSBMLISTENTRY(list, struct _WsbmSlabBuffer, head);
+
+ slab = sBuf->parent;
+
+ if (!signaled) {
+ if (wait) {
+ ret = wsbmFenceFinish(sBuf->fence, sBuf->fenceType, 0);
+ if (ret)
+ break;
+ signaled = 1;
+ wait = 0;
+ } else {
+ signaled =
+ wsbmFenceSignaled(sBuf->fence, sBuf->fenceType);
#ifdef DEBUG_FENCESIGNALED
- fencesignaled++;
+ fencesignaled++;
#endif
- }
- if (signaled) {
- if (list == first)
- firstWasSignaled = 1;
- wsbmFenceUnreference(&sBuf->fence);
- header->numDelayed--;
- wsbmSlabFreeBufferLocked(sBuf);
- } else
- break;
- } else if (wsbmFenceSignaledCached(sBuf->fence, sBuf->fenceType)) {
- wsbmFenceUnreference(&sBuf->fence);
- header->numDelayed--;
- wsbmSlabFreeBufferLocked(sBuf);
- }
- }
- }
+ }
+ if (signaled) {
+ if (list == first)
+ firstWasSignaled = 1;
+ wsbmFenceUnreference(&sBuf->fence);
+ header->numDelayed--;
+ wsbmSlabFreeBufferLocked(sBuf);
+ } else
+ break;
+ } else if (wsbmFenceSignaledCached(sBuf->fence, sBuf->fenceType)) {
+ wsbmFenceUnreference(&sBuf->fence);
+ header->numDelayed--;
+ wsbmSlabFreeBufferLocked(sBuf);
+ }
+ }
+ }
}
-
static struct _WsbmSlabBuffer *
wsbmSlabAllocBuffer(struct _WsbmSlabSizeHeader *header)
{
@@ -654,16 +652,16 @@ wsbmSlabAllocBuffer(struct _WsbmSlabSizeHeader *header)
int count = WSBM_SLABPOOL_ALLOC_RETRIES;
WSBM_MUTEX_LOCK(&header->mutex);
- while(header->slabs.next == &header->slabs && count > 0) {
- wsbmSlabCheckFreeLocked(header, 0);
+ while (header->slabs.next == &header->slabs && count > 0) {
+ wsbmSlabCheckFreeLocked(header, 0);
if (header->slabs.next != &header->slabs)
- break;
+ break;
WSBM_MUTEX_UNLOCK(&header->mutex);
if (count != WSBM_SLABPOOL_ALLOC_RETRIES)
usleep(1000);
WSBM_MUTEX_LOCK(&header->mutex);
- (void) wsbmAllocSlab(header);
+ (void)wsbmAllocSlab(header);
count--;
}
@@ -703,7 +701,7 @@ pool_create(struct _WsbmBufferPool *pool, unsigned long size,
*/
header = slabPool->headers;
- for (i=0; i<slabPool->numBuckets; ++i) {
+ for (i = 0; i < slabPool->numBuckets; ++i) {
if (header->bufSize >= size)
break;
header++;
@@ -714,7 +712,6 @@ pool_create(struct _WsbmBufferPool *pool, unsigned long size,
return ((sBuf) ? &sBuf->storage : NULL);
}
-
/*
* Fall back to allocate a buffer object directly from DRM.
* and wrap it in a wsbmBO structure.
@@ -726,9 +723,11 @@ pool_create(struct _WsbmBufferPool *pool, unsigned long size,
return NULL;
if (alignment) {
- if ((alignment < slabPool->pageSize) && (slabPool->pageSize % alignment))
+ if ((alignment < slabPool->pageSize)
+ && (slabPool->pageSize % alignment))
goto out_err0;
- if ((alignment > slabPool->pageSize) && (alignment % slabPool->pageSize))
+ if ((alignment > slabPool->pageSize)
+ && (alignment % slabPool->pageSize))
goto out_err0;
}
@@ -746,8 +745,8 @@ pool_create(struct _WsbmBufferPool *pool, unsigned long size,
arg.req.size = size;
arg.req.placement = placement;
arg.req.page_alignment = alignment / slabPool->pageSize;
-
- DRMRESTARTCOMMANDWRITEREAD(pool->fd,
+
+ DRMRESTARTCOMMANDWRITEREAD(pool->fd,
slabPool->devOffset + TTM_PL_CREATE,
arg, ret);
@@ -767,16 +766,16 @@ pool_create(struct _WsbmBufferPool *pool, unsigned long size,
goto out_err3;
}
- wsbmAtomicSet(&sBuf->writers, 0);
+ wsbmAtomicSet(&sBuf->writers, 0);
return &sBuf->storage;
out_err3:
{
struct ttm_pl_reference_req arg;
arg.handle = sBuf->kBuf.handle;
- (void) drmCommandWriteRead(pool->fd,
- slabPool->devOffset + TTM_PL_UNREF,
- &arg, sizeof(arg));
+ (void)drmCommandWriteRead(pool->fd,
+ slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
}
out_err2:
WSBM_COND_FREE(&sBuf->event);
@@ -798,19 +797,19 @@ pool_destroy(struct _WsbmBufStorage **p_buf)
*p_buf = NULL;
if (!sBuf->isSlabBuffer) {
- struct _WsbmSlabPool *slabPool = slabPoolFromBuf(sBuf);
+ struct _WsbmSlabPool *slabPool = slabPoolFromBuf(sBuf);
struct ttm_pl_reference_req arg;
if (sBuf->virtual != NULL) {
- (void) munmap(sBuf->virtual, sBuf->requestedSize);
+ (void)munmap(sBuf->virtual, sBuf->requestedSize);
sBuf->virtual = NULL;
}
arg.handle = sBuf->kBuf.handle;
- (void) drmCommandWrite(slabPool->pool.fd,
- slabPool->devOffset + TTM_PL_UNREF,
- &arg, sizeof(arg));
-
+ (void)drmCommandWrite(slabPool->pool.fd,
+ slabPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
+
WSBM_COND_FREE(&sBuf->event);
wsbmBufStorageTakedown(&sBuf->storage);
free(sBuf);
@@ -841,24 +840,23 @@ pool_destroy(struct _WsbmBufStorage **p_buf)
WSBM_MUTEX_UNLOCK(&header->mutex);
}
-
static void
waitIdleLocked(struct _WsbmSlabBuffer *sBuf, int lazy)
{
struct _WsbmBufStorage *storage = &sBuf->storage;
- while(sBuf->unFenced || sBuf->fence != NULL) {
+ while (sBuf->unFenced || sBuf->fence != NULL) {
if (sBuf->unFenced)
WSBM_COND_WAIT(&sBuf->event, &storage->mutex);
if (sBuf->fence != NULL) {
if (!wsbmFenceSignaled(sBuf->fence, sBuf->fenceType)) {
- struct _WsbmFenceObject *fence =
+ struct _WsbmFenceObject *fence =
wsbmFenceReference(sBuf->fence);
WSBM_MUTEX_UNLOCK(&storage->mutex);
- (void) wsbmFenceFinish(fence, sBuf->fenceType, lazy);
+ (void)wsbmFenceFinish(fence, sBuf->fenceType, lazy);
WSBM_MUTEX_LOCK(&storage->mutex);
if (sBuf->fence == fence)
wsbmFenceUnreference(&sBuf->fence);
@@ -870,7 +868,7 @@ waitIdleLocked(struct _WsbmSlabBuffer *sBuf, int lazy)
}
}
}
-
+
static int
pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
{
@@ -879,7 +877,7 @@ pool_waitIdle(struct _WsbmBufStorage *buf, int lazy)
WSBM_MUTEX_LOCK(&buf->mutex);
waitIdleLocked(sBuf, lazy);
WSBM_MUTEX_UNLOCK(&buf->mutex);
-
+
return 0;
}
@@ -898,7 +896,7 @@ pool_releaseFromCpu(struct _WsbmBufStorage *buf, unsigned mode)
{
struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
- if (wsbmAtomicDecZero(&sBuf->writers))
+ if (wsbmAtomicDecZero(&sBuf->writers))
WSBM_COND_BROADCAST(&sBuf->event);
}
@@ -918,12 +916,12 @@ pool_syncForCpu(struct _WsbmBufStorage *buf, unsigned mode)
}
if (sBuf->isSlabBuffer)
- signaled = (sBuf->fence == NULL) ||
+ signaled = (sBuf->fence == NULL) ||
wsbmFenceSignaledCached(sBuf->fence, sBuf->fenceType);
else
- signaled = (sBuf->fence == NULL) ||
+ signaled = (sBuf->fence == NULL) ||
wsbmFenceSignaled(sBuf->fence, sBuf->fenceType);
-
+
ret = 0;
if (signaled) {
wsbmFenceUnreference(&sBuf->fence);
@@ -942,7 +940,7 @@ pool_syncForCpu(struct _WsbmBufStorage *buf, unsigned mode)
static void
pool_unmap(struct _WsbmBufStorage *buf)
{
- ;
+ ;
}
static unsigned long
@@ -957,6 +955,7 @@ static unsigned long
pool_size(struct _WsbmBufStorage *buf)
{
struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
+
if (!sBuf->isSlabBuffer)
return sBuf->requestedSize;
@@ -971,14 +970,12 @@ pool_kernel(struct _WsbmBufStorage *buf)
return (sBuf->isSlabBuffer) ? &sBuf->parent->kbo->kBuf : &sBuf->kBuf;
}
-
static unsigned long
pool_offset(struct _WsbmBufStorage *buf)
{
return pool_kernel(buf)->gpuOffset + pool_poolOffset(buf);
}
-
static void
pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
{
@@ -998,14 +995,13 @@ pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
}
static int
-pool_validate(struct _WsbmBufStorage *buf,
- uint64_t set_flags,
- uint64_t clr_flags)
+pool_validate(struct _WsbmBufStorage *buf,
+ uint64_t set_flags, uint64_t clr_flags)
{
struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
-
+
WSBM_MUTEX_LOCK(&buf->mutex);
- while(wsbmAtomicRead(&sBuf->writers) != 0) {
+ while (wsbmAtomicRead(&sBuf->writers) != 0) {
WSBM_COND_WAIT(&sBuf->event, &buf->mutex);
}
@@ -1014,7 +1010,7 @@ pool_validate(struct _WsbmBufStorage *buf,
return 0;
}
-static void
+static void
pool_unvalidate(struct _WsbmBufStorage *buf)
{
struct _WsbmSlabBuffer *sBuf = slabBuffer(buf);
@@ -1038,13 +1034,13 @@ wsbmSlabCacheInit(uint32_t checkIntervalMsec, uint32_t slabTimeoutMsec)
WSBM_MUTEX_INIT(&tmp->mutex);
WSBM_MUTEX_LOCK(&tmp->mutex);
- tmp->slabTimeout.tv_usec = slabTimeoutMsec*1000;
+ tmp->slabTimeout.tv_usec = slabTimeoutMsec * 1000;
tmp->slabTimeout.tv_sec = tmp->slabTimeout.tv_usec / 1000000;
- tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec*1000000;
+ tmp->slabTimeout.tv_usec -= tmp->slabTimeout.tv_sec * 1000000;
- tmp->checkInterval.tv_usec = checkIntervalMsec*1000;
+ tmp->checkInterval.tv_usec = checkIntervalMsec * 1000;
tmp->checkInterval.tv_sec = tmp->checkInterval.tv_usec / 1000000;
- tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec*1000000;
+ tmp->checkInterval.tv_usec -= tmp->checkInterval.tv_sec * 1000000;
gettimeofday(&tmp->nextCheck, NULL);
wsbmTimeAdd(&tmp->nextCheck, &tmp->checkInterval);
@@ -1077,7 +1073,7 @@ wsbmSlabCacheFinish(struct _WsbmSlabCache *cache)
static void
wsbmInitSizeHeader(struct _WsbmSlabPool *slabPool, uint32_t size,
- struct _WsbmSlabSizeHeader *header)
+ struct _WsbmSlabSizeHeader *header)
{
WSBM_MUTEX_INIT(&header->mutex);
WSBM_MUTEX_LOCK(&header->mutex);
@@ -1102,8 +1098,9 @@ wsbmFinishSizeHeader(struct _WsbmSlabSizeHeader *header)
WSBM_MUTEX_LOCK(&header->mutex);
WSBMLISTFOREACHSAFE(list, next, &header->delayedBuffers) {
sBuf = WSBMLISTENTRY(list, struct _WsbmSlabBuffer, head);
+
if (sBuf->fence) {
- (void) wsbmFenceFinish(sBuf->fence, sBuf->fenceType, 0);
+ (void)wsbmFenceFinish(sBuf->fence, sBuf->fenceType, 0);
wsbmFenceUnreference(&sBuf->fence);
}
header->numDelayed--;
@@ -1113,24 +1110,23 @@ wsbmFinishSizeHeader(struct _WsbmSlabSizeHeader *header)
WSBM_MUTEX_FREE(&header->mutex);
}
-
static void
pool_takedown(struct _WsbmBufferPool *pool)
{
struct _WsbmSlabPool *slabPool = slabPoolFromPool(pool);
int i;
- for (i=0; i<slabPool->numBuckets; ++i) {
- wsbmFinishSizeHeader(&slabPool->headers[i]);
- }
+ for (i = 0; i < slabPool->numBuckets; ++i) {
+ wsbmFinishSizeHeader(&slabPool->headers[i]);
+ }
- free(slabPool->headers);
- free(slabPool->bucketSizes);
- free(slabPool);
+ free(slabPool->headers);
+ free(slabPool->bucketSizes);
+ free(slabPool);
}
struct _WsbmBufferPool *
-wsbmSlabPoolInit(int fd,
+wsbmSlabPoolInit(int fd,
uint32_t devOffset,
uint32_t placement,
uint32_t validMask,
@@ -1138,8 +1134,7 @@ wsbmSlabPoolInit(int fd,
uint32_t numSizes,
uint32_t desiredNumBuffers,
uint32_t maxSlabSize,
- uint32_t pageAlignment,
- struct _WsbmSlabCache *cache)
+ uint32_t pageAlignment, struct _WsbmSlabCache *cache)
{
struct _WsbmBufferPool *pool;
struct _WsbmSlabPool *slabPool;
@@ -1169,7 +1164,7 @@ wsbmSlabPoolInit(int fd,
slabPool->maxSlabSize = maxSlabSize;
slabPool->desiredNumBuffers = desiredNumBuffers;
- for (i=0; i<slabPool->numBuckets; ++i) {
+ for (i = 0; i < slabPool->numBuckets; ++i) {
slabPool->bucketSizes[i] = (smallestSize << i);
wsbmInitSizeHeader(slabPool, slabPool->bucketSizes[i],
&slabPool->headers[i]);
diff --git a/src/wsbm_ttmpool.c b/src/wsbm_ttmpool.c
index ff132e0..5867428 100644
--- a/src/wsbm_ttmpool.c
+++ b/src/wsbm_ttmpool.c
@@ -217,16 +217,16 @@ pool_destroy(struct _WsbmBufStorage **buf)
dBuf->virtual = NULL;
}
arg.handle = dBuf->kBuf.handle;
- (void) drmCommandWrite(dBuf->buf.pool->fd,
- ttmPool->devOffset + TTM_PL_UNREF,
- &arg, sizeof(arg));
+ (void)drmCommandWrite(dBuf->buf.pool->fd,
+ ttmPool->devOffset + TTM_PL_UNREF,
+ &arg, sizeof(arg));
WSBM_COND_FREE(&dBuf->event);
wsbmBufStorageTakedown(&dBuf->buf);
free(dBuf);
}
-static int
+static int
syncforcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
{
uint32_t kmode = 0;
@@ -247,7 +247,7 @@ syncforcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
if ((mode & WSBM_SYNCCPU_WRITE) && (++dBuf->writers == 1))
kmode |= TTM_PL_SYNCCPU_MODE_WRITE;
-
+
if (kmode) {
struct ttm_pl_synccpu_arg arg;
@@ -256,7 +256,6 @@ syncforcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
dBuf->syncInProgress = 1;
-
/*
* This might be a lengthy wait, so
* release the mutex.
@@ -284,7 +283,7 @@ syncforcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
return ret;
}
-static int
+static int
releasefromcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
{
uint32_t kmode = 0;
@@ -300,7 +299,7 @@ releasefromcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
if ((mode & WSBM_SYNCCPU_WRITE) && (--dBuf->writers == 0))
kmode |= TTM_PL_SYNCCPU_MODE_WRITE;
-
+
if (kmode) {
struct ttm_pl_synccpu_arg arg;
@@ -311,7 +310,7 @@ releasefromcpu_locked(struct _WsbmBufStorage *buf, unsigned mode)
DRMRESTARTCOMMANDWRITE(dBuf->buf.pool->fd,
ttmPool->devOffset + TTM_PL_SYNCCPU, arg, ret);
- }
+ }
return ret;
}
@@ -320,22 +319,21 @@ static int
pool_syncforcpu(struct _WsbmBufStorage *buf, unsigned mode)
{
int ret;
-
+
WSBM_MUTEX_LOCK(&buf->mutex);
ret = syncforcpu_locked(buf, mode);
WSBM_MUTEX_UNLOCK(&buf->mutex);
return ret;
}
-
+
static void
-pool_releasefromcpu(struct _WsbmBufStorage *buf, unsigned mode)
+pool_releasefromcpu(struct _WsbmBufStorage *buf, unsigned mode)
{
WSBM_MUTEX_LOCK(&buf->mutex);
- (void) releasefromcpu_locked(buf, mode);
+ (void)releasefromcpu_locked(buf, mode);
WSBM_MUTEX_UNLOCK(&buf->mutex);
}
-
static int
pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
{
@@ -343,7 +341,6 @@ pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
void *virt;
int ret = 0;
-
WSBM_MUTEX_LOCK(&buf->mutex);
/*
@@ -366,7 +363,7 @@ pool_map(struct _WsbmBufStorage *buf, unsigned mode, void **virtual)
out_unlock:
WSBM_MUTEX_UNLOCK(&buf->mutex);
-
+
return ret;
}
diff --git a/src/wsbm_userpool.c b/src/wsbm_userpool.c
index e91ba98..f7833b7 100644
--- a/src/wsbm_userpool.c
+++ b/src/wsbm_userpool.c
@@ -102,7 +102,7 @@ struct _WsbmUserPool
struct _WsbmListHead agpLRU;
struct _WsbmMM vramMM;
struct _WsbmMM agpMM;
- uint32_t (*fenceTypes) (uint64_t);
+ uint32_t(*fenceTypes) (uint64_t);
};
static inline struct _WsbmUserPool *
@@ -279,9 +279,9 @@ pool_validate(struct _WsbmBufStorage *buf, uint64_t set_flags,
WSBM_MUTEX_LOCK(&buf->mutex);
- while(wsbmAtomicRead(&vBuf->writers) != 0)
+ while (wsbmAtomicRead(&vBuf->writers) != 0)
WSBM_COND_WAIT(&vBuf->event, &buf->mutex);
-
+
vBuf->unFenced = 1;
WSBM_MUTEX_LOCK(&p->mutex);
@@ -467,7 +467,7 @@ pool_releaseFromCpu(struct _WsbmBufStorage *buf, unsigned mode)
{
struct _WsbmUserBuffer *vBuf = userBuf(buf);
- if (wsbmAtomicDecZero(&vBuf->writers))
+ if (wsbmAtomicDecZero(&vBuf->writers))
WSBM_COND_BROADCAST(&vBuf->event);
}
@@ -487,7 +487,7 @@ pool_syncForCpu(struct _WsbmBufStorage *buf, unsigned mode)
}
ret = 0;
- if ((vBuf->fence == NULL) ||
+ if ((vBuf->fence == NULL) ||
wsbmFenceSignaled(vBuf->fence, vBuf->kBuf.fence_type_mask)) {
wsbmFenceUnreference(&vBuf->fence);
wsbmAtomicInc(&vBuf->writers);
@@ -503,7 +503,6 @@ pool_syncForCpu(struct _WsbmBufStorage *buf, unsigned mode)
return ret;
}
-
static unsigned long
pool_offset(struct _WsbmBufStorage *buf)
{
@@ -536,7 +535,7 @@ pool_fence(struct _WsbmBufStorage *buf, struct _WsbmFenceObject *fence)
vBuf->fence = wsbmFenceReference(fence);
vBuf->unFenced = 0;
vBuf->kBuf.fence_type_mask = vBuf->newFenceType;
-
+
WSBM_COND_BROADCAST(&vBuf->event);
WSBM_MUTEX_LOCK(&p->mutex);
if (vBuf->kBuf.placement & WSBM_PL_FLAG_VRAM)
@@ -609,12 +608,10 @@ pool_takedown(struct _WsbmBufferPool *pool)
free(p);
}
-void
-wsbmUserPoolClean(struct _WsbmBufferPool *pool,
- int cleanVram,
- int cleanAgp)
+void
+wsbmUserPoolClean(struct _WsbmBufferPool *pool, int cleanVram, int cleanAgp)
{
- struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
+ struct _WsbmUserPool *p = containerOf(pool, struct _WsbmUserPool, pool);
WSBM_MUTEX_LOCK(&p->mutex);
if (cleanVram)
@@ -627,9 +624,9 @@ wsbmUserPoolClean(struct _WsbmBufferPool *pool,
struct _WsbmBufferPool *
wsbmUserPoolInit(void *vramAddr,
unsigned long vramStart, unsigned long vramSize,
- void *agpAddr, unsigned long agpStart,
+ void *agpAddr, unsigned long agpStart,
unsigned long agpSize,
- uint32_t (*fenceTypes) (uint64_t set_flags))
+ uint32_t(*fenceTypes) (uint64_t set_flags))
{
struct _WsbmBufferPool *pool;
struct _WsbmUserPool *uPool;
@@ -659,7 +656,7 @@ wsbmUserPoolInit(void *vramAddr,
uPool->agpMap = (unsigned long)agpAddr;
uPool->vramOffset = vramStart;
uPool->vramMap = (unsigned long)vramAddr;
- uPool->fenceTypes = fenceTypes;
+ uPool->fenceTypes = fenceTypes;
pool = &uPool->pool;
pool->map = &pool_map;