-rw-r--r--  drivers/gpu/drm/radeon/radeon.h        | 84
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c |  2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ring.c   | 22
3 files changed, 54 insertions(+), 54 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 73e05cb85eca..1668ec1ee770 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -157,6 +157,47 @@ bool radeon_get_bios(struct radeon_device *rdev);
 
 
 /*
+ * Mutex which allows recursive locking from the same process.
+ */
+struct radeon_mutex {
+	struct mutex		mutex;
+	struct task_struct	*owner;
+	int			level;
+};
+
+static inline void radeon_mutex_init(struct radeon_mutex *mutex)
+{
+	mutex_init(&mutex->mutex);
+	mutex->owner = NULL;
+	mutex->level = 0;
+}
+
+static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
+{
+	if (mutex_trylock(&mutex->mutex)) {
+		/* The mutex was unlocked before, so it's ours now */
+		mutex->owner = current;
+	} else if (mutex->owner != current) {
+		/* Another process locked the mutex, take it */
+		mutex_lock(&mutex->mutex);
+		mutex->owner = current;
+	}
+	/* Otherwise the mutex was already locked by this process */
+
+	mutex->level++;
+}
+
+static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
+{
+	if (--mutex->level > 0)
+		return;
+
+	mutex->owner = NULL;
+	mutex_unlock(&mutex->mutex);
+}
+
+
+/*
  * Dummy page
  */
 struct radeon_dummy_page {
@@ -598,7 +639,7 @@ struct radeon_ib {
  * mutex protects scheduled_ibs, ready, alloc_bm
  */
 struct radeon_ib_pool {
-	struct mutex		mutex;
+	struct radeon_mutex	mutex;
 	struct radeon_sa_manager	sa_manager;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
@@ -1355,47 +1396,6 @@ struct r600_vram_scratch {
 
 
 /*
- * Mutex which allows recursive locking from the same process.
- */
-struct radeon_mutex {
-	struct mutex		mutex;
-	struct task_struct	*owner;
-	int			level;
-};
-
-static inline void radeon_mutex_init(struct radeon_mutex *mutex)
-{
-	mutex_init(&mutex->mutex);
-	mutex->owner = NULL;
-	mutex->level = 0;
-}
-
-static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
-{
-	if (mutex_trylock(&mutex->mutex)) {
-		/* The mutex was unlocked before, so it's ours now */
-		mutex->owner = current;
-	} else if (mutex->owner != current) {
-		/* Another process locked the mutex, take it */
-		mutex_lock(&mutex->mutex);
-		mutex->owner = current;
-	}
-	/* Otherwise the mutex was already locked by this process */
-
-	mutex->level++;
-}
-
-static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
-{
-	if (--mutex->level > 0)
-		return;
-
-	mutex->owner = NULL;
-	mutex_unlock(&mutex->mutex);
-}
-
-
-/*
  * Core structure, functions and helpers.
  */
 typedef uint32_t (*radeon_rreg_t)(struct radeon_device*, uint32_t);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a811bc64ad5c..cec51a5b69dd 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -720,7 +720,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	/* mutex initialization are all done here so we
 	 * can recall function without having locking issues */
 	radeon_mutex_init(&rdev->cs_mutex);
-	mutex_init(&rdev->ib_pool.mutex);
+	radeon_mutex_init(&rdev->ib_pool.mutex);
 	for (i = 0; i < RADEON_NUM_RINGS; ++i)
 		mutex_init(&rdev->ring[i].mutex);
 	mutex_init(&rdev->dc_hw_i2c_mutex);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 1cb4b941be47..30a4c5014c8b 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -109,12 +109,12 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
 		return r;
 	}
 
-	mutex_lock(&rdev->ib_pool.mutex);
+	radeon_mutex_lock(&rdev->ib_pool.mutex);
 	idx = rdev->ib_pool.head_id;
 retry:
 	if (cretry > 5) {
 		dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
-		mutex_unlock(&rdev->ib_pool.mutex);
+		radeon_mutex_unlock(&rdev->ib_pool.mutex);
 		radeon_fence_unref(&fence);
 		return -ENOMEM;
 	}
@@ -139,7 +139,7 @@ retry:
 			 */
 			rdev->ib_pool.head_id = (1 + idx);
 			rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
-			mutex_unlock(&rdev->ib_pool.mutex);
+			radeon_mutex_unlock(&rdev->ib_pool.mutex);
 			return 0;
 		}
 	}
@@ -158,7 +158,7 @@ retry:
 		}
 		idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
 	}
-	mutex_unlock(&rdev->ib_pool.mutex);
+	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 	radeon_fence_unref(&fence);
 	return r;
 }
@@ -171,12 +171,12 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
 	if (tmp == NULL) {
 		return;
 	}
-	mutex_lock(&rdev->ib_pool.mutex);
+	radeon_mutex_lock(&rdev->ib_pool.mutex);
 	if (tmp->fence && !tmp->fence->emitted) {
 		radeon_sa_bo_free(rdev, &tmp->sa_bo);
 		radeon_fence_unref(&tmp->fence);
 	}
-	mutex_unlock(&rdev->ib_pool.mutex);
+	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -214,9 +214,9 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		return r;
 	}
 
-	mutex_lock(&rdev->ib_pool.mutex);
+	radeon_mutex_lock(&rdev->ib_pool.mutex);
 	if (rdev->ib_pool.ready) {
-		mutex_unlock(&rdev->ib_pool.mutex);
+		radeon_mutex_unlock(&rdev->ib_pool.mutex);
 		radeon_sa_bo_manager_fini(rdev, &tmp);
 		return 0;
 	}
@@ -239,7 +239,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 	if (radeon_debugfs_ring_init(rdev)) {
 		DRM_ERROR("Failed to register debugfs file for rings !\n");
 	}
-	mutex_unlock(&rdev->ib_pool.mutex);
+	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 	return 0;
 }
 
@@ -247,7 +247,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
 	unsigned i;
 
-	mutex_lock(&rdev->ib_pool.mutex);
+	radeon_mutex_lock(&rdev->ib_pool.mutex);
 	if (rdev->ib_pool.ready) {
 		for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
 			radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
@@ -256,7 +256,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 		radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
 		rdev->ib_pool.ready = false;
 	}
-	mutex_unlock(&rdev->ib_pool.mutex);
+	radeon_mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_pool_start(struct radeon_device *rdev)
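
For readers unfamiliar with the pattern, the sketch below is a minimal userspace analogue of the recursion-counting lock that radeon_mutex implements: try the underlying mutex first, fall back to a blocking lock only when another task holds it, and otherwise just increase a nesting counter. The rec_mutex names and the pthread mapping are illustrative assumptions and not part of the patch; the kernel version tracks ownership with a task_struct pointer and clears it before the final unlock.

/*
 * Illustrative userspace analogue of the radeon_mutex pattern
 * (hypothetical names, not part of the radeon code).
 * Build with: cc -pthread rec_mutex.c
 */
#include <pthread.h>
#include <stdio.h>

struct rec_mutex {
	pthread_mutex_t mutex;	/* underlying non-recursive lock */
	pthread_t	owner;	/* thread currently holding it */
	int		level;	/* recursion depth */
};

static void rec_mutex_init(struct rec_mutex *m)
{
	pthread_mutex_init(&m->mutex, NULL);
	m->level = 0;
}

static void rec_mutex_lock(struct rec_mutex *m)
{
	if (pthread_mutex_trylock(&m->mutex) == 0) {
		/* The lock was free, so it's ours now */
		m->owner = pthread_self();
	} else if (!pthread_equal(m->owner, pthread_self())) {
		/* Another thread holds it, wait for it */
		pthread_mutex_lock(&m->mutex);
		m->owner = pthread_self();
	}
	/* Otherwise we already hold it; just nest one level deeper */

	m->level++;
}

static void rec_mutex_unlock(struct rec_mutex *m)
{
	if (--m->level > 0)
		return;		/* still nested, keep the lock */

	pthread_mutex_unlock(&m->mutex);
}

int main(void)
{
	struct rec_mutex m;

	rec_mutex_init(&m);
	rec_mutex_lock(&m);	/* outer acquisition */
	rec_mutex_lock(&m);	/* nested acquisition: no deadlock */
	printf("nesting level: %d\n", m.level);	/* prints 2 */
	rec_mutex_unlock(&m);
	rec_mutex_unlock(&m);	/* lock actually released here */
	return 0;
}

As in the kernel helpers, the unlocked read of the owner field is only decisive when it compares equal to the caller: a task can observe itself as owner only if it genuinely holds the lock, since it is the sole writer of that value while holding it, and every other outcome falls through to the blocking path. That is why the ib_pool call sites in radeon_ring.c can now be entered again from a context that already holds ib_pool.mutex without deadlocking.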