author     Eric Anholt <eric@anholt.net>  2010-05-25 20:32:22 -0700
committer  Eric Anholt <eric@anholt.net>  2010-05-26 12:11:58 -0700
commit     385193244ba1ed74276667a6d685205692059ba3
tree       2a3333fe1570a9a82c0c3f5ea2661121ade82d0f
parent     607e228c263d5d171bd0615d5d93202dda371e5f

    intel: Add likely/unlikely annotations to hopefully improve perf.

 intel/intel_bufmgr_gem.c  | 88
 intel/intel_bufmgr_priv.h | 10
 2 files changed, 54 insertions(+), 44 deletions(-)
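
The patch wraps branch conditions whose outcome is strongly biased (debug output, error paths, EINTR restarts, validation-list growth) in likely()/unlikely() macros built on GCC's __builtin_expect, so the compiler lays the expected case out as the fall-through path. Below is a minimal, self-contained sketch of the technique, assuming GCC or a compatible compiler; the macro definitions mirror the ones the patch adds to intel_bufmgr_priv.h, while alloc_or_die() is purely illustrative and not part of libdrm.

    #include <stdlib.h>

    /* Same shape as the macros added to intel_bufmgr_priv.h below. */
    #ifdef __GNUC__
    #define likely(expr)   (__builtin_expect(expr, 1))
    #define unlikely(expr) (__builtin_expect(expr, 0))
    #else
    #define likely(expr)   (expr)
    #define unlikely(expr) (expr)
    #endif

    /* Illustrative helper: the error path is annotated unlikely(), so the
     * success path stays on the straight-line fall-through and the abort
     * branch is kept out of the hot code. */
    static void *alloc_or_die(size_t n)
    {
            void *p = malloc(n);

            if (unlikely(p == NULL))
                    abort();

            return p;
    }
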
diff --git a/intel/intel_bufmgr_gem.c b/intel/intel_bufmgr_gem.c
index b76fd7ed..b9ff0a5c 100644
--- a/intel/intel_bufmgr_gem.c
+++ b/intel/intel_bufmgr_gem.c
@@ -62,7 +62,7 @@
#include "i915_drm.h"
#define DBG(...) do { \
- if (bufmgr_gem->bufmgr.debug) \
+ if (unlikely(bufmgr_gem->bufmgr.debug)) \
fprintf(stderr, __VA_ARGS__); \
} while (0)
@@ -357,7 +357,7 @@ drm_intel_add_validate_buffer(drm_intel_bo *bo)
return;
/* Extend the array of validation entries as necessary. */
- if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+ if (unlikely(bufmgr_gem->exec_count == bufmgr_gem->exec_size)) {
int new_size = bufmgr_gem->exec_size * 2;
if (new_size == 0)
@@ -399,7 +399,7 @@ drm_intel_add_validate_buffer2(drm_intel_bo *bo, int need_fence)
}
/* Extend the array of validation entries as necessary. */
- if (bufmgr_gem->exec_count == bufmgr_gem->exec_size) {
+ if (unlikely(bufmgr_gem->exec_count == bufmgr_gem->exec_size)) {
int new_size = bufmgr_gem->exec_size * 2;
if (new_size == 0)
@@ -499,7 +499,7 @@ drm_intel_gem_bo_busy(drm_intel_bo *bo)
do {
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
- } while (ret == -1 && errno == EINTR);
+ } while (unlikely(ret == -1 && errno == EINTR));
return (ret == 0 && busy.busy);
}
@@ -716,11 +716,11 @@ drm_intel_gem_bo_alloc_tiled(drm_intel_bufmgr *bufmgr, const char *name,
size = drm_intel_gem_bo_tile_size(bufmgr_gem, size, tiling_mode);
bo = drm_intel_gem_bo_alloc_internal(bufmgr, name, size, flags);
- if (!bo)
+ if (unlikely(!bo))
return NULL;
ret = drm_intel_gem_bo_set_tiling(bo, tiling_mode, stride);
- if (ret != 0) {
+ if (unlikely(ret != 0)) {
drm_intel_gem_bo_unreference(bo);
return NULL;
}
@@ -748,7 +748,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
struct drm_i915_gem_get_tiling get_tiling;
bo_gem = calloc(1, sizeof(*bo_gem));
- if (!bo_gem)
+ if (unlikely(!bo_gem))
return NULL;
memset(&open_arg, 0, sizeof(open_arg));
@@ -757,8 +757,8 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
ret = ioctl(bufmgr_gem->fd,
DRM_IOCTL_GEM_OPEN,
&open_arg);
- } while (ret == -1 && errno == EINTR);
- if (ret != 0) {
+ } while (unlikely(ret == -1 && errno == EINTR));
+ if (unlikely(ret != 0)) {
fprintf(stderr, "Couldn't reference %s handle 0x%08x: %s\n",
name, handle, strerror(errno));
free(bo_gem);
@@ -778,7 +778,7 @@ drm_intel_bo_gem_create_from_name(drm_intel_bufmgr *bufmgr,
memset(&get_tiling, 0, sizeof(get_tiling));
get_tiling.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
- if (ret != 0) {
+ if (unlikely(ret != 0)) {
drm_intel_gem_bo_unreference(&bo_gem->bo);
return NULL;
}
@@ -808,7 +808,7 @@ drm_intel_gem_bo_free(drm_intel_bo *bo)
memset(&close, 0, sizeof(close));
close.handle = bo_gem->gem_handle;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_GEM_CLOSE, &close);
- if (ret != 0) {
+ if (unlikely(ret != 0)) {
fprintf(stderr,
"DRM_IOCTL_GEM_CLOSE %d failed (%s): %s\n",
bo_gem->gem_handle, bo_gem->name, strerror(errno));
@@ -898,7 +898,7 @@ static void drm_intel_gem_bo_unreference_locked_timed(drm_intel_bo *bo,
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
assert(atomic_read(&bo_gem->refcount) > 0);
- if (atomic_dec_and_test(&bo_gem->refcount))
+ if (unlikely(atomic_dec_and_test(&bo_gem->refcount)))
drm_intel_gem_bo_unreference_final(bo, time);
}
@@ -907,7 +907,7 @@ static void drm_intel_gem_bo_unreference(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
assert(atomic_read(&bo_gem->refcount) > 0);
- if (atomic_dec_and_test(&bo_gem->refcount)) {
+ if (unlikely(atomic_dec_and_test(&bo_gem->refcount))) {
drm_intel_bufmgr_gem *bufmgr_gem =
(drm_intel_bufmgr_gem *) bo->bufmgr;
struct timespec time;
@@ -932,7 +932,7 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
/* Allow recursive mapping. Mesa may recursively map buffers with
* nested display loops.
*/
- if (!bo_gem->mem_virtual) {
+ if (unlikely(!bo_gem->mem_virtual)) {
struct drm_i915_gem_mmap mmap_arg;
DBG("bo_map: %d (%s)\n", bo_gem->gem_handle, bo_gem->name);
@@ -971,8 +971,8 @@ static int drm_intel_gem_bo_map(drm_intel_bo *bo, int write_enable)
ret = ioctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
- } while (ret == -1 && errno == EINTR);
- if (ret != 0) {
+ } while (unlikely(ret == -1 && errno == EINTR));
+ if (unlikely(ret != 0)) {
ret = -errno;
fprintf(stderr, "%s:%d: Error setting to CPU domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle,
@@ -996,7 +996,7 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
pthread_mutex_lock(&bufmgr_gem->lock);
/* Get a mapping of the buffer if we haven't before. */
- if (bo_gem->gtt_virtual == NULL) {
+ if (unlikely(bo_gem->gtt_virtual == NULL)) {
struct drm_i915_gem_mmap_gtt mmap_arg;
DBG("bo_map_gtt: mmap %d (%s)\n", bo_gem->gem_handle,
@@ -1052,9 +1052,9 @@ int drm_intel_gem_bo_map_gtt(drm_intel_bo *bo)
ret = ioctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
- } while (ret == -1 && errno == EINTR);
+ } while (unlikely(ret == -1 && errno == EINTR));
- if (ret != 0) {
+ if (unlikely(ret != 0)) {
ret = -errno;
fprintf(stderr, "%s:%d: Error setting domain %d: %s\n",
__FILE__, __LINE__, bo_gem->gem_handle,
@@ -1072,7 +1072,7 @@ int drm_intel_gem_bo_unmap_gtt(drm_intel_bo *bo)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
int ret = 0;
- if (bo == NULL)
+ if (unlikely(bo == NULL))
return 0;
assert(bo_gem->gtt_virtual != NULL);
@@ -1091,7 +1091,7 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
struct drm_i915_gem_sw_finish sw_finish;
int ret;
- if (bo == NULL)
+ if (unlikely(bo == NULL))
return 0;
assert(bo_gem->mem_virtual != NULL);
@@ -1106,7 +1106,7 @@ static int drm_intel_gem_bo_unmap(drm_intel_bo *bo)
ret = ioctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SW_FINISH,
&sw_finish);
- } while (ret == -1 && errno == EINTR);
+ } while (unlikely(ret == -1 && errno == EINTR));
ret = ret == -1 ? -errno : 0;
bo->virtual = NULL;
@@ -1133,8 +1133,8 @@ drm_intel_gem_bo_subdata(drm_intel_bo *bo, unsigned long offset,
ret = ioctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PWRITE,
&pwrite);
- } while (ret == -1 && errno == EINTR);
- if (ret != 0) {
+ } while (unlikely(ret == -1 && errno == EINTR));
+ if (unlikely(ret != 0)) {
ret = -errno;
fprintf(stderr,
"%s:%d: Error writing data to buffer %d: (%d %d) %s .\n",
@@ -1155,7 +1155,7 @@ drm_intel_gem_get_pipe_from_crtc_id(drm_intel_bufmgr *bufmgr, int crtc_id)
get_pipe_from_crtc_id.crtc_id = crtc_id;
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GET_PIPE_FROM_CRTC_ID,
&get_pipe_from_crtc_id);
- if (ret != 0) {
+ if (unlikely(ret != 0)) {
/* We return -1 here to signal that we don't
* know which pipe is associated with this crtc.
* This lets the caller know that this information
@@ -1186,8 +1186,8 @@ drm_intel_gem_bo_get_subdata(drm_intel_bo *bo, unsigned long offset,
ret = ioctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_PREAD,
&pread);
- } while (ret == -1 && errno == EINTR);
- if (ret != 0) {
+ } while (unlikely(ret == -1 && errno == EINTR));
+ if (unlikely(ret != 0)) {
ret = -errno;
fprintf(stderr,
"%s:%d: Error reading data from buffer %d: (%d %d) %s .\n",
@@ -1227,8 +1227,8 @@ drm_intel_gem_bo_start_gtt_access(drm_intel_bo *bo, int write_enable)
ret = ioctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_SET_DOMAIN,
&set_domain);
- } while (ret == -1 && errno == EINTR);
- if (ret != 0) {
+ } while (unlikely(ret == -1 && errno == EINTR));
+ if (unlikely(ret != 0)) {
fprintf(stderr,
"%s:%d: Error setting memory domains %d (%08x %08x): %s .\n",
__FILE__, __LINE__, bo_gem->gem_handle,
@@ -1286,10 +1286,10 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
drm_intel_bo_gem *target_bo_gem = (drm_intel_bo_gem *) target_bo;
- if (bo_gem->has_error)
+ if (unlikely(bo_gem->has_error))
return -ENOMEM;
- if (target_bo_gem->has_error) {
+ if (unlikely(target_bo_gem->has_error)) {
bo_gem->has_error = 1;
return -ENOMEM;
}
@@ -1302,7 +1302,7 @@ do_bo_emit_reloc(drm_intel_bo *bo, uint32_t offset,
need_fence = 0;
/* Create a new relocation list if needed */
- if (bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo))
+ if (unlikely(bo_gem->relocs == NULL && drm_intel_setup_reloc_list(bo)))
return -ENOMEM;
/* Check overflow */
@@ -1430,7 +1430,7 @@ drm_intel_update_buffer_offsets(drm_intel_bufmgr_gem *bufmgr_gem)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo;
/* Update the buffer offset */
- if (bufmgr_gem->exec_objects[i].offset != bo->offset) {
+ if (unlikely(bufmgr_gem->exec_objects[i].offset != bo->offset)) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset,
(unsigned long long)bufmgr_gem->exec_objects[i].
@@ -1450,7 +1450,7 @@ drm_intel_update_buffer_offsets2 (drm_intel_bufmgr_gem *bufmgr_gem)
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *)bo;
/* Update the buffer offset */
- if (bufmgr_gem->exec2_objects[i].offset != bo->offset) {
+ if (unlikely(bufmgr_gem->exec2_objects[i].offset != bo->offset)) {
DBG("BO %d (%s) migrated: 0x%08lx -> 0x%08llx\n",
bo_gem->gem_handle, bo_gem->name, bo->offset,
(unsigned long long)bufmgr_gem->exec2_objects[i].offset);
@@ -1468,7 +1468,7 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
struct drm_i915_gem_execbuffer execbuf;
int ret, i;
- if (bo_gem->has_error)
+ if (unlikely(bo_gem->has_error))
return -ENOMEM;
pthread_mutex_lock(&bufmgr_gem->lock);
@@ -1493,9 +1493,9 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
ret = ioctl(bufmgr_gem->fd,
DRM_IOCTL_I915_GEM_EXECBUFFER,
&execbuf);
- } while (ret != 0 && errno == EINTR);
+ } while (unlikely(ret != 0 && errno == EINTR));
- if (ret != 0) {
+ if (unlikely(ret != 0)) {
ret = -errno;
if (errno == ENOSPC) {
fprintf(stderr,
@@ -1512,7 +1512,7 @@ drm_intel_gem_bo_exec(drm_intel_bo *bo, int used,
}
drm_intel_update_buffer_offsets(bufmgr_gem);
- if (bufmgr_gem->bufmgr.debug)
+ if (unlikely(bufmgr_gem->bufmgr.debug))
drm_intel_gem_dump_validation_list(bufmgr_gem);
for (i = 0; i < bufmgr_gem->exec_count; i++) {
@@ -1562,9 +1562,9 @@ drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
do {
ret = ioctl(bufmgr_gem->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2,
&execbuf);
- } while (ret != 0 && errno == EINTR);
+ } while (unlikely(ret != 0 && errno == EINTR));
- if (ret != 0) {
+ if (unlikely(ret != 0)) {
ret = -errno;
if (ret == -ENOMEM) {
fprintf(stderr,
@@ -1579,7 +1579,7 @@ drm_intel_gem_bo_exec2(drm_intel_bo *bo, int used,
}
drm_intel_update_buffer_offsets2(bufmgr_gem);
- if (bufmgr_gem->bufmgr.debug)
+ if (unlikely(bufmgr_gem->bufmgr.debug))
drm_intel_gem_dump_validation_list(bufmgr_gem);
for (i = 0; i < bufmgr_gem->exec_count; i++) {
@@ -1820,7 +1820,7 @@ drm_intel_gem_estimate_batch_space(drm_intel_bo **bo_array, int count)
for (i = 0; i < count; i++) {
drm_intel_bo_gem *bo_gem = (drm_intel_bo_gem *) bo_array[i];
- if (bo_gem != NULL)
+ if (likely(bo_gem != NULL))
total += bo_gem->reloc_tree_size;
}
return total;
@@ -1894,10 +1894,10 @@ drm_intel_gem_check_aperture_space(drm_intel_bo **bo_array, int count)
total = drm_intel_gem_estimate_batch_space(bo_array, count);
- if (total > threshold)
+ if (unlikely(total > threshold))
total = drm_intel_gem_compute_batch_space(bo_array, count);
- if (total > threshold) {
+ if (unlikely(total > threshold)) {
DBG("check_space: overflowed available aperture, "
"%dkb vs %dkb\n",
total / 1024, (int)bufmgr_gem->gtt_size / 1024);
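
Many of the hunks above annotate the same restart-on-EINTR ioctl loop: the retry condition only holds when a signal interrupts the ioctl, so marking it unlikely() keeps the single-pass case on the predicted path. A hedged sketch of that pattern in isolation follows; the wrapper name and the negative-errno return convention are illustrative, not taken from the patch, and the unlikely() fallback assumes a GCC-compatible compiler as above.

    #include <errno.h>
    #include <sys/ioctl.h>

    #ifndef unlikely
    #define unlikely(expr) (__builtin_expect(expr, 0))
    #endif

    /* Retry the ioctl while it is interrupted by a signal; the interrupted
     * case is rare, so the loop condition is marked unlikely(). */
    static int restarting_ioctl(int fd, unsigned long request, void *arg)
    {
            int ret;

            do {
                    ret = ioctl(fd, request, arg);
            } while (unlikely(ret == -1 && errno == EINTR));

            return ret == -1 ? -errno : 0;
    }
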
diff --git a/intel/intel_bufmgr_priv.h b/intel/intel_bufmgr_priv.h
index f987d97a..f8137139 100644
--- a/intel/intel_bufmgr_priv.h
+++ b/intel/intel_bufmgr_priv.h
@@ -277,4 +277,14 @@ struct _drm_intel_bufmgr {
#define ROUND_UP_TO(x, y) (((x) + (y) - 1) / (y) * (y))
#define ROUND_UP_TO_MB(x) ROUND_UP_TO((x), 1024*1024)
+#ifndef likely
+#ifdef __GNUC__
+#define likely(expr) (__builtin_expect(expr, 1))
+#define unlikely(expr) (__builtin_expect(expr, 0))
+#else
+#define likely(expr) (expr)
+#define unlikely(expr) (expr)
+#endif
+#endif
+
#endif /* INTEL_BUFMGR_PRIV_H */
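
The new definitions in intel_bufmgr_priv.h are wrapped in #ifndef likely so they do not clash if another header in the include chain already provides the macros, and on non-GNU compilers they fall back to the bare expression. Since __builtin_expect returns the value of its first argument, the annotations are purely a prediction hint and never change behaviour.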