author     Chris Wilson <chris@chris-wilson.co.uk>   2009-06-11 15:18:55 +0100
committer  Chris Wilson <chris@chris-wilson.co.uk>   2009-07-23 16:18:42 +0100
commit     c3f2db4f73e93a3dafe0f52a5f9ca09ca78cc906 (patch)
tree       5cb530cb949e950370c2ff4866c560b1a1075b6f /src
parent     bed2701e1c89095878d549cbca8f22d84f3dda3c (diff)
[drm] Add an accelerated image surface.
Use the DRM interface to hardware-accelerate composition on image surfaces. The purpose of this backend is simply to explore what such a hardware interface might look like and what benefits we might expect from it.

The use case that might justify writing such custom backends is an embedded device running a DRM compositor such as Wayland, which would, for example, allow one to write applications that seamlessly integrate accelerated, dynamic, high-quality 2D graphics using Cairo with advanced interaction (e.g. smooth animations in the UI) driven by a Clutter framework.

In this first step we introduce the fundamental wrapping of GEM for Intel and Radeon chipsets and, for comparison, Gallium. There is no acceleration yet: all we do is use buffer objects (that is, the kernel memory manager) to allocate images and then rely on the fallback mechanism. This provides a suitable base on which to start writing chip-specific drivers.
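As a rough sketch of how the public API added in cairo-drm.h might be used (assuming cairo was built with the DRM backend enabled; error checking is elided and the reference-counting conventions of the new device object are an assumption here):

    #include <cairo.h>
    #include <cairo-drm.h>

    void
    paint_on_drm (void)
    {
        cairo_drm_device_t *device;
        cairo_surface_t *surface;
        cairo_t *cr;

        device = cairo_drm_device_default ();

        /* Buffer-object backed surface; with this patch rendering still
         * goes through the image-surface fallback paths. */
        surface = cairo_drm_surface_create (device,
                                            CAIRO_CONTENT_COLOR_ALPHA,
                                            256, 256);

        cr = cairo_create (surface);
        cairo_set_source_rgb (cr, 1, 1, 1);
        cairo_paint (cr);
        cairo_destroy (cr);

        cairo_surface_destroy (surface);
        cairo_drm_device_destroy (device);
    }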
Diffstat (limited to 'src')
-rw-r--r--  src/Makefile.sources                 |  13
-rw-r--r--  src/Makefile.win32.features          |  28
-rw-r--r--  src/cairo-debug.c                    |   4
-rw-r--r--  src/cairo-drm.h                      | 135
-rw-r--r--  src/cairo-freelist-private.h         |  59
-rw-r--r--  src/cairo-freelist.c                 |  73
-rw-r--r--  src/cairo-image-surface.c            |   1
-rw-r--r--  src/cairo-mutex-list-private.h       |   3
-rw-r--r--  src/cairo.h                          |   2
-rw-r--r--  src/cairoint.h                       |   8
-rw-r--r--  src/drm/cairo-drm-bo.c               | 120
-rw-r--r--  src/drm/cairo-drm-gallium-surface.c  | 696
-rw-r--r--  src/drm/cairo-drm-intel-private.h    | 182
-rw-r--r--  src/drm/cairo-drm-intel-surface.c    | 475
-rw-r--r--  src/drm/cairo-drm-intel.c            | 933
-rw-r--r--  src/drm/cairo-drm-ioctl-private.h    |  12
-rw-r--r--  src/drm/cairo-drm-private.h          | 257
-rw-r--r--  src/drm/cairo-drm-radeon-private.h   | 110
-rw-r--r--  src/drm/cairo-drm-radeon-surface.c   | 437
-rw-r--r--  src/drm/cairo-drm-radeon.c           | 447
-rw-r--r--  src/drm/cairo-drm-surface.c          | 517
-rw-r--r--  src/drm/cairo-drm.c                  | 362
22 files changed, 4869 insertions(+), 5 deletions(-)
diff --git a/src/Makefile.sources b/src/Makefile.sources
index d6fa582a..1a1d35f9 100644
--- a/src/Makefile.sources
+++ b/src/Makefile.sources
@@ -273,6 +273,19 @@ cairo_glitz_sources = cairo-glitz-surface.c
cairo_directfb_headers = cairo-directfb.h
cairo_directfb_sources = cairo-directfb-surface.c
+cairo_drm_headers = cairo-drm.h
+cairo_drm_private = drm/cairo-drm-private.h \
+ drm/cairo-drm-intel-private.h \
+ drm/cairo-drm-radeon-private.h
+cairo_drm_sources = drm/cairo-drm.c \
+ drm/cairo-drm-bo.c \
+ drm/cairo-drm-surface.c \
+ drm/cairo-drm-intel.c \
+ drm/cairo-drm-intel-surface.c \
+ drm/cairo-drm-radeon.c \
+ drm/cairo-drm-radeon-surface.c
+cairo_gallium_sources = drm/cairo-drm-gallium-surface.c
+
cairo_script_headers = cairo-script.h
cairo_script_sources = cairo-script-surface.c
diff --git a/src/Makefile.win32.features b/src/Makefile.win32.features
index 61b630c4..048ead05 100644
--- a/src/Makefile.win32.features
+++ b/src/Makefile.win32.features
@@ -175,6 +175,34 @@ ifeq ($(CAIRO_HAS_BEOS_SURFACE),1)
enabled_cairo_pkgconf += cairo-beos.pc
endif
+unsupported_cairo_headers += $(cairo_drm_headers)
+all_cairo_headers += $(cairo_drm_headers)
+all_cairo_private += $(cairo_drm_private)
+all_cairo_sources += $(cairo_drm_sources)
+ifeq ($(CAIRO_HAS_DRM_SURFACE),1)
+enabled_cairo_headers += $(cairo_drm_headers)
+enabled_cairo_private += $(cairo_drm_private)
+enabled_cairo_sources += $(cairo_drm_sources)
+endif
+all_cairo_pkgconf += cairo-drm.pc
+ifeq ($(CAIRO_HAS_DRM_SURFACE),1)
+enabled_cairo_pkgconf += cairo-drm.pc
+endif
+
+unsupported_cairo_headers += $(cairo_gallium_headers)
+all_cairo_headers += $(cairo_gallium_headers)
+all_cairo_private += $(cairo_gallium_private)
+all_cairo_sources += $(cairo_gallium_sources)
+ifeq ($(CAIRO_HAS_GALLIUM_SURFACE),1)
+enabled_cairo_headers += $(cairo_gallium_headers)
+enabled_cairo_private += $(cairo_gallium_private)
+enabled_cairo_sources += $(cairo_gallium_sources)
+endif
+all_cairo_pkgconf += cairo-gallium.pc
+ifeq ($(CAIRO_HAS_GALLIUM_SURFACE),1)
+enabled_cairo_pkgconf += cairo-gallium.pc
+endif
+
supported_cairo_headers += $(cairo_png_headers)
all_cairo_headers += $(cairo_png_headers)
all_cairo_private += $(cairo_png_private)
diff --git a/src/cairo-debug.c b/src/cairo-debug.c
index 4dccebeb..4409e6eb 100644
--- a/src/cairo-debug.c
+++ b/src/cairo-debug.c
@@ -77,6 +77,10 @@ cairo_debug_reset_static_data (void)
_cairo_clip_reset_static_data ();
+#if CAIRO_HAS_DRM_SURFACE
+ _cairo_drm_device_reset_static_data ();
+#endif
+
CAIRO_MUTEX_FINALIZE ();
}
diff --git a/src/cairo-drm.h b/src/cairo-drm.h
new file mode 100644
index 00000000..1b50b1bd
--- /dev/null
+++ b/src/cairo-drm.h
@@ -0,0 +1,135 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ * The Original Code is the cairo graphics library.
+ *
+ * The Initial Developer of the Original Code is Chris Wilson.
+ */
+
+#ifndef CAIRO_DRM_H
+#define CAIRO_DRM_H
+
+#include "cairo.h"
+
+#if CAIRO_HAS_DRM_SURFACE
+
+CAIRO_BEGIN_DECLS
+
+typedef struct _cairo_drm_device cairo_drm_device_t;
+
+struct udev_device;
+
+cairo_public cairo_drm_device_t *
+cairo_drm_device_get (struct udev_device *device);
+
+cairo_public cairo_drm_device_t *
+cairo_drm_device_get_for_fd (int fd);
+
+cairo_public cairo_drm_device_t *
+cairo_drm_device_default (void);
+
+cairo_public cairo_drm_device_t *
+cairo_drm_device_reference (cairo_drm_device_t *device);
+
+cairo_public cairo_status_t
+cairo_drm_device_status (cairo_drm_device_t *device);
+
+cairo_public int
+cairo_drm_device_get_fd (cairo_drm_device_t *device);
+
+cairo_public void
+cairo_drm_device_throttle (cairo_drm_device_t *device);
+
+cairo_public void
+cairo_drm_device_destroy (cairo_drm_device_t *device);
+
+
+cairo_public cairo_surface_t *
+cairo_drm_surface_create (cairo_drm_device_t *device,
+ cairo_content_t content,
+ int width, int height);
+
+cairo_public cairo_surface_t *
+cairo_drm_surface_create_for_name (cairo_drm_device_t *device,
+ unsigned int name,
+ cairo_format_t format,
+ int width, int height, int stride);
+
+cairo_public cairo_surface_t *
+cairo_drm_surface_create_from_cacheable_image (cairo_drm_device_t *device,
+ cairo_surface_t *surface);
+
+cairo_public cairo_status_t
+cairo_drm_surface_enable_scan_out (cairo_surface_t *surface);
+
+cairo_public cairo_drm_device_t *
+cairo_drm_surface_get_device (cairo_surface_t *abstract_surface);
+
+cairo_public unsigned int
+cairo_drm_surface_get_handle (cairo_surface_t *surface);
+
+cairo_public unsigned int
+cairo_drm_surface_get_name (cairo_surface_t *surface);
+
+cairo_public cairo_format_t
+cairo_drm_surface_get_format (cairo_surface_t *surface);
+
+cairo_public int
+cairo_drm_surface_get_width (cairo_surface_t *surface);
+
+cairo_public int
+cairo_drm_surface_get_height (cairo_surface_t *surface);
+
+cairo_public int
+cairo_drm_surface_get_stride (cairo_surface_t *surface);
+
+/* XXX map/unmap, general surface layer? */
+
+/* Rough outline, culled from a conversation on IRC:
+ * map() returns an image-surface representation of the drm-surface,
+ * which you unmap() when you are finished, i.e. map() pulls the buffer back
+ * from the GPU, maps it into the CPU domain and gives you direct access to
+ * the pixels. With the unmap(), the buffer is ready to be used again by the
+ * GPU and *until* the unmap(), all operations will be done in software.
+ *
+ * (Technically calling cairo_surface_flush() on the underlying drm-surface
+ * will also disassociate the mapping.)
+*/
+cairo_public cairo_surface_t *
+cairo_drm_surface_map (cairo_surface_t *surface);
+
+cairo_public void
+cairo_drm_surface_unmap (cairo_surface_t *drm_surface,
+ cairo_surface_t *image_surface);
+
+CAIRO_END_DECLS
+
+#else /* CAIRO_HAS_DRM_SURFACE */
+# error Cairo was not compiled with support for the DRM backend
+#endif /* CAIRO_HAS_DRM_SURFACE */
+
+#endif /* CAIRO_DRM_H */
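The map/unmap flow outlined in the comment above might look like this in client code (a hedged sketch; drm_surface is assumed to be a surface created via cairo_drm_surface_create):

    cairo_surface_t *image;
    unsigned char *pixels;
    int stride;

    /* Pull the buffer back into the CPU domain for direct pixel access. */
    image = cairo_drm_surface_map (drm_surface);
    pixels = cairo_image_surface_get_data (image);
    stride = cairo_image_surface_get_stride (image);

    /* ... modify pixels[y * stride + x * 4] directly ... */

    /* Hand the buffer back to the GPU; until this point, operations on
     * drm_surface are performed in software on the mapped image. */
    cairo_drm_surface_unmap (drm_surface, image);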
diff --git a/src/cairo-freelist-private.h b/src/cairo-freelist-private.h
index 48791c23..8f9f1534 100644
--- a/src/cairo-freelist-private.h
+++ b/src/cairo-freelist-private.h
@@ -25,18 +25,31 @@
#include "cairo-types-private.h"
#include "cairo-compiler-private.h"
-/* Opaque implementation types. */
-typedef struct _cairo_freelist cairo_freelist_t;
-typedef struct _cairo_freelist_node cairo_freelist_node_t;
+/* for stand-alone compilation */
+#ifndef VG
+#define VG(x)
+#endif
+
+#ifndef NULL
+#define NULL (void *) 0
+#endif
+typedef struct _cairo_freelist_node cairo_freelist_node_t;
struct _cairo_freelist_node {
cairo_freelist_node_t *next;
};
-struct _cairo_freelist {
+typedef struct _cairo_freelist {
cairo_freelist_node_t *first_free_node;
unsigned nodesize;
-};
+} cairo_freelist_t;
+
+typedef struct _cairo_freepool {
+ cairo_freelist_node_t *first_free_node;
+ cairo_freelist_node_t *pools;
+ unsigned nodesize;
+ char embedded_pool[1000];
+} cairo_freepool_t;
/* Initialise a freelist that will be responsible for allocating
@@ -68,4 +81,40 @@ _cairo_freelist_calloc (cairo_freelist_t *freelist);
cairo_private void
_cairo_freelist_free (cairo_freelist_t *freelist, void *node);
+
+cairo_private void
+_cairo_freepool_init (cairo_freepool_t *freepool, unsigned nodesize);
+
+cairo_private void
+_cairo_freepool_fini (cairo_freepool_t *freepool);
+
+cairo_private void *
+_cairo_freepool_alloc_from_new_pool (cairo_freepool_t *freepool);
+
+static inline void *
+_cairo_freepool_alloc (cairo_freepool_t *freepool)
+{
+ cairo_freelist_node_t *node;
+
+ node = freepool->first_free_node;
+ if (unlikely (node == NULL))
+ return _cairo_freepool_alloc_from_new_pool (freepool);
+
+ VG (VALGRIND_MAKE_MEM_DEFINED (node, sizeof (node->next)));
+ freepool->first_free_node = node->next;
+ VG (VALGRIND_MAKE_MEM_UNDEFINED (node, freepool->nodesize));
+
+ return node;
+}
+
+static inline void
+_cairo_freepool_free (cairo_freepool_t *freepool, void *ptr)
+{
+ cairo_freelist_node_t *node = ptr;
+
+ node->next = freepool->first_free_node;
+ freepool->first_free_node = node;
+ VG (VALGRIND_MAKE_MEM_NOACCESS (node, freepool->nodesize));
+}
+
#endif /* CAIRO_FREELIST_H */
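A brief sketch of how the new free-pool API might be used internally (the node type and values are purely illustrative):

    typedef struct _example_node {
        struct _example_node *next;
        int payload;
    } example_node_t;

    static void
    example_freepool_usage (void)
    {
        cairo_freepool_t pool;
        example_node_t *node;

        _cairo_freepool_init (&pool, sizeof (example_node_t));

        node = _cairo_freepool_alloc (&pool);   /* first allocations come from embedded_pool */
        if (node != NULL) {
            node->payload = 42;
            _cairo_freepool_free (&pool, node); /* returns the node to the free list */
        }

        _cairo_freepool_fini (&pool);           /* frees any additionally malloc'ed pools */
    }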
diff --git a/src/cairo-freelist.c b/src/cairo-freelist.c
index f6eb4fed..83647f20 100644
--- a/src/cairo-freelist.c
+++ b/src/cairo-freelist.c
@@ -82,3 +82,76 @@ _cairo_freelist_free (cairo_freelist_t *freelist, void *voidnode)
VG (VALGRIND_MAKE_MEM_NOACCESS (node, freelist->nodesize));
}
}
+
+
+void
+_cairo_freepool_init (cairo_freepool_t *freepool, unsigned nodesize)
+{
+ int poolsize;
+ char *ptr;
+
+ freepool->first_free_node = NULL;
+ freepool->pools = NULL;
+ freepool->nodesize = nodesize;
+
+ poolsize = sizeof (freepool->embedded_pool);
+ ptr = freepool->embedded_pool + poolsize - freepool->nodesize;
+
+ poolsize /= freepool->nodesize;
+ while (poolsize--) {
+ cairo_freelist_node_t *node = (cairo_freelist_node_t *) ptr;
+ ptr -= freepool->nodesize;
+
+ node->next = freepool->first_free_node;
+ freepool->first_free_node = node;
+ VG (VALGRIND_MAKE_MEM_NOACCESS (node, freepool->nodesize));
+ }
+}
+
+void
+_cairo_freepool_fini (cairo_freepool_t *freepool)
+{
+ cairo_freelist_node_t *node = freepool->pools;
+ while (node != NULL) {
+ cairo_freelist_node_t *next;
+
+ VG (VALGRIND_MAKE_MEM_DEFINED (node, sizeof (node->next)));
+ next = node->next;
+
+ free (node);
+ node = next;
+ }
+ VG (VALGRIND_MAKE_MEM_NOACCESS (freepool, sizeof (*freepool)));
+}
+
+void *
+_cairo_freepool_alloc_from_new_pool (cairo_freepool_t *freepool)
+{
+ cairo_freelist_node_t *node;
+ char *ptr;
+ int poolsize;
+
+ poolsize = (128 * freepool->nodesize + 8191) & -8192;
+ node = malloc (poolsize);
+ if (node == NULL)
+ return node;
+
+ node->next = freepool->pools;
+ freepool->pools = node;
+
+ ptr = (char *) node + poolsize - freepool->nodesize;
+
+ poolsize -= sizeof (cairo_freelist_node_t);
+ poolsize /= freepool->nodesize;
+
+ while (--poolsize) {
+ node = (cairo_freelist_node_t *) ptr;
+ ptr -= freepool->nodesize;
+
+ node->next = freepool->first_free_node;
+ freepool->first_free_node = node;
+ VG (VALGRIND_MAKE_MEM_NOACCESS (node, freepool->nodesize));
+ }
+
+ return ptr;
+}
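For reference, the pool sizing above rounds an allocation of 128 nodes up to a whole number of 8 KiB blocks: with nodesize = 72, (128 * 72 + 8191) & -8192 = 17407 & ~8191 = 16384, i.e. two 8 KiB blocks; the first sizeof (cairo_freelist_node_t) bytes of each malloc'ed block are reserved as the link that threads it onto freepool->pools.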
diff --git a/src/cairo-image-surface.c b/src/cairo-image-surface.c
index c7ceb262..e7efdca1 100644
--- a/src/cairo-image-surface.c
+++ b/src/cairo-image-surface.c
@@ -598,6 +598,7 @@ cairo_image_surface_get_format (cairo_surface_t *surface)
return image_surface->format;
}
+slim_hidden_def (cairo_image_surface_get_format);
/**
* cairo_image_surface_get_width:
diff --git a/src/cairo-mutex-list-private.h b/src/cairo-mutex-list-private.h
index cb984cce..2f483163 100644
--- a/src/cairo-mutex-list-private.h
+++ b/src/cairo-mutex-list-private.h
@@ -60,5 +60,8 @@ CAIRO_MUTEX_DECLARE (_cairo_gl_context_mutex)
CAIRO_MUTEX_DECLARE (_cairo_atomic_mutex)
#endif
+#if CAIRO_HAS_DRM_SURFACE
+CAIRO_MUTEX_DECLARE (_cairo_drm_device_mutex)
+#endif
/* Undefine, to err on unintended inclusion */
#undef CAIRO_MUTEX_DECLARE
diff --git a/src/cairo.h b/src/cairo.h
index 4e930604..e0c53436 100644
--- a/src/cairo.h
+++ b/src/cairo.h
@@ -1947,6 +1947,7 @@ cairo_surface_status (cairo_surface_t *surface);
* @CAIRO_SURFACE_TYPE_META: The surface is a meta-type, since 1.10
* @CAIRO_SURFACE_TYPE_VG: The surface is a OpenVG surface, since 1.10
* @CAIRO_SURFACE_TYPE_GL: The surface is of type OpenGL, since 1.10
+ * @CAIRO_SURFACE_TYPE_DRM: The surface is of type Direct Rendering Manager, since 1.10
*
* #cairo_surface_type_t is used to describe the type of a given
* surface. The surface types are also known as "backends" or "surface
@@ -1991,6 +1992,7 @@ typedef enum _cairo_surface_type {
CAIRO_SURFACE_TYPE_META,
CAIRO_SURFACE_TYPE_VG,
CAIRO_SURFACE_TYPE_GL,
+ CAIRO_SURFACE_TYPE_DRM
} cairo_surface_type_t;
cairo_public cairo_surface_type_t
diff --git a/src/cairoint.h b/src/cairoint.h
index 5782f071..3cd0cb37 100644
--- a/src/cairoint.h
+++ b/src/cairoint.h
@@ -2516,6 +2516,13 @@ _cairo_pattern_equal (const cairo_pattern_t *a,
cairo_private void
_cairo_pattern_reset_static_data (void);
+#if CAIRO_HAS_DRM_SURFACE
+
+cairo_private void
+_cairo_drm_device_reset_static_data (void);
+
+#endif
+
cairo_private void
_cairo_clip_reset_static_data (void);
@@ -2588,6 +2595,7 @@ slim_hidden_proto (cairo_glyph_free);
slim_hidden_proto (cairo_image_surface_create);
slim_hidden_proto (cairo_image_surface_create_for_data);
slim_hidden_proto (cairo_image_surface_get_data);
+slim_hidden_proto (cairo_image_surface_get_format);
slim_hidden_proto (cairo_image_surface_get_height);
slim_hidden_proto (cairo_image_surface_get_stride);
slim_hidden_proto (cairo_image_surface_get_width);
diff --git a/src/drm/cairo-drm-bo.c b/src/drm/cairo-drm-bo.c
new file mode 100644
index 00000000..3346fc97
--- /dev/null
+++ b/src/drm/cairo-drm-bo.c
@@ -0,0 +1,120 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include "cairoint.h"
+
+#include "cairo-drm-private.h"
+#include "cairo-drm-ioctl-private.h"
+
+#include <sys/ioctl.h>
+#include <errno.h>
+
+struct drm_gem_close {
+ /** Handle of the object to be closed. */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_gem_flink {
+ /** Handle for the object being named */
+ uint32_t handle;
+
+ /** Returned global name */
+ uint32_t name;
+};
+
+struct drm_gem_open {
+ /** Name of object being opened */
+ uint32_t name;
+
+ /** Returned handle for the object */
+ uint32_t handle;
+
+ /** Returned size of the object */
+ uint64_t size;
+};
+
+#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close)
+#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink)
+#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open)
+
+cairo_status_t
+_cairo_drm_bo_open_for_name (const cairo_drm_device_t *dev,
+ cairo_drm_bo_t *bo,
+ uint32_t name)
+{
+ struct drm_gem_open open;
+ int ret;
+
+ open.name = name;
+ open.handle = 0;
+ open.size = 0;
+ do {
+ ret = ioctl (dev->fd, DRM_IOCTL_GEM_OPEN, &open);
+ } while (ret == -1 && errno == EINTR);
+ if (ret == -1)
+ return _cairo_error (CAIRO_STATUS_NO_MEMORY);
+
+ bo->name = name;
+ bo->size = open.size;
+ bo->handle = open.handle;
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+cairo_status_t
+_cairo_drm_bo_flink (const cairo_drm_device_t *dev,
+ cairo_drm_bo_t *bo)
+{
+ struct drm_gem_flink flink;
+ int ret;
+
+ memset (&flink, 0, sizeof (flink));
+ flink.handle = bo->handle;
+ ret = ioctl (dev->fd, DRM_IOCTL_GEM_FLINK, &flink);
+ if (ret == -1)
+ return _cairo_error (CAIRO_STATUS_NO_MEMORY);
+
+ bo->name = flink.name;
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+void
+_cairo_drm_bo_close (const cairo_drm_device_t *dev,
+ cairo_drm_bo_t *bo)
+{
+ struct drm_gem_close close;
+ int ret;
+
+ close.handle = bo->handle;
+ do {
+ ret = ioctl (dev->fd, DRM_IOCTL_GEM_CLOSE, &close);
+ } while (ret == -1 && errno == EINTR);
+}
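These wrappers implement the usual GEM name exchange: flink publishes a global name for a handle, and open-by-name turns that name back into a handle in another process. A hedged sketch of the intended flow (internal API; dev and the cairo_drm_bo_t fields come from cairo-drm-private.h; error handling elided):

    /* Exporting side: publish a global name for the buffer object. */
    static uint32_t
    export_bo_name (const cairo_drm_device_t *dev, cairo_drm_bo_t *bo)
    {
        if (_cairo_drm_bo_flink (dev, bo))
            return 0;
        return bo->name;   /* pass this name to the other process */
    }

    /* Importing side: turn the global name back into a local handle;
     * bo->handle and bo->size are filled in from the GEM_OPEN ioctl. */
    static cairo_status_t
    import_bo_by_name (const cairo_drm_device_t *dev,
                       cairo_drm_bo_t *bo,
                       uint32_t name)
    {
        return _cairo_drm_bo_open_for_name (dev, bo, name);
    }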
diff --git a/src/drm/cairo-drm-gallium-surface.c b/src/drm/cairo-drm-gallium-surface.c
new file mode 100644
index 00000000..b61e902d
--- /dev/null
+++ b/src/drm/cairo-drm-gallium-surface.c
@@ -0,0 +1,696 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ * Copyright © 2009 Eric Anholt
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ * The Original Code is the cairo graphics library.
+ *
+ * The Initial Developer of the Original Code is Chris Wilson.
+ */
+
+#include "cairoint.h"
+
+#include "cairo-drm-private.h"
+
+#include <dlfcn.h>
+
+#include <state_tracker/drm_api.h>
+#include <pipe/p_inlines.h>
+#include <pipe/p_screen.h>
+#include <pipe/p_context.h>
+
+typedef struct _gallium_surface gallium_surface_t;
+typedef struct _gallium_device gallium_device_t;
+
+struct _gallium_device {
+ cairo_drm_device_t base;
+ cairo_mutex_t mutex;
+
+ void *dlhandle;
+ struct drm_api *api;
+
+ struct pipe_screen *screen;
+ struct pipe_context *pipe;
+
+ int max_size;
+};
+
+struct _gallium_surface {
+ cairo_drm_surface_t base;
+
+ struct pipe_buffer *buffer;
+ enum pipe_format pipe_format;
+
+ struct pipe_texture *texture;
+
+ cairo_surface_t *fallback;
+};
+
+static cairo_surface_t *
+gallium_surface_create_internal (gallium_device_t *device,
+ cairo_content_t content,
+ enum pipe_format format,
+ int width, int height);
+
+static gallium_device_t *
+gallium_device_acquire (cairo_drm_device_t *base_dev)
+{
+ gallium_device_t *device = (gallium_device_t *) base_dev;
+ CAIRO_MUTEX_LOCK (device->mutex);
+ return device;
+}
+
+static void
+gallium_device_release (gallium_device_t *device)
+{
+ CAIRO_MUTEX_UNLOCK (device->mutex);
+}
+
+static cairo_format_t
+_cairo_format_from_pipe_format (enum pipe_format format)
+{
+ switch ((int) format) {
+ case PIPE_FORMAT_A8_UNORM:
+ return CAIRO_FORMAT_A8;
+ case PIPE_FORMAT_A8R8G8B8_UNORM:
+ return CAIRO_FORMAT_ARGB32;
+ default:
+ return CAIRO_FORMAT_INVALID;
+ }
+}
+
+static enum pipe_format
+pipe_format_from_content (cairo_content_t content)
+{
+ if (content == CAIRO_CONTENT_ALPHA)
+ return PIPE_FORMAT_A8_UNORM;
+ else
+ return PIPE_FORMAT_A8R8G8B8_UNORM;
+}
+
+static cairo_bool_t
+format_is_supported_destination (gallium_device_t *device,
+ enum pipe_format format)
+{
+ return device->screen->is_format_supported (device->screen,
+ format,
+ 0,
+ PIPE_TEXTURE_USAGE_RENDER_TARGET,
+ 0);
+}
+
+static cairo_bool_t
+format_is_supported_source (gallium_device_t *device,
+ enum pipe_format format)
+{
+ return device->screen->is_format_supported (device->screen,
+ format,
+ 0,
+ PIPE_TEXTURE_USAGE_SAMPLER,
+ 0);
+}
+
+static cairo_surface_t *
+gallium_surface_create_similar (void *abstract_src,
+ cairo_content_t content,
+ int width,
+ int height)
+{
+ gallium_surface_t *other = abstract_src;
+ gallium_device_t *device;
+ enum pipe_format pipe_format;
+ cairo_surface_t *surface = NULL;
+
+ device = gallium_device_acquire (other->base.device);
+
+ if (MAX (width, height) > device->max_size)
+ goto RELEASE;
+
+ pipe_format = pipe_format_from_content (content);
+
+ if (! format_is_supported_destination (device, pipe_format))
+ goto RELEASE;
+
+ surface = gallium_surface_create_internal (device,
+ content, pipe_format,
+ width, height);
+
+RELEASE:
+ gallium_device_release (device);
+
+ return surface;
+}
+
+static cairo_status_t
+gallium_surface_finish (void *abstract_surface)
+{
+ gallium_surface_t *surface = abstract_surface;
+ gallium_device_t *device;
+
+ device = gallium_device_acquire (surface->base.device);
+ device->screen->buffer_destroy (surface->buffer);
+ gallium_device_release (device);
+
+ return _cairo_drm_surface_finish (&surface->base);
+}
+
+static void
+gallium_surface_unmap (void *closure)
+{
+ gallium_surface_t *surface = closure;
+ gallium_device_t *device;
+
+ device = gallium_device_acquire (surface->base.device);
+ pipe_buffer_unmap (device->screen, surface->buffer);
+ gallium_device_release (device);
+}
+
+static cairo_status_t
+gallium_surface_acquire_source_image (void *abstract_surface,
+ cairo_image_surface_t **image_out,
+ void **image_extra)
+{
+ gallium_surface_t *surface = abstract_surface;
+ gallium_device_t *device;
+ cairo_format_t format;
+ cairo_image_surface_t *image;
+ cairo_status_t status;
+ void *ptr;
+
+ if (surface->fallback != NULL) {
+ *image_out = (cairo_image_surface_t *)
+ cairo_surface_reference (surface->fallback);
+ *image_extra = NULL;
+ return CAIRO_STATUS_SUCCESS;
+ }
+
+ if (unlikely (surface->base.width == 0 || surface->base.height == 0)) {
+ image = (cairo_image_surface_t *)
+ cairo_image_surface_create (surface->base.format, 0, 0);
+ status = image->base.status;
+ if (unlikely (status))
+ return status;
+
+ *image_out = image;
+ *image_extra = NULL;
+ return CAIRO_STATUS_SUCCESS;
+ }
+
+ format = _cairo_format_from_pipe_format (surface->pipe_format);
+ if (format == CAIRO_FORMAT_INVALID)
+ return CAIRO_INT_STATUS_UNSUPPORTED;
+
+ device = gallium_device_acquire (surface->base.device);
+ ptr = pipe_buffer_map (device->screen, surface->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ);
+ gallium_device_release (device);
+
+ image = (cairo_image_surface_t *)
+ cairo_image_surface_create_for_data (ptr, format,
+ surface->base.width,
+ surface->base.height,
+ surface->base.stride);
+ if (unlikely (image->base.status))
+ return image->base.status;
+
+ status = _cairo_user_data_array_set_data (&image->base.user_data,
+ (cairo_user_data_key_t *) &surface->fallback,
+ surface,
+ gallium_surface_unmap);
+ if (unlikely (status)) {
+ cairo_surface_destroy (&image->base);
+ return status;
+ }
+
+ *image_out = (cairo_image_surface_t *) image;
+ *image_extra = NULL;
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static void
+gallium_surface_release_source_image (void *abstract_surface,
+ cairo_image_surface_t *image,
+ void *image_extra)
+{
+ cairo_surface_destroy (&image->base);
+}
+
+static cairo_status_t
+gallium_surface_acquire_dest_image (void *abstract_surface,
+ cairo_rectangle_int_t *interest_rect,
+ cairo_image_surface_t **image_out,
+ cairo_rectangle_int_t *image_rect_out,
+ void **image_extra)
+{
+ gallium_surface_t *surface = abstract_surface;
+ gallium_device_t *device;
+ cairo_surface_t *image;
+ cairo_format_t format;
+ cairo_status_t status;
+ void *ptr;
+
+ assert (surface->fallback == NULL);
+
+ format = _cairo_format_from_pipe_format (surface->pipe_format);
+ if (format == CAIRO_FORMAT_INVALID)
+ return CAIRO_INT_STATUS_UNSUPPORTED;
+
+ device = gallium_device_acquire (surface->base.device);
+ ptr = pipe_buffer_map (device->screen, surface->buffer,
+ PIPE_BUFFER_USAGE_CPU_READ_WRITE);
+ gallium_device_release (device);
+
+ image = cairo_image_surface_create_for_data (ptr, format,
+ surface->base.width,
+ surface->base.height,
+ surface->base.stride);
+ if (unlikely (image->status))
+ return image->status;
+
+ status = _cairo_user_data_array_set_data (&image->user_data,
+ (cairo_user_data_key_t *) &surface->fallback,
+ surface,
+ gallium_surface_unmap);
+ if (unlikely (status)) {
+ cairo_surface_destroy (image);
+ return status;
+ }
+
+ surface->fallback = cairo_surface_reference (image);
+
+ *image_out = (cairo_image_surface_t *) image;
+ *image_extra = NULL;
+
+ image_rect_out->x = 0;
+ image_rect_out->y = 0;
+ image_rect_out->width = surface->base.width;
+ image_rect_out->height = surface->base.height;
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static void
+gallium_surface_release_dest_image (void *abstract_surface,
+ cairo_rectangle_int_t *interest_rect,
+ cairo_image_surface_t *image,
+ cairo_rectangle_int_t *image_rect,
+ void *image_extra)
+{
+ /* Keep the fallback until we flush, either explicitly or at the
+ * end of this device. The idea is to avoid excess migration of
+ * the buffer between GPU and CPU domains.
+ */
+ cairo_surface_destroy (&image->base);
+}
+
+static cairo_status_t
+gallium_surface_flush (void *abstract_surface)
+{
+ gallium_surface_t *surface = abstract_surface;
+ gallium_device_t *device;
+ cairo_status_t status;
+
+ if (surface->fallback == NULL)
+ return CAIRO_STATUS_SUCCESS;
+
+ /* kill any outstanding maps */
+ cairo_surface_finish (surface->fallback);
+
+ device = gallium_device_acquire (surface->base.device);
+ pipe_buffer_flush_mapped_range (device->screen,
+ surface->buffer,
+ 0,
+ surface->base.stride * surface->base.height);
+ gallium_device_release (device);
+
+ status = cairo_surface_status (surface->fallback);
+ cairo_surface_destroy (surface->fallback);
+ surface->fallback = NULL;
+
+ return status;
+}
+
+static const cairo_surface_backend_t gallium_surface_backend = {
+ CAIRO_SURFACE_TYPE_DRM,
+ gallium_surface_create_similar,
+ gallium_surface_finish,
+
+ gallium_surface_acquire_source_image,
+ gallium_surface_release_source_image,
+ gallium_surface_acquire_dest_image,
+ gallium_surface_release_dest_image,
+
+ NULL, //gallium_surface_clone_similar,
+ NULL, //gallium_surface_composite,
+ NULL, //gallium_surface_fill_rectangles,
+ NULL, //gallium_surface_composite_trapezoids,
+ NULL, //gallium_surface_create_span_renderer,
+ NULL, //gallium_surface_check_span_renderer,
+ NULL, /* copy_page */
+ NULL, /* show_page */
+ _cairo_drm_surface_get_extents,
+ NULL, /* old_show_glyphs */
+ _cairo_drm_surface_get_font_options,
+ gallium_surface_flush,
+ NULL, /* mark_dirty_rectangle */
+ NULL, //gallium_surface_scaled_font_fini,
+ NULL, //gallium_surface_scaled_glyph_fini,
+
+ _cairo_drm_surface_paint,
+ _cairo_drm_surface_mask,
+ _cairo_drm_surface_stroke,
+ _cairo_drm_surface_fill,
+ _cairo_drm_surface_show_glyphs,
+
+ NULL, /* snapshot */
+
+ NULL, /* is_similar */
+
+ NULL, /* reset */
+};
+
+static int
+gallium_format_stride_for_width (enum pipe_format format, int width)
+{
+ int stride;
+
+ stride = 1024; /* XXX fugly */
+ while (stride < width)
+ stride *= 2;
+
+ if (format == PIPE_FORMAT_A8R8G8B8_UNORM)
+ stride *= 4;
+
+ return stride;
+}
+
+static cairo_drm_bo_t *
+_gallium_fake_bo_create (uint32_t size, uint32_t name)
+{
+ cairo_drm_bo_t *bo;
+
+ bo = malloc (sizeof (cairo_drm_bo_t));
+
+ CAIRO_REFERENCE_COUNT_INIT (&bo->ref_count, 1);
+ bo->name = name;
+ bo->handle = 0;
+ bo->size = size;
+
+ return bo;
+}
+
+static void
+_gallium_fake_bo_release (void *dev, void *bo)
+{
+ free (bo);
+}
+
+static cairo_surface_t *
+gallium_surface_create_internal (gallium_device_t *device,
+ cairo_content_t content,
+ enum pipe_format pipe_format,
+ int width, int height)
+{
+ gallium_surface_t *surface;
+ cairo_status_t status;
+ int stride, size;
+
+ surface = malloc (sizeof (gallium_surface_t));
+ if (unlikely (surface == NULL))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+
+ _cairo_surface_init (&surface->base.base,
+ &gallium_surface_backend,
+ content);
+ _cairo_drm_surface_init (&surface->base, &device->base);
+
+ stride = gallium_format_stride_for_width (pipe_format, width);
+ size = stride * height;
+
+ surface->base.width = width;
+ surface->base.height = height;
+ surface->base.stride = stride;
+ surface->base.bo = _gallium_fake_bo_create (size, 0);
+
+ surface->buffer = pipe_buffer_create (device->screen,
+ 0,
+ PIPE_BUFFER_USAGE_GPU_READ_WRITE |
+ PIPE_BUFFER_USAGE_CPU_READ_WRITE,
+ size);
+ if (unlikely (surface->buffer == NULL)) {
+ status = _cairo_drm_surface_finish (&surface->base);
+ free (surface);
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+ }
+
+ surface->pipe_format = pipe_format;
+ surface->texture = NULL;
+
+ return &surface->base.base;
+}
+
+static cairo_surface_t *
+gallium_surface_create (cairo_drm_device_t *base_dev,
+ cairo_content_t content,
+ int width, int height)
+{
+ gallium_device_t *device;
+ cairo_surface_t *surface;
+ enum pipe_format pipe_format;
+
+ device = gallium_device_acquire (base_dev);
+
+ if (MAX (width, height) > device->max_size) {
+ surface = _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_SIZE));
+ goto RELEASE;
+ }
+
+ pipe_format = pipe_format_from_content (content);
+
+ if (! format_is_supported_destination (device, pipe_format)) {
+ surface = _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
+ goto RELEASE;
+ }
+
+ surface = gallium_surface_create_internal (device,
+ content, pipe_format,
+ width, height);
+
+RELEASE:
+ gallium_device_release (device);
+
+ return surface;
+}
+
+static cairo_surface_t *
+gallium_surface_create_for_name (cairo_drm_device_t *base_dev,
+ unsigned int name,
+ cairo_format_t format,
+ int width, int height, int stride)
+{
+ gallium_device_t *device;
+ gallium_surface_t *surface;
+ cairo_status_t status;
+ cairo_content_t content;
+
+ surface = malloc (sizeof (gallium_surface_t));
+ if (unlikely (surface == NULL))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+
+ switch (format) {
+ default:
+ case CAIRO_FORMAT_A1:
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
+ case CAIRO_FORMAT_A8:
+ surface->pipe_format = PIPE_FORMAT_A8_UNORM;
+ break;
+ case CAIRO_FORMAT_RGB24:
+ case CAIRO_FORMAT_ARGB32:
+ surface->pipe_format = PIPE_FORMAT_A8R8G8B8_UNORM;
+ break;
+ }
+
+ device = gallium_device_acquire (base_dev);
+
+ if (MAX (width, height) > device->max_size) {
+ gallium_device_release (device);
+ free (surface);
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_SIZE));
+ }
+
+ if (! format_is_supported_destination (device, surface->pipe_format)) {
+ gallium_device_release (device);
+ free (surface);
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
+ }
+
+ content = _cairo_content_from_format (format);
+ _cairo_surface_init (&surface->base.base,
+ &gallium_surface_backend,
+ content);
+ _cairo_drm_surface_init (&surface->base, base_dev);
+
+ surface->base.bo = _gallium_fake_bo_create (height * stride, name);
+
+ surface->base.width = width;
+ surface->base.height = height;
+ surface->base.stride = stride;
+
+ surface->buffer = device->api->buffer_from_handle (device->api,
+ device->screen,
+ "cairo-gallium alien",
+ name);
+ if (unlikely (surface->buffer == NULL)) {
+ status = _cairo_drm_surface_finish (&surface->base);
+ gallium_device_release (device);
+ free (surface);
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+ }
+
+ surface->texture = NULL;
+
+ surface->fallback = NULL;
+
+ gallium_device_release (device);
+
+ return &surface->base.base;
+}
+
+static cairo_int_status_t
+gallium_surface_flink (void *abstract_surface)
+{
+ gallium_surface_t *surface = abstract_surface;
+ gallium_device_t *device;
+ cairo_status_t status = CAIRO_STATUS_SUCCESS;
+
+ device = gallium_device_acquire (surface->base.device);
+ if (! device->api->global_handle_from_buffer (device->api,
+ device->screen,
+ surface->buffer,
+ &surface->base.bo->name))
+ {
+ status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
+ }
+ gallium_device_release (device);
+
+ return status;
+}
+
+static void
+gallium_device_destroy (void *abstract_device)
+{
+ gallium_device_t *device = abstract_device;
+
+ device->pipe->destroy (device->pipe);
+ device->screen->destroy (device->screen);
+ device->api->destroy (device->api);
+
+ CAIRO_MUTEX_FINI (device->mutex);
+
+ dlclose (device->dlhandle);
+ free (device);
+}
+
+cairo_drm_device_t *
+_cairo_drm_gallium_device_create (int fd, dev_t dev, int vendor_id, int chip_id)
+{
+ gallium_device_t *device;
+ cairo_status_t status;
+ void *handle;
+ const char *libdir;
+ char buf[4096];
+ struct drm_api *(*ctor) (void);
+
+ /* XXX need search path + probe */
+ libdir = getenv ("CAIRO_GALLIUM_LIBDIR");
+ if (libdir == NULL)
+ libdir = "/usr/lib/dri";
+ snprintf (buf, sizeof (buf), "%s/i915_dri.so", libdir);
+
+ handle = dlopen (buf, RTLD_LAZY);
+ if (handle == NULL)
+ return NULL;
+
+ ctor = dlsym (handle, "drm_api_create");
+ if (ctor == NULL) {
+ dlclose (handle);
+ return NULL;
+ }
+
+ device = malloc (sizeof (gallium_device_t));
+ if (device == NULL) {
+ dlclose (handle);
+ return _cairo_drm_device_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+ }
+
+ device->dlhandle = handle;
+
+ CAIRO_MUTEX_INIT (device->mutex);
+
+ device->base.status = CAIRO_STATUS_SUCCESS;
+
+ device->base.surface.create = gallium_surface_create;
+ device->base.surface.create_for_name = gallium_surface_create_for_name;
+ device->base.surface.enable_scan_out = NULL;
+ device->base.surface.flink = gallium_surface_flink;
+
+ device->base.device.throttle = NULL;
+ device->base.device.destroy = gallium_device_destroy;
+
+ device->base.bo.release = _gallium_fake_bo_release;
+
+ device->api = ctor ();
+ if (device->api == NULL) {
+ status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
+ goto CLEANUP;
+ }
+
+ device->screen = device->api->create_screen (device->api, fd, NULL);
+ if (device->screen == NULL) {
+ status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
+ goto CLEANUP_API;
+ }
+
+ device->max_size = 1 << device->screen->get_param (device->screen,
+ PIPE_CAP_MAX_TEXTURE_2D_LEVELS);
+
+ device->pipe = device->api->create_context (device->api, device->screen);
+ if (device->pipe == NULL) {
+ status = _cairo_error (CAIRO_STATUS_NO_MEMORY);
+ goto CLEANUP_SCREEN;
+ }
+
+ return _cairo_drm_device_init (&device->base, fd, dev, device->max_size);
+
+CLEANUP_SCREEN:
+ device->screen->destroy (device->screen);
+CLEANUP_API:
+ device->api->destroy (device->api);
+CLEANUP:
+ free (device);
+ dlclose (handle);
+ return _cairo_drm_device_create_in_error (status);
+}
diff --git a/src/drm/cairo-drm-intel-private.h b/src/drm/cairo-drm-intel-private.h
new file mode 100644
index 00000000..f5791f3c
--- /dev/null
+++ b/src/drm/cairo-drm-intel-private.h
@@ -0,0 +1,182 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#ifndef CAIRO_DRM_INTEL_PRIVATE_H
+#define CAIRO_DRM_INTEL_PRIVATE_H
+
+#include "cairo-compiler-private.h"
+#include "cairo-types-private.h"
+#include "cairo-drm-private.h"
+#include "cairo-list-private.h"
+#include "cairo-freelist-private.h"
+#include "cairo-mutex-private.h"
+
+/** @{
+ * Intel memory domains
+ *
+ * Most of these just align with the various caches in
+ * the system and are used to flush and invalidate as
+ * objects end up cached in different domains.
+ */
+/** CPU cache */
+#define I915_GEM_DOMAIN_CPU 0x00000001
+/** Render cache, used by 2D and 3D drawing */
+#define I915_GEM_DOMAIN_RENDER 0x00000002
+/** Sampler cache, used by texture engine */
+#define I915_GEM_DOMAIN_SAMPLER 0x00000004
+/** Command queue, used to load batch buffers */
+#define I915_GEM_DOMAIN_COMMAND 0x00000008
+/** Instruction cache, used by shader programs */
+#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010
+/** Vertex address cache */
+#define I915_GEM_DOMAIN_VERTEX 0x00000020
+/** GTT domain - aperture and scanout */
+#define I915_GEM_DOMAIN_GTT 0x00000040
+/** @} */
+
+#define I915_TILING_NONE 0
+#define I915_TILING_X 1
+#define I915_TILING_Y 2
+
+#define I915_BIT_6_SWIZZLE_NONE 0
+#define I915_BIT_6_SWIZZLE_9 1
+#define I915_BIT_6_SWIZZLE_9_10 2
+#define I915_BIT_6_SWIZZLE_9_11 3
+#define I915_BIT_6_SWIZZLE_9_10_11 4
+
+#define INTEL_TILING_DEFAULT I915_TILING_Y
+
+
+#define INTEL_BO_CACHE_BUCKETS 12 /* cache surfaces up to 16 MiB */
+
+typedef struct _intel_bo {
+ cairo_drm_bo_t base;
+
+ cairo_list_t cache_list;
+
+ uint32_t offset;
+ void *virtual;
+
+ uint32_t tiling;
+ uint32_t swizzle;
+ uint32_t stride;
+
+ cairo_bool_t in_batch;
+ uint32_t read_domains;
+ uint32_t write_domain;
+} intel_bo_t;
+
+typedef struct _intel_device {
+ cairo_drm_device_t base;
+
+ size_t gtt_max_size;
+ size_t gtt_avail_size;
+
+ cairo_mutex_t bo_mutex;
+ cairo_freepool_t bo_pool;
+ struct _intel_bo_cache {
+ cairo_list_t list;
+ uint16_t min_entries;
+ uint16_t num_entries;
+ } bo_cache[INTEL_BO_CACHE_BUCKETS];
+ size_t bo_cache_size;
+ size_t bo_max_cache_size_high;
+ size_t bo_max_cache_size_low;
+} intel_device_t;
+
+cairo_private cairo_bool_t
+intel_info (int fd, uint64_t *gtt_size);
+
+cairo_private cairo_status_t
+intel_device_init (intel_device_t *device, int fd);
+
+cairo_private void
+intel_device_fini (intel_device_t *dev);
+
+cairo_private cairo_drm_bo_t *
+intel_bo_create (intel_device_t *dev,
+ uint32_t size,
+ cairo_bool_t gpu_target);
+
+cairo_private void
+intel_bo_release (void *_dev, void *_bo);
+
+cairo_private cairo_drm_bo_t *
+intel_bo_create_for_name (intel_device_t *dev, uint32_t name);
+
+cairo_private void
+intel_bo_set_tiling (intel_device_t *dev,
+ intel_bo_t *bo,
+ uint32_t tiling,
+ uint32_t stride);
+
+cairo_private void
+intel_bo_write (const intel_device_t *dev,
+ intel_bo_t *bo,
+ unsigned long offset,
+ unsigned long size,
+ const void *data);
+
+cairo_private void
+intel_bo_read (const intel_device_t *dev,
+ intel_bo_t *bo,
+ unsigned long offset,
+ unsigned long size,
+ void *data);
+
+cairo_private void
+intel_bo_wait (const intel_device_t *dev, intel_bo_t *bo);
+
+cairo_private void *
+intel_bo_map (const intel_device_t *dev, intel_bo_t *bo);
+
+cairo_private void
+intel_bo_unmap (intel_bo_t *bo);
+
+cairo_private cairo_status_t
+intel_bo_init (const intel_device_t *dev,
+ intel_bo_t *bo,
+ uint32_t size,
+ uint32_t initial_domain);
+
+cairo_private cairo_status_t
+intel_bo_init_for_name (const intel_device_t *dev,
+ intel_bo_t *bo,
+ uint32_t size,
+ uint32_t name);
+
+cairo_private cairo_surface_t *
+intel_bo_get_image (const intel_device_t *device,
+ intel_bo_t *bo,
+ const cairo_drm_surface_t *surface);
+
+cairo_private void
+intel_throttle (intel_device_t *device);
+
+#endif /* CAIRO_DRM_INTEL_PRIVATE_H */
diff --git a/src/drm/cairo-drm-intel-surface.c b/src/drm/cairo-drm-intel-surface.c
new file mode 100644
index 00000000..2152c591
--- /dev/null
+++ b/src/drm/cairo-drm-intel-surface.c
@@ -0,0 +1,475 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include "cairoint.h"
+
+#include "cairo-drm-private.h"
+#include "cairo-drm-intel-private.h"
+
+/* Basic generic/stub surface for intel chipsets */
+
+#define MAX_SIZE 2048
+
+typedef struct _intel_surface intel_surface_t;
+
+struct _intel_surface {
+ cairo_drm_surface_t base;
+};
+
+static inline intel_device_t *
+to_intel_device (cairo_drm_device_t *device)
+{
+ return (intel_device_t *) device;
+}
+
+static inline intel_bo_t *
+to_intel_bo (cairo_drm_bo_t *bo)
+{
+ return (intel_bo_t *) bo;
+}
+
+static cairo_status_t
+intel_batch_flush (intel_device_t *device)
+{
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static cairo_status_t
+intel_surface_batch_flush (intel_surface_t *surface)
+{
+ if (to_intel_bo (surface->base.bo)->write_domain)
+ return intel_batch_flush (to_intel_device (surface->base.device));
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static cairo_status_t
+intel_surface_finish (void *abstract_surface)
+{
+ intel_surface_t *surface = abstract_surface;
+
+ return _cairo_drm_surface_finish (&surface->base);
+}
+
+static cairo_status_t
+intel_surface_acquire_source_image (void *abstract_surface,
+ cairo_image_surface_t **image_out,
+ void **image_extra)
+{
+ intel_surface_t *surface = abstract_surface;
+ cairo_surface_t *image;
+ cairo_status_t status;
+
+ if (surface->base.fallback != NULL) {
+ image = surface->base.fallback;
+ goto DONE;
+ }
+
+ image = _cairo_surface_has_snapshot (&surface->base.base,
+ &_cairo_image_surface_backend,
+ surface->base.base.content);
+ if (image != NULL)
+ goto DONE;
+
+ status = intel_surface_batch_flush (surface);
+ if (unlikely (status))
+ return status;
+
+ image = intel_bo_get_image (to_intel_device (surface->base.device),
+ to_intel_bo (surface->base.bo),
+ &surface->base);
+ status = image->status;
+ if (unlikely (status))
+ return status;
+
+ status = _cairo_surface_attach_snapshot (&surface->base.base,
+ image,
+ cairo_surface_destroy);
+ if (unlikely (status)) {
+ cairo_surface_destroy (image);
+ return status;
+ }
+
+DONE:
+ *image_out = (cairo_image_surface_t *) cairo_surface_reference (image);
+ *image_extra = NULL;
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static void
+intel_surface_release_source_image (void *abstract_surface,
+ cairo_image_surface_t *image,
+ void *image_extra)
+{
+ cairo_surface_destroy (&image->base);
+}
+
+static cairo_surface_t *
+intel_surface_snapshot (void *abstract_surface)
+{
+ intel_surface_t *surface = abstract_surface;
+ cairo_status_t status;
+
+ if (surface->base.fallback != NULL)
+ return NULL;
+
+ status = intel_surface_batch_flush (surface);
+ if (unlikely (status))
+ return _cairo_surface_create_in_error (status);
+
+ return intel_bo_get_image (to_intel_device (surface->base.device),
+ to_intel_bo (surface->base.bo),
+ &surface->base);
+}
+
+static cairo_status_t
+intel_surface_acquire_dest_image (void *abstract_surface,
+ cairo_rectangle_int_t *interest_rect,
+ cairo_image_surface_t **image_out,
+ cairo_rectangle_int_t *image_rect_out,
+ void **image_extra)
+{
+ intel_surface_t *surface = abstract_surface;
+ cairo_surface_t *image;
+ cairo_status_t status;
+ void *ptr;
+
+ assert (surface->base.fallback == NULL);
+
+ status = intel_surface_batch_flush (surface);
+ if (unlikely (status))
+ return status;
+
+ /* Force a read barrier, as well as flushing writes above */
+ if (to_intel_bo (surface->base.bo)->in_batch) {
+ status = intel_batch_flush (to_intel_device (surface->base.device));
+ if (unlikely (status))
+ return status;
+ }
+
+ ptr = intel_bo_map (to_intel_device (surface->base.device),
+ to_intel_bo (surface->base.bo));
+ if (unlikely (ptr == NULL))
+ return _cairo_error (CAIRO_STATUS_NO_MEMORY);
+
+ image = cairo_image_surface_create_for_data (ptr,
+ surface->base.format,
+ surface->base.width,
+ surface->base.height,
+ surface->base.stride);
+ status = image->status;
+ if (unlikely (status)) {
+ intel_bo_unmap (to_intel_bo (surface->base.bo));
+ return status;
+ }
+
+ surface->base.fallback = cairo_surface_reference (image);
+
+ *image_out = (cairo_image_surface_t *) image;
+ *image_extra = NULL;
+
+ image_rect_out->x = 0;
+ image_rect_out->y = 0;
+ image_rect_out->width = surface->base.width;
+ image_rect_out->height = surface->base.height;
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static void
+intel_surface_release_dest_image (void *abstract_surface,
+ cairo_rectangle_int_t *interest_rect,
+ cairo_image_surface_t *image,
+ cairo_rectangle_int_t *image_rect,
+ void *image_extra)
+{
+ /* Keep the fallback until we flush, either explicitly or at the
+ * end of this context. The idea is to avoid excess migration of
+ * the buffer between GPU and CPU domains.
+ */
+ cairo_surface_destroy (&image->base);
+}
+
+static cairo_status_t
+intel_surface_flush (void *abstract_surface)
+{
+ intel_surface_t *surface = abstract_surface;
+ cairo_status_t status;
+
+ if (surface->base.fallback == NULL)
+ return intel_surface_batch_flush (surface);
+
+ /* kill any outstanding maps */
+ cairo_surface_finish (surface->base.fallback);
+
+ status = cairo_surface_status (surface->base.fallback);
+ cairo_surface_destroy (surface->base.fallback);
+ surface->base.fallback = NULL;
+
+ intel_bo_unmap (to_intel_bo (surface->base.bo));
+
+ return status;
+}
+
+static const cairo_surface_backend_t intel_surface_backend = {
+ CAIRO_SURFACE_TYPE_DRM,
+ _cairo_drm_surface_create_similar,
+ intel_surface_finish,
+
+ intel_surface_acquire_source_image,
+ intel_surface_release_source_image,
+ intel_surface_acquire_dest_image,
+ intel_surface_release_dest_image,
+
+ NULL, //intel_surface_clone_similar,
+ NULL, //intel_surface_composite,
+ NULL, //intel_surface_fill_rectangles,
+ NULL, //intel_surface_composite_trapezoids,
+ NULL, //intel_surface_create_span_renderer,
+ NULL, //intel_surface_check_span_renderer,
+ NULL, /* copy_page */
+ NULL, /* show_page */
+ _cairo_drm_surface_get_extents,
+ NULL, /* old_show_glyphs */
+ _cairo_drm_surface_get_font_options,
+ intel_surface_flush,
+ NULL, /* mark_dirty_rectangle */
+ NULL, //intel_surface_scaled_font_fini,
+ NULL, //intel_surface_scaled_glyph_fini,
+
+ _cairo_drm_surface_paint,
+ _cairo_drm_surface_mask,
+ _cairo_drm_surface_stroke,
+ _cairo_drm_surface_fill,
+ _cairo_drm_surface_show_glyphs,
+
+ intel_surface_snapshot,
+
+ NULL, /* is_similar */
+};
+
+static void
+intel_surface_init (intel_surface_t *surface,
+ cairo_content_t content,
+ cairo_drm_device_t *device)
+{
+ _cairo_surface_init (&surface->base.base, &intel_surface_backend, content);
+ _cairo_drm_surface_init (&surface->base, device);
+
+ switch (content) {
+ case CAIRO_CONTENT_ALPHA:
+ surface->base.format = CAIRO_FORMAT_A8;
+ break;
+ case CAIRO_CONTENT_COLOR:
+ surface->base.format = CAIRO_FORMAT_RGB24;
+ break;
+ default:
+ ASSERT_NOT_REACHED;
+ case CAIRO_CONTENT_COLOR_ALPHA:
+ surface->base.format = CAIRO_FORMAT_ARGB32;
+ break;
+ }
+}
+
+static cairo_surface_t *
+intel_surface_create_internal (cairo_drm_device_t *device,
+ cairo_content_t content,
+ int width, int height)
+{
+ intel_surface_t *surface;
+ cairo_status_t status;
+
+ surface = malloc (sizeof (intel_surface_t));
+ if (unlikely (surface == NULL))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+
+ intel_surface_init (surface, content, device);
+
+ if (width && height) {
+ surface->base.width = width;
+ surface->base.height = height;
+
+ /* Vol I, p134: size restrictions for textures */
+ width = (width + 3) & -4;
+ height = (height + 1) & -2;
+ surface->base.stride =
+ cairo_format_stride_for_width (surface->base.format, width);
+ surface->base.bo = intel_bo_create (to_intel_device (device),
+ surface->base.stride * height,
+ TRUE);
+ if (surface->base.bo == NULL) {
+ status = _cairo_drm_surface_finish (&surface->base);
+ free (surface);
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+ }
+ }
+
+ return &surface->base.base;
+}
+
+static cairo_surface_t *
+intel_surface_create (cairo_drm_device_t *device,
+ cairo_content_t content,
+ int width, int height)
+{
+ return intel_surface_create_internal (device, content, width, height);
+}
+
+static cairo_surface_t *
+intel_surface_create_for_name (cairo_drm_device_t *device,
+ unsigned int name,
+ cairo_format_t format,
+ int width, int height, int stride)
+{
+ intel_surface_t *surface;
+ cairo_content_t content;
+ cairo_status_t status;
+
+ switch (format) {
+ default:
+ case CAIRO_FORMAT_A1:
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
+ case CAIRO_FORMAT_ARGB32:
+ content = CAIRO_CONTENT_COLOR_ALPHA;
+ break;
+ case CAIRO_FORMAT_RGB24:
+ content = CAIRO_CONTENT_COLOR;
+ break;
+ case CAIRO_FORMAT_A8:
+ content = CAIRO_CONTENT_ALPHA;
+ break;
+ }
+
+ if (stride < cairo_format_stride_for_width (format, width))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_STRIDE));
+
+ surface = malloc (sizeof (intel_surface_t));
+ if (unlikely (surface == NULL))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+
+ intel_surface_init (surface, content, device);
+
+ if (width && height) {
+ surface->base.width = width;
+ surface->base.height = height;
+ surface->base.stride = stride;
+
+ surface->base.bo = intel_bo_create_for_name (to_intel_device (device),
+ name);
+ if (unlikely (surface->base.bo == NULL)) {
+ status = _cairo_drm_surface_finish (&surface->base);
+ free (surface);
+ return _cairo_surface_create_in_error (_cairo_error
+ (CAIRO_STATUS_NO_MEMORY));
+ }
+ }
+
+ return &surface->base.base;
+}
+
+static cairo_status_t
+intel_surface_enable_scan_out (void *abstract_surface)
+{
+ intel_surface_t *surface = abstract_surface;
+ cairo_status_t status;
+
+ if (unlikely (surface->base.bo == NULL))
+ return _cairo_error (CAIRO_STATUS_INVALID_SIZE);
+
+ status = intel_surface_batch_flush (surface);
+ if (unlikely (status))
+ return status;
+
+ if (to_intel_bo (surface->base.bo)->tiling == I915_TILING_Y) {
+ intel_bo_set_tiling (to_intel_device (surface->base.device),
+ to_intel_bo (surface->base.bo),
+ I915_TILING_X, surface->base.stride);
+ }
+
+ if (unlikely (to_intel_bo (surface->base.bo)->tiling == I915_TILING_Y))
+ return _cairo_error (CAIRO_STATUS_INVALID_FORMAT); /* XXX */
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static cairo_int_status_t
+intel_device_throttle (cairo_drm_device_t *device)
+{
+ cairo_status_t status;
+
+ status = intel_batch_flush (to_intel_device (device));
+ if (unlikely (status))
+ return status;
+
+ intel_throttle (to_intel_device (device));
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static void
+intel_device_destroy (void *data)
+{
+ intel_device_t *device = data;
+
+ intel_device_fini (device);
+
+ free (data);
+}
+
+cairo_drm_device_t *
+_cairo_drm_intel_device_create (int fd, dev_t dev, int vendor_id, int chip_id)
+{
+ intel_device_t *device;
+ cairo_status_t status;
+
+ if (! intel_info (fd, NULL))
+ return NULL;
+
+ device = malloc (sizeof (intel_device_t));
+ if (unlikely (device == NULL))
+ return _cairo_drm_device_create_in_error (CAIRO_STATUS_NO_MEMORY);
+
+ status = intel_device_init (device, fd);
+ if (unlikely (status)) {
+ free (device);
+ return _cairo_drm_device_create_in_error (status);
+ }
+
+ device->base.bo.release = intel_bo_release;
+
+ device->base.surface.create = intel_surface_create;
+ device->base.surface.create_for_name = intel_surface_create_for_name;
+ device->base.surface.create_from_cacheable_image = NULL;
+ device->base.surface.flink = _cairo_drm_surface_flink;
+ device->base.surface.enable_scan_out = intel_surface_enable_scan_out;
+
+ device->base.device.throttle = intel_device_throttle;
+ device->base.device.destroy = intel_device_destroy;
+
+ return _cairo_drm_device_init (&device->base, fd, dev, MAX_SIZE);
+}
diff --git a/src/drm/cairo-drm-intel.c b/src/drm/cairo-drm-intel.c
new file mode 100644
index 00000000..5a37f00b
--- /dev/null
+++ b/src/drm/cairo-drm-intel.c
@@ -0,0 +1,933 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include "cairoint.h"
+
+#include "cairo-drm-private.h"
+#include "cairo-drm-ioctl-private.h"
+#include "cairo-drm-intel-private.h"
+#include "cairo-freelist-private.h"
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <errno.h>
+
+#define DRM_I915_GEM_EXECBUFFER 0x14
+#define DRM_I915_GEM_BUSY 0x17
+#define DRM_I915_GEM_THROTTLE 0x18
+#define DRM_I915_GEM_CREATE 0x1b
+#define DRM_I915_GEM_PREAD 0x1c
+#define DRM_I915_GEM_PWRITE 0x1d
+#define DRM_I915_GEM_MMAP 0x1e
+#define DRM_I915_GEM_SET_DOMAIN 0x1f
+#define DRM_I915_GEM_SET_TILING 0x21
+#define DRM_I915_GEM_GET_TILING 0x22
+#define DRM_I915_GEM_GET_APERTURE 0x23
+#define DRM_I915_GEM_MMAP_GTT 0x24
+
+struct drm_i915_gem_create {
+ /**
+ * Requested size for the object.
+ *
+ * The (page-aligned) allocated size for the object will be returned.
+ */
+ uint64_t size;
+ /**
+ * Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_i915_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /**
+ * Pointer to write the data into.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t data_ptr;
+};
+
+struct drm_i915_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /**
+ * Pointer to read the data from.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t data_ptr;
+};
+
+struct drm_i915_gem_mmap {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset in the object to map. */
+ uint64_t offset;
+ /**
+ * Length of data to map.
+ *
+ * The value will be page-aligned.
+ */
+ uint64_t size;
+ /**
+ * Returned pointer the data was mapped at.
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t addr_ptr;
+};
+
+struct drm_i915_gem_mmap_gtt {
+ /** Handle for the object being mapped. */
+ uint32_t handle;
+ uint32_t pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_set_domain {
+ /** Handle for the object */
+ uint32_t handle;
+
+ /** New read domains */
+ uint32_t read_domains;
+
+ /** New write domain */
+ uint32_t write_domain;
+};
+
+struct drm_i915_gem_relocation_entry {
+ /**
+ * Handle of the buffer being pointed to by this relocation entry.
+ *
+ * It's appealing to make this be an index into the mm_validate_entry
+ * list to refer to the buffer, but this allows the driver to create
+ * a relocation list for state buffers and not re-write it per
+ * exec using the buffer.
+ */
+ uint32_t target_handle;
+
+ /**
+ * Value to be added to the offset of the target buffer to make up
+ * the relocation entry.
+ */
+ uint32_t delta;
+
+ /** Offset in the buffer the relocation entry will be written into */
+ uint64_t offset;
+
+ /**
+ * Offset value of the target buffer that the relocation entry was last
+ * written as.
+ *
+ * If the buffer has the same offset as last time, we can skip syncing
+ * and writing the relocation. This value is written back out by
+ * the execbuffer ioctl when the relocation is written.
+ */
+ uint64_t presumed_offset;
+
+ /**
+ * Target memory domains read by this operation.
+ */
+ uint32_t read_domains;
+
+ /**
+ * Target memory domains written by this operation.
+ *
+ * Note that only one domain may be written by the whole
+ * execbuffer operation, so that where there are conflicts,
+ * the application will get -EINVAL back.
+ */
+ uint32_t write_domain;
+};
+
+struct drm_i915_gem_exec_object {
+ /**
+ * User's handle for a buffer to be bound into the GTT for this
+ * operation.
+ */
+ uint32_t handle;
+
+ /** Number of relocations to be performed on this buffer */
+ uint32_t relocation_count;
+ /**
+ * Pointer to array of struct drm_i915_gem_relocation_entry containing
+ * the relocations to be performed in this buffer.
+ */
+ uint64_t relocs_ptr;
+
+ /** Required alignment in graphics aperture */
+ uint64_t alignment;
+
+ /**
+ * Returned value of the updated offset of the object, for future
+ * presumed_offset writes.
+ */
+ uint64_t offset;
+};
+
+struct drm_i915_gem_execbuffer {
+ /**
+ * List of buffers to be validated with their relocations to be
+	 * performed on them.
+ *
+ * This is a pointer to an array of struct drm_i915_gem_validate_entry.
+ *
+ * These buffers must be listed in an order such that all relocations
+ * a buffer is performing refer to buffers that have already appeared
+ * in the validate list.
+ */
+ uint64_t buffers_ptr;
+ uint32_t buffer_count;
+
+ /** Offset in the batchbuffer to start execution from. */
+ uint32_t batch_start_offset;
+ /** Bytes used in batchbuffer from batch_start_offset */
+ uint32_t batch_len;
+ uint32_t DR1;
+ uint32_t DR4;
+ uint32_t num_cliprects;
+ /** This is a struct drm_clip_rect *cliprects */
+ uint64_t cliprects_ptr;
+};
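+
+/*
+ * A sketch of how the execbuffer pieces above fit together: each exec
+ * object's relocs_ptr/relocation_count describe an array of relocation
+ * entries, and the batch itself is submitted as the final exec object.
+ * Field values here are illustrative only; I915_GEM_DOMAIN_RENDER stands
+ * in for whichever domain flags the command stream actually needs:
+ *
+ *	struct drm_i915_gem_relocation_entry reloc = {
+ *	    .target_handle   = dst_handle,
+ *	    .delta           = 0,
+ *	    .offset          = pointer_offset_in_batch,
+ *	    .presumed_offset = last_known_gtt_offset,
+ *	    .read_domains    = I915_GEM_DOMAIN_RENDER,
+ *	    .write_domain    = I915_GEM_DOMAIN_RENDER,
+ *	};
+ */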
+
+struct drm_i915_gem_busy {
+ /** Handle of the buffer to check for busy */
+ uint32_t handle;
+
+ /** Return busy status (1 if busy, 0 if idle) */
+ uint32_t busy;
+};
+
+struct drm_i915_gem_set_tiling {
+ /** Handle of the buffer to have its tiling state updated */
+ uint32_t handle;
+
+ /**
+ * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ *
+ * This value is to be set on request, and will be updated by the
+ * kernel on successful return with the actual chosen tiling layout.
+ *
+ * The tiling mode may be demoted to I915_TILING_NONE when the system
+ * has bit 6 swizzling that can't be managed correctly by GEM.
+ *
+ * Buffer contents become undefined when changing tiling_mode.
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Stride in bytes for the object when in I915_TILING_X or
+ * I915_TILING_Y.
+ */
+ uint32_t stride;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_tiling {
+ /** Handle of the buffer to get tiling state for. */
+ uint32_t handle;
+
+ /**
+ * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X,
+ * I915_TILING_Y).
+ */
+ uint32_t tiling_mode;
+
+ /**
+ * Returned address bit 6 swizzling required for CPU access through
+ * mmap mapping.
+ */
+ uint32_t swizzle_mode;
+};
+
+struct drm_i915_gem_get_aperture {
+ /** Total size of the aperture used by i915_gem_execbuffer, in bytes */
+ uint64_t aper_size;
+
+ /**
+ * Available space in the aperture used by i915_gem_execbuffer, in
+ * bytes
+ */
+ uint64_t aper_available_size;
+};
+
+
+#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
+#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
+#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE)
+#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create)
+#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread)
+#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
+#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
+#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
+#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
+#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling)
+#define DRM_IOCTL_I915_GEM_GET_APERTURE DRM_IOR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_APERTURE, struct drm_i915_gem_get_aperture)
+
+/* XXX madvise */
+#ifndef DRM_I915_GEM_MADVISE
+#define I915_MADV_WILLNEED 0
+#define I915_MADV_DONTNEED 1
+
+struct drm_i915_gem_madvise {
+ uint32_t handle;
+ uint32_t madv;
+ uint32_t retained;
+};
+#define DRM_I915_GEM_MADVISE 0x26
+#define DRM_IOCTL_I915_GEM_MADVISE DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_MADVISE, struct drm_i915_gem_madvise)
+#endif
+
+
+cairo_bool_t
+intel_info (int fd, uint64_t *gtt_size)
+{
+ struct drm_i915_gem_get_aperture info;
+ int ret;
+
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &info);
+ if (ret == -1)
+ return FALSE;
+
+ if (gtt_size != NULL)
+ *gtt_size = info.aper_size;
+
+ return TRUE;
+}
+
+void
+intel_bo_write (const intel_device_t *device,
+ intel_bo_t *bo,
+ unsigned long offset,
+ unsigned long size,
+ const void *data)
+{
+ struct drm_i915_gem_pwrite pwrite;
+ int ret;
+
+ memset (&pwrite, 0, sizeof (pwrite));
+ pwrite.handle = bo->base.handle;
+ pwrite.offset = offset;
+ pwrite.size = size;
+ pwrite.data_ptr = (uint64_t) (uintptr_t) data;
+ do {
+ ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
+ } while (ret == -1 && errno == EINTR);
+}
+
+void
+intel_bo_read (const intel_device_t *device,
+ intel_bo_t *bo,
+ unsigned long offset,
+ unsigned long size,
+ void *data)
+{
+ struct drm_i915_gem_pread pread;
+ int ret;
+
+ memset (&pread, 0, sizeof (pread));
+ pread.handle = bo->base.handle;
+ pread.offset = offset;
+ pread.size = size;
+ pread.data_ptr = (uint64_t) (uintptr_t) data;
+ do {
+ ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_PREAD, &pread);
+ } while (ret == -1 && errno == EINTR);
+}
+
+void *
+intel_bo_map (const intel_device_t *device, intel_bo_t *bo)
+{
+ struct drm_i915_gem_set_domain set_domain;
+ int ret;
+ uint32_t domain;
+
+ assert (bo->virtual == NULL);
+
+ if (bo->tiling != I915_TILING_NONE) {
+ struct drm_i915_gem_mmap_gtt mmap_arg;
+ void *ptr;
+
+ mmap_arg.handle = bo->base.handle;
+ mmap_arg.offset = 0;
+
+ /* Get the fake offset back... */
+ do {
+ ret = ioctl (device->base.fd,
+ DRM_IOCTL_I915_GEM_MMAP_GTT, &mmap_arg);
+ } while (ret == -1 && errno == EINTR);
+ if (unlikely (ret != 0)) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ return NULL;
+ }
+
+ /* and mmap it */
+ ptr = mmap (0, bo->base.size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, device->base.fd,
+ mmap_arg.offset);
+ if (unlikely (ptr == MAP_FAILED)) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ return NULL;
+ }
+
+ bo->virtual = ptr;
+ } else {
+ struct drm_i915_gem_mmap mmap_arg;
+
+ mmap_arg.handle = bo->base.handle;
+ mmap_arg.offset = 0;
+ mmap_arg.size = bo->base.size;
+ mmap_arg.addr_ptr = 0;
+
+ do {
+ ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
+ } while (ret == -1 && errno == EINTR);
+ if (unlikely (ret != 0)) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ return NULL;
+ }
+
+ bo->virtual = (void *) (uintptr_t) mmap_arg.addr_ptr;
+ }
+
+ domain = bo->tiling == I915_TILING_NONE ?
+ I915_GEM_DOMAIN_CPU : I915_GEM_DOMAIN_GTT;
+ set_domain.handle = bo->base.handle;
+ set_domain.read_domains = domain;
+ set_domain.write_domain = domain;
+
+ do {
+ ret = ioctl (device->base.fd,
+ DRM_IOCTL_I915_GEM_SET_DOMAIN, &set_domain);
+ } while (ret == -1 && errno == EINTR);
+
+    if (ret != 0) {
+	_cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+	/* don't leak the mapping we just created */
+	intel_bo_unmap (bo);
+	return NULL;
+    }
+
+ return bo->virtual;
+}
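+
+/*
+ * Typical CPU access through the mapping (a sketch; the set-domain call
+ * above has already made the pages coherent for the chosen domain):
+ *
+ *	uint32_t *pixels = intel_bo_map (device, bo);
+ *	if (pixels != NULL) {
+ *	    pixels[0] = 0xff0000ff;
+ *	    intel_bo_unmap (bo);
+ *	}
+ */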
+
+void
+intel_bo_unmap (intel_bo_t *bo)
+{
+ munmap (bo->virtual, bo->base.size);
+ bo->virtual = NULL;
+}
+
+static cairo_bool_t
+intel_bo_is_inactive (const intel_device_t *device, const intel_bo_t *bo)
+{
+ struct drm_i915_gem_busy busy;
+
+ /* Is this buffer busy for our intended usage pattern? */
+ busy.handle = bo->base.handle;
+ busy.busy = 1;
+ ioctl (device->base.fd, DRM_IOCTL_I915_GEM_BUSY, &busy);
+
+ return ! busy.busy;
+}
+
+static inline int
+pot (int v)
+{
+ v--;
+ v |= v >> 1;
+ v |= v >> 2;
+ v |= v >> 4;
+ v |= v >> 8;
+ v |= v >> 16;
+ v++;
+ return v;
+}
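+
+/*
+ * pot() rounds up to the next power of two by smearing the top set bit
+ * into every lower position and then adding one, e.g.
+ *
+ *	pot (4096)   == 4096
+ *	pot (4097)   == 8192
+ *	pot (360448) == 524288
+ *
+ * which is how page-aligned sizes are mapped onto the cache buckets below.
+ */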
+
+static void
+intel_bo_cache_remove (intel_device_t *device,
+ intel_bo_t *bo,
+ int bucket)
+{
+ _cairo_drm_bo_close (&device->base, &bo->base);
+
+ cairo_list_del (&bo->cache_list);
+
+ if (device->bo_cache[bucket].num_entries-- >
+ device->bo_cache[bucket].min_entries)
+ {
+ device->bo_cache_size -= bo->base.size;
+ }
+
+ _cairo_freepool_free (&device->bo_pool, bo);
+}
+
+static cairo_bool_t
+intel_bo_madvise (intel_device_t *device,
+ intel_bo_t *bo,
+ int advice)
+{
+ struct drm_i915_gem_madvise madv;
+
+ madv.handle = bo->base.handle;
+ madv.madv = advice;
+ madv.retained = TRUE;
+ ioctl (device->base.fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
+ return madv.retained;
+}
+
+static void
+intel_bo_cache_purge (intel_device_t *device)
+{
+ int bucket;
+
+ for (bucket = 0; bucket < INTEL_BO_CACHE_BUCKETS; bucket++) {
+ intel_bo_t *bo, *next;
+
+ cairo_list_foreach_entry_safe (bo, next,
+ intel_bo_t,
+ &device->bo_cache[bucket].list,
+ cache_list)
+ {
+ if (! intel_bo_madvise (device, bo, I915_MADV_DONTNEED))
+ intel_bo_cache_remove (device, bo, bucket);
+ }
+ }
+}
+
+cairo_drm_bo_t *
+intel_bo_create (intel_device_t *device,
+ uint32_t size,
+ cairo_bool_t gpu_target)
+{
+ intel_bo_t *bo = NULL;
+ uint32_t cache_size;
+ struct drm_i915_gem_create create;
+ int bucket;
+ int ret;
+
+ cache_size = pot ((size + 4095) & -4096);
+ bucket = ffs (cache_size / 4096) - 1;
+ CAIRO_MUTEX_LOCK (device->bo_mutex);
+ if (bucket < INTEL_BO_CACHE_BUCKETS) {
+ size = cache_size;
+
+ /* Our goal is to avoid clflush which occur on CPU->GPU
+ * transitions, so we want to minimise reusing CPU
+ * write buffers. However, by the time a buffer is freed
+ * it is most likely in the GPU domain anyway (readback is rare!).
+ */
+ retry:
+ if (gpu_target) {
+ do {
+ cairo_list_foreach_entry_reverse (bo,
+ intel_bo_t,
+ &device->bo_cache[bucket].list,
+ cache_list)
+ {
+ /* For a gpu target, by the time our batch fires, the
+ * GPU will have finished using this buffer. However,
+ * changing tiling may require a fence deallocation and
+ * cause serialisation...
+ */
+
+ if (! intel_bo_madvise (device, bo, I915_MADV_WILLNEED)) {
+ intel_bo_cache_remove (device, bo, bucket);
+ goto retry;
+ }
+
+ if (device->bo_cache[bucket].num_entries-- >
+ device->bo_cache[bucket].min_entries)
+ {
+ device->bo_cache_size -= bo->base.size;
+ }
+ cairo_list_del (&bo->cache_list);
+ CAIRO_MUTEX_UNLOCK (device->bo_mutex);
+ goto DONE;
+ }
+
+ /* As it is unlikely to trigger clflush, we can use the
+ * first available buffer into which we fit.
+ */
+ } while (++bucket < INTEL_BO_CACHE_BUCKETS);
+ } else {
+ if (! cairo_list_is_empty (&device->bo_cache[bucket].list)) {
+ bo = cairo_list_first_entry (&device->bo_cache[bucket].list,
+ intel_bo_t, cache_list);
+ if (intel_bo_is_inactive (device, bo)) {
+ if (! intel_bo_madvise (device, bo, I915_MADV_WILLNEED)) {
+ intel_bo_cache_remove (device, bo, bucket);
+ goto retry;
+ }
+
+ if (device->bo_cache[bucket].num_entries-- >
+ device->bo_cache[bucket].min_entries)
+ {
+ device->bo_cache_size -= bo->base.size;
+ }
+ cairo_list_del (&bo->cache_list);
+ CAIRO_MUTEX_UNLOCK (device->bo_mutex);
+ goto DONE;
+ }
+ }
+ }
+ }
+
+ if (device->bo_cache_size > device->bo_max_cache_size_high) {
+ intel_bo_cache_purge (device);
+
+ /* trim caches by discarding the most recent buffer in each bucket */
+ while (device->bo_cache_size > device->bo_max_cache_size_low) {
+ for (bucket = INTEL_BO_CACHE_BUCKETS; bucket--; ) {
+ if (device->bo_cache[bucket].num_entries >
+ device->bo_cache[bucket].min_entries)
+ {
+ bo = cairo_list_last_entry (&device->bo_cache[bucket].list,
+ intel_bo_t, cache_list);
+
+ intel_bo_cache_remove (device, bo, bucket);
+ }
+ }
+ }
+ }
+
+ /* no cached buffer available, allocate fresh */
+ bo = _cairo_freepool_alloc (&device->bo_pool);
+ CAIRO_MUTEX_UNLOCK (device->bo_mutex);
+ if (unlikely (bo == NULL)) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ return NULL;
+ }
+
+ cairo_list_init (&bo->cache_list);
+
+ bo->base.name = 0;
+ bo->base.size = size;
+
+ bo->offset = 0;
+ bo->virtual = NULL;
+
+ bo->tiling = I915_TILING_NONE;
+ bo->stride = 0;
+ bo->swizzle = I915_BIT_6_SWIZZLE_NONE;
+
+ bo->in_batch = FALSE;
+ bo->read_domains = 0;
+ bo->write_domain = 0;
+
+ create.size = size;
+ create.handle = 0;
+ ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_CREATE, &create);
+ if (unlikely (ret != 0)) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+	/* the bo came from the freepool, so return it there */
+	_cairo_freepool_free (&device->bo_pool, bo);
+ return NULL;
+ }
+
+ bo->base.handle = create.handle;
+
+DONE:
+ CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
+
+ return &bo->base;
+}
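+
+/*
+ * A worked allocation round trip (illustrative numbers, assuming
+ * INTEL_BO_CACHE_BUCKETS reaches at least the 512KiB bucket):
+ *
+ *	a 300x300 ARGB32 surface needs 1200 * 300 == 360000 bytes
+ *	page aligned:	(360000 + 4095) & -4096	== 360448
+ *	cache_size:	pot (360448)		== 524288
+ *	bucket:		ffs (524288 / 4096) - 1	== 7
+ *
+ * On release the buffer is parked on bo_cache[7] and marked DONTNEED; a
+ * later request that rounds to the same bucket can reuse it (after a
+ * successful WILLNEED madvise) without another GEM_CREATE ioctl.
+ */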
+
+cairo_drm_bo_t *
+intel_bo_create_for_name (intel_device_t *device, uint32_t name)
+{
+ struct drm_i915_gem_get_tiling get_tiling;
+ cairo_status_t status;
+ intel_bo_t *bo;
+ int ret;
+
+ CAIRO_MUTEX_LOCK (device->bo_mutex);
+ bo = _cairo_freepool_alloc (&device->bo_pool);
+ CAIRO_MUTEX_UNLOCK (device->bo_mutex);
+ if (unlikely (bo == NULL)) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ return NULL;
+ }
+
+ status = _cairo_drm_bo_open_for_name (&device->base, &bo->base, name);
+ if (unlikely (status)) {
+ _cairo_freepool_free (&device->bo_pool, bo);
+ return NULL;
+ }
+
+ CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
+ cairo_list_init (&bo->cache_list);
+
+ bo->offset = 0;
+ bo->virtual = NULL;
+
+ bo->in_batch = FALSE;
+ bo->read_domains = 0;
+ bo->write_domain = 0;
+
+ memset (&get_tiling, 0, sizeof (get_tiling));
+ get_tiling.handle = bo->base.handle;
+
+ ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_GET_TILING, &get_tiling);
+ if (unlikely (ret != 0)) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ _cairo_drm_bo_close (&device->base, &bo->base);
+ _cairo_freepool_free (&device->bo_pool, bo);
+ return NULL;
+ }
+
+ bo->tiling = get_tiling.tiling_mode;
+ bo->swizzle = get_tiling.swizzle_mode;
+	/* GET_TILING does not report the stride; leave it unset until the
+	 * first intel_bo_set_tiling() */
+	bo->stride = 0;
+
+ return &bo->base;
+}
+
+void
+intel_bo_release (void *_dev, void *_bo)
+{
+ intel_device_t *device = _dev;
+ intel_bo_t *bo = _bo;
+ int bucket;
+
+ bucket = INTEL_BO_CACHE_BUCKETS;
+    /* only power-of-two sized buffers map onto a cache bucket */
+    if ((bo->base.size & (bo->base.size - 1)) == 0)
+	bucket = ffs (bo->base.size / 4096) - 1;
+
+ CAIRO_MUTEX_LOCK (device->bo_mutex);
+ if (bo->base.name == 0 && bucket < INTEL_BO_CACHE_BUCKETS) {
+ if (++device->bo_cache[bucket].num_entries >
+ device->bo_cache[bucket].min_entries)
+ {
+ device->bo_cache_size += bo->base.size;
+ }
+
+ cairo_list_add_tail (&bo->cache_list, &device->bo_cache[bucket].list);
+
+ intel_bo_madvise (device, bo, I915_MADV_DONTNEED);
+ }
+ else
+ {
+ _cairo_drm_bo_close (&device->base, &bo->base);
+ _cairo_freepool_free (&device->bo_pool, bo);
+ }
+ CAIRO_MUTEX_UNLOCK (device->bo_mutex);
+}
+
+void
+intel_bo_set_tiling (intel_device_t *device,
+ intel_bo_t *bo,
+ uint32_t tiling,
+ uint32_t stride)
+{
+ struct drm_i915_gem_set_tiling set_tiling;
+ int ret;
+
+ if (bo->tiling == tiling &&
+ (tiling == I915_TILING_NONE || bo->stride == stride))
+ {
+ return;
+ }
+
+ assert (! bo->in_batch);
+
+ if (bo->virtual)
+ intel_bo_unmap (bo);
+
+ set_tiling.handle = bo->base.handle;
+ set_tiling.tiling_mode = tiling;
+ set_tiling.stride = stride;
+
+ ret = ioctl (device->base.fd, DRM_IOCTL_I915_GEM_SET_TILING, &set_tiling);
+ if (ret == 0) {
+ bo->tiling = set_tiling.tiling_mode;
+ bo->swizzle = set_tiling.swizzle_mode;
+ bo->stride = set_tiling.stride;
+ }
+}
+
+cairo_surface_t *
+intel_bo_get_image (const intel_device_t *device,
+ intel_bo_t *bo,
+ const cairo_drm_surface_t *surface)
+{
+ cairo_image_surface_t *image;
+ uint8_t *dst;
+ int size, row;
+
+ image = (cairo_image_surface_t *)
+ cairo_image_surface_create (surface->format,
+ surface->width,
+ surface->height);
+ if (unlikely (image->base.status))
+ return &image->base;
+
+ if (bo->tiling == I915_TILING_NONE) {
+ if (image->stride == surface->stride) {
+ size = surface->stride * surface->height;
+ intel_bo_read (device, bo, 0, size, image->data);
+ } else {
+ int offset;
+
+ size = surface->width;
+ if (surface->format != CAIRO_FORMAT_A8)
+ size *= 4;
+
+ offset = 0;
+ row = surface->height;
+ dst = image->data;
+ while (row--) {
+ intel_bo_read (device, bo, offset, size, dst);
+ offset += surface->stride;
+ dst += image->stride;
+ }
+ }
+ } else {
+ const uint8_t *src;
+
+ src = intel_bo_map (device, bo);
+ if (unlikely (src == NULL))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+
+ size = surface->width;
+ if (surface->format != CAIRO_FORMAT_A8)
+ size *= 4;
+
+ row = surface->height;
+ dst = image->data;
+ while (row--) {
+ memcpy (dst, src, size);
+ dst += image->stride;
+ src += surface->stride;
+ }
+
+ intel_bo_unmap (bo);
+ }
+
+ return &image->base;
+}
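+
+/*
+ * Example of the stride mismatch handled above: a 250 pixel wide ARGB32
+ * surface has 1000 bytes of pixel data per row, which is the stride the
+ * image surface created here gets, but the bo may have been allocated
+ * with a larger, hardware-friendly pitch (say 1024 bytes), in which case
+ * the copy has to proceed row by row.
+ */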
+
+static void
+_intel_device_init_bo_cache (intel_device_t *device)
+{
+ int i;
+
+ CAIRO_MUTEX_INIT (device->bo_mutex);
+ device->bo_cache_size = 0;
+ device->bo_max_cache_size_high = device->gtt_max_size / 2;
+ device->bo_max_cache_size_low = device->gtt_max_size / 4;
+
+ for (i = 0; i < INTEL_BO_CACHE_BUCKETS; i++) {
+ struct _intel_bo_cache *cache = &device->bo_cache[i];
+
+ cairo_list_init (&cache->list);
+
+	/* let each of the smallest buckets keep ~256KiB uncounted:
+	 * 64*4KiB ... 1*256KiB */
+ if (i <= 6)
+ cache->min_entries = 1 << (6 - i);
+ else
+ cache->min_entries = 0;
+ cache->num_entries = 0;
+ }
+
+ _cairo_freepool_init (&device->bo_pool, sizeof (intel_bo_t));
+}
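+
+/*
+ * Worked out, the initial bucket configuration above is:
+ *
+ *	bucket	object size	min_entries	exempt from bo_cache_size
+ *	0	4 KiB		64		256 KiB
+ *	1	8 KiB		32		256 KiB
+ *	...
+ *	6	256 KiB		1		256 KiB
+ *	7+	512 KiB and up	0		all entries counted
+ */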
+
+cairo_status_t
+intel_device_init (intel_device_t *device, int fd)
+{
+ struct drm_i915_gem_get_aperture aperture;
+ int ret;
+
+ ret = ioctl (fd, DRM_IOCTL_I915_GEM_GET_APERTURE, &aperture);
+ if (ret != 0)
+ return _cairo_error (CAIRO_STATUS_NO_MEMORY);
+
+ device->gtt_max_size = aperture.aper_size;
+ device->gtt_avail_size = aperture.aper_available_size;
+
+ _intel_device_init_bo_cache (device);
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static void
+_intel_bo_cache_fini (intel_device_t *device)
+{
+ int bucket;
+
+ for (bucket = 0; bucket < INTEL_BO_CACHE_BUCKETS; bucket++) {
+ struct _intel_bo_cache *cache = &device->bo_cache[bucket];
+ intel_bo_t *bo;
+
+ cairo_list_foreach_entry (bo, intel_bo_t, &cache->list, cache_list)
+ _cairo_drm_bo_close (&device->base, &bo->base);
+ }
+
+ _cairo_freepool_fini (&device->bo_pool);
+ CAIRO_MUTEX_FINI (device->bo_mutex);
+}
+
+void
+intel_device_fini (intel_device_t *device)
+{
+ _intel_bo_cache_fini (device);
+ _cairo_drm_device_fini (&device->base);
+}
+
+void
+intel_throttle (intel_device_t *device)
+{
+ ioctl (device->base.fd, DRM_IOCTL_I915_GEM_THROTTLE);
+}
diff --git a/src/drm/cairo-drm-ioctl-private.h b/src/drm/cairo-drm-ioctl-private.h
new file mode 100644
index 00000000..4294de2d
--- /dev/null
+++ b/src/drm/cairo-drm-ioctl-private.h
@@ -0,0 +1,12 @@
+#ifndef CAIRO_DRM_IOCTL_PRIVATE_H
+#define CAIRO_DRM_IOCTL_PRIVATE_H
+
+#define DRM_IOCTL_BASE 'd'
+#define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr)
+#define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOW(nr,type) _IOW(DRM_IOCTL_BASE,nr,type)
+#define DRM_IOWR(nr,type) _IOWR(DRM_IOCTL_BASE,nr,type)
+
+#define DRM_COMMAND_BASE 0x40
+
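+/*
+ * With these helpers, the request codes in cairo-drm-intel.c expand to
+ * ioctls on the driver-private command range, e.g.
+ *
+ *	DRM_IOCTL_I915_GEM_CREATE
+ *	    == _IOWR ('d', 0x40 + 0x1b, struct drm_i915_gem_create)
+ */
+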
+#endif /* CAIRO_DRM_IOCTL_PRIVATE_H */
diff --git a/src/drm/cairo-drm-private.h b/src/drm/cairo-drm-private.h
new file mode 100644
index 00000000..4a32c68a
--- /dev/null
+++ b/src/drm/cairo-drm-private.h
@@ -0,0 +1,257 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ * The Original Code is the cairo graphics library.
+ *
+ * The Initial Developer of the Original Code is Chris Wilson.
+ *
+ * Contributor(s):
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ */
+
+#ifndef CAIRO_DRM_PRIVATE_H
+#define CAIRO_DRM_PRIVATE_H
+
+#include "cairo-drm.h"
+
+#include "cairo-surface-private.h"
+#include "cairo-reference-count-private.h"
+
+#include <sys/types.h> /* dev_t */
+
+typedef cairo_drm_device_t *
+(*cairo_drm_device_create_func_t) (int fd,
+ dev_t dev,
+ int vendor_id,
+ int chip_id);
+
+typedef cairo_int_status_t
+(*cairo_drm_device_throttle_func_t) (cairo_drm_device_t *device);
+
+typedef void
+(*cairo_drm_device_destroy_func_t) (void *data);
+
+typedef cairo_surface_t *
+(*cairo_drm_surface_create_func_t) (cairo_drm_device_t *device,
+ cairo_content_t content,
+ int width, int height);
+
+typedef cairo_surface_t *
+(*cairo_drm_surface_create_for_name_func_t) (cairo_drm_device_t *device,
+ unsigned int name,
+ cairo_format_t format,
+ int width, int height, int stride);
+
+typedef cairo_surface_t *
+(*cairo_drm_surface_create_from_cacheable_image_func_t)
+ (cairo_drm_device_t *device, cairo_surface_t *image);
+
+typedef cairo_int_status_t
+(*cairo_drm_surface_flink_func_t) (void *surface);
+
+typedef cairo_status_t
+(*cairo_drm_surface_enable_scan_out_func_t) (void *surface);
+
+typedef struct _cairo_drm_bo_backend {
+ void (*release) (void *device, void *bo);
+} cairo_drm_bo_backend_t;
+
+typedef struct _cairo_drm_device_backend {
+ cairo_drm_device_throttle_func_t throttle;
+ cairo_drm_device_destroy_func_t destroy;
+} cairo_drm_device_backend_t;
+
+typedef struct _cairo_drm_surface_backend {
+ cairo_drm_surface_create_func_t create;
+ cairo_drm_surface_create_for_name_func_t create_for_name;
+ cairo_drm_surface_create_from_cacheable_image_func_t create_from_cacheable_image;
+ cairo_drm_surface_flink_func_t flink;
+ cairo_drm_surface_enable_scan_out_func_t enable_scan_out;
+} cairo_drm_surface_backend_t;
+
+typedef struct _cairo_drm_bo {
+ cairo_reference_count_t ref_count;
+ uint32_t name;
+ uint32_t handle;
+ uint32_t size;
+} cairo_drm_bo_t;
+
+struct _cairo_drm_device {
+ cairo_reference_count_t ref_count;
+ cairo_status_t status;
+
+ dev_t id;
+ int fd;
+
+ int max_surface_size;
+
+ cairo_drm_bo_backend_t bo;
+ cairo_drm_surface_backend_t surface;
+ cairo_drm_device_backend_t device;
+
+ cairo_drm_device_t *next, *prev;
+};
+
+typedef struct _cairo_drm_surface {
+ cairo_surface_t base;
+
+ cairo_drm_device_t *device;
+ cairo_drm_bo_t *bo;
+
+ cairo_format_t format;
+ int width, height, stride;
+
+ cairo_surface_t *fallback;
+ uint32_t map_count;
+} cairo_drm_surface_t;
+
+static inline cairo_drm_bo_t *
+cairo_drm_bo_reference (cairo_drm_bo_t *bo)
+{
+ _cairo_reference_count_inc (&bo->ref_count);
+ return bo;
+}
+
+static inline void
+cairo_drm_bo_destroy (cairo_drm_device_t *device,
+ cairo_drm_bo_t *bo)
+{
+ if (_cairo_reference_count_dec_and_test (&bo->ref_count))
+ device->bo.release (device, bo);
+}
+
+cairo_private cairo_drm_device_t *
+_cairo_drm_device_create_in_error (cairo_status_t status);
+
+cairo_private cairo_status_t
+_cairo_drm_bo_open_for_name (const cairo_drm_device_t *dev,
+ cairo_drm_bo_t *bo,
+ uint32_t name);
+
+cairo_private cairo_status_t
+_cairo_drm_bo_flink (const cairo_drm_device_t *dev,
+ cairo_drm_bo_t *bo);
+
+cairo_private void
+_cairo_drm_bo_close (const cairo_drm_device_t *dev,
+ cairo_drm_bo_t *bo);
+
+cairo_private void
+_cairo_drm_surface_init (cairo_drm_surface_t *surface,
+ cairo_drm_device_t *device);
+
+cairo_private cairo_status_t
+_cairo_drm_surface_finish (cairo_drm_surface_t *surface);
+
+cairo_private cairo_surface_t *
+_cairo_drm_surface_create_similar (void *abstract_src,
+ cairo_content_t content,
+ int width,
+ int height);
+cairo_private void
+_cairo_drm_surface_get_font_options (void *abstract_surface,
+ cairo_font_options_t *options);
+
+cairo_private cairo_bool_t
+_cairo_drm_surface_get_extents (void *abstract_surface,
+ cairo_rectangle_int_t *rectangle);
+
+cairo_private cairo_int_status_t
+_cairo_drm_surface_paint (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ cairo_clip_t *clip);
+
+cairo_private cairo_int_status_t
+_cairo_drm_surface_mask (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ const cairo_pattern_t *mask,
+ cairo_clip_t *clip);
+
+cairo_private cairo_int_status_t
+_cairo_drm_surface_stroke (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ cairo_path_fixed_t *path,
+ cairo_stroke_style_t *style,
+ cairo_matrix_t *ctm,
+ cairo_matrix_t *ctm_inverse,
+ double tolerance,
+ cairo_antialias_t antialias,
+ cairo_clip_t *clip);
+
+cairo_private cairo_int_status_t
+_cairo_drm_surface_fill (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ cairo_path_fixed_t *path,
+ cairo_fill_rule_t fill_rule,
+ double tolerance,
+ cairo_antialias_t antialias,
+ cairo_clip_t *clip);
+
+cairo_private cairo_int_status_t
+_cairo_drm_surface_show_glyphs (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ cairo_glyph_t *glyphs,
+ int num_glyphs,
+ cairo_scaled_font_t *scaled_font,
+ cairo_clip_t *clip,
+ int *remaining_glyphs);
+
+cairo_private cairo_int_status_t
+_cairo_drm_surface_flink (void *abstract_surface);
+
+cairo_private cairo_drm_device_t *
+_cairo_drm_device_init (cairo_drm_device_t *device,
+ int fd, dev_t id,
+ int max_surface_size);
+
+cairo_private void
+_cairo_drm_device_fini (cairo_drm_device_t *device);
+
+/* h/w specific backends */
+
+cairo_private cairo_drm_device_t *
+_cairo_drm_intel_device_create (int fd, dev_t dev, int vendor_id, int chip_id);
+
+cairo_private cairo_drm_device_t *
+_cairo_drm_radeon_device_create (int fd, dev_t dev, int vendor_id, int chip_id);
+
+#if CAIRO_HAS_GALLIUM_SURFACE
+cairo_private cairo_drm_device_t *
+_cairo_drm_gallium_device_create (int fd, dev_t dev, int vendor_id, int chip_id);
+#endif
+
+slim_hidden_proto (cairo_drm_device_default);
+slim_hidden_proto (cairo_drm_device_destroy);
+slim_hidden_proto (cairo_drm_device_get);
+slim_hidden_proto_no_warn (cairo_drm_device_reference);
+
+#endif /* CAIRO_DRM_PRIVATE_H */
diff --git a/src/drm/cairo-drm-radeon-private.h b/src/drm/cairo-drm-radeon-private.h
new file mode 100644
index 00000000..9c0c3b46
--- /dev/null
+++ b/src/drm/cairo-drm-radeon-private.h
@@ -0,0 +1,110 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#ifndef CAIRO_DRM_RADEON_PRIVATE_H
+#define CAIRO_DRM_RADEON_PRIVATE_H
+
+#include "cairo-compiler-private.h"
+#include "cairo-types-private.h"
+#include "cairo-drm-private.h"
+#include "cairo-freelist-private.h"
+
+#define RADEON_GEM_DOMAIN_CPU 0x1
+#define RADEON_GEM_DOMAIN_GTT 0x2
+#define RADEON_GEM_DOMAIN_VRAM 0x4
+
+typedef struct _radeon_bo {
+ cairo_drm_bo_t base;
+
+ void *virtual;
+
+ cairo_bool_t in_batch;
+ uint32_t read_domains;
+ uint32_t write_domain;
+} radeon_bo_t;
+
+typedef struct _radeon_device {
+ cairo_drm_device_t base;
+ cairo_freepool_t bo_pool;
+
+ uint64_t vram_limit;
+ uint64_t gart_limit;
+} radeon_device_t;
+
+cairo_private cairo_status_t
+radeon_device_init (radeon_device_t *device, int fd);
+
+cairo_private void
+radeon_device_fini (radeon_device_t *device);
+
+cairo_private cairo_bool_t
+radeon_info (int fd,
+ uint64_t *gart_size,
+ uint64_t *vram_size);
+
+cairo_private void
+radeon_bo_write (const radeon_device_t *dev,
+ radeon_bo_t *bo,
+ unsigned long offset,
+ unsigned long size,
+ const void *data);
+
+cairo_private void
+radeon_bo_read (const radeon_device_t *dev,
+ radeon_bo_t *bo,
+ unsigned long offset,
+ unsigned long size,
+ void *data);
+
+cairo_private void
+radeon_bo_wait (const radeon_device_t *dev, radeon_bo_t *bo);
+
+cairo_private void *
+radeon_bo_map (const radeon_device_t *dev, radeon_bo_t *bo);
+
+cairo_private void
+radeon_bo_unmap (radeon_bo_t *bo);
+
+cairo_private cairo_drm_bo_t *
+radeon_bo_create (radeon_device_t *dev,
+ uint32_t size,
+ uint32_t initial_domain);
+
+cairo_private cairo_drm_bo_t *
+radeon_bo_create_for_name (radeon_device_t *dev, uint32_t name);
+
+cairo_private void
+radeon_bo_release (void *_dev, void *_bo);
+
+cairo_private cairo_surface_t *
+radeon_bo_get_image (const radeon_device_t *device,
+ radeon_bo_t *bo,
+ const cairo_drm_surface_t *surface);
+
+#endif /* CAIRO_DRM_RADEON_PRIVATE_H */
diff --git a/src/drm/cairo-drm-radeon-surface.c b/src/drm/cairo-drm-radeon-surface.c
new file mode 100644
index 00000000..94931626
--- /dev/null
+++ b/src/drm/cairo-drm-radeon-surface.c
@@ -0,0 +1,437 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include "cairoint.h"
+
+#include "cairo-drm-private.h"
+#include "cairo-drm-radeon-private.h"
+
+/* Basic stub surface for radeon chipsets */
+
+#define MAX_SIZE 2048
+
+typedef struct _radeon_surface {
+ cairo_drm_surface_t base;
+} radeon_surface_t;
+
+static inline radeon_device_t *
+to_radeon_device (cairo_drm_device_t *device)
+{
+ return (radeon_device_t *) device;
+}
+
+static inline radeon_bo_t *
+to_radeon_bo (cairo_drm_bo_t *bo)
+{
+ return (radeon_bo_t *) bo;
+}
+
+static cairo_status_t
+radeon_batch_flush (radeon_device_t *device)
+{
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static cairo_status_t
+radeon_surface_batch_flush (radeon_surface_t *surface)
+{
+ if (to_radeon_bo (surface->base.bo)->write_domain)
+ return radeon_batch_flush (to_radeon_device (surface->base.device));
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static cairo_status_t
+radeon_surface_finish (void *abstract_surface)
+{
+ radeon_surface_t *surface = abstract_surface;
+
+ return _cairo_drm_surface_finish (&surface->base);
+}
+
+static cairo_status_t
+radeon_surface_acquire_source_image (void *abstract_surface,
+ cairo_image_surface_t **image_out,
+ void **image_extra)
+{
+ radeon_surface_t *surface = abstract_surface;
+ cairo_surface_t *image;
+ cairo_status_t status;
+
+ if (surface->base.fallback != NULL) {
+ image = surface->base.fallback;
+ goto DONE;
+ }
+
+ image = _cairo_surface_has_snapshot (&surface->base.base,
+ &_cairo_image_surface_backend,
+ surface->base.base.content);
+ if (image != NULL)
+ goto DONE;
+
+ status = radeon_surface_batch_flush (surface);
+ if (unlikely (status))
+ return status;
+
+ image = radeon_bo_get_image (to_radeon_device (surface->base.device),
+ to_radeon_bo (surface->base.bo),
+ &surface->base);
+ status = image->status;
+ if (unlikely (status))
+ return status;
+
+ status = _cairo_surface_attach_snapshot (&surface->base.base,
+ image,
+ cairo_surface_destroy);
+ if (unlikely (status)) {
+ cairo_surface_destroy (image);
+ return status;
+ }
+
+DONE:
+ *image_out = (cairo_image_surface_t *) cairo_surface_reference (image);
+ *image_extra = NULL;
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static void
+radeon_surface_release_source_image (void *abstract_surface,
+ cairo_image_surface_t *image,
+ void *image_extra)
+{
+ cairo_surface_destroy (&image->base);
+}
+
+static cairo_surface_t *
+radeon_surface_snapshot (void *abstract_surface)
+{
+ radeon_surface_t *surface = abstract_surface;
+ cairo_status_t status;
+
+ if (surface->base.fallback != NULL)
+ return NULL;
+
+ status = radeon_surface_batch_flush (surface);
+ if (unlikely (status))
+ return _cairo_surface_create_in_error (status);
+
+ return radeon_bo_get_image (to_radeon_device (surface->base.device),
+ to_radeon_bo (surface->base.bo),
+ &surface->base);
+}
+
+static cairo_status_t
+radeon_surface_acquire_dest_image (void *abstract_surface,
+ cairo_rectangle_int_t *interest_rect,
+ cairo_image_surface_t **image_out,
+ cairo_rectangle_int_t *image_rect_out,
+ void **image_extra)
+{
+ radeon_surface_t *surface = abstract_surface;
+ cairo_surface_t *image;
+ cairo_status_t status;
+ void *ptr;
+
+ assert (surface->base.fallback == NULL);
+
+ status = radeon_surface_batch_flush (surface);
+ if (unlikely (status))
+ return status;
+
+ /* Force a read barrier, as well as flushing writes above */
+ radeon_bo_wait (to_radeon_device (surface->base.device),
+ to_radeon_bo (surface->base.bo));
+
+ ptr = radeon_bo_map (to_radeon_device (surface->base.device),
+ to_radeon_bo (surface->base.bo));
+ if (unlikely (ptr == NULL))
+ return _cairo_error (CAIRO_STATUS_NO_MEMORY);
+
+ image = cairo_image_surface_create_for_data (ptr,
+ surface->base.format,
+ surface->base.width,
+ surface->base.height,
+ surface->base.stride);
+ status = image->status;
+ if (unlikely (status)) {
+ radeon_bo_unmap (to_radeon_bo (surface->base.bo));
+ return status;
+ }
+
+ surface->base.fallback = cairo_surface_reference (image);
+
+ *image_out = (cairo_image_surface_t *) image;
+ *image_extra = NULL;
+
+ image_rect_out->x = 0;
+ image_rect_out->y = 0;
+ image_rect_out->width = surface->base.width;
+ image_rect_out->height = surface->base.height;
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static void
+radeon_surface_release_dest_image (void *abstract_surface,
+ cairo_rectangle_int_t *interest_rect,
+ cairo_image_surface_t *image,
+ cairo_rectangle_int_t *image_rect,
+ void *image_extra)
+{
+ /* Keep the fallback until we flush, either explicitly or at the
+ * end of this context. The idea is to avoid excess migration of
+ * the buffer between GPU and CPU domains.
+ */
+ cairo_surface_destroy (&image->base);
+}
+
+static cairo_status_t
+radeon_surface_flush (void *abstract_surface)
+{
+ radeon_surface_t *surface = abstract_surface;
+ cairo_status_t status;
+
+ if (surface->base.fallback == NULL)
+ return radeon_surface_batch_flush (surface);
+
+ /* kill any outstanding maps */
+ cairo_surface_finish (surface->base.fallback);
+
+ status = cairo_surface_status (surface->base.fallback);
+ cairo_surface_destroy (surface->base.fallback);
+ surface->base.fallback = NULL;
+
+ radeon_bo_unmap (to_radeon_bo (surface->base.bo));
+
+ return status;
+}
+
+static const cairo_surface_backend_t radeon_surface_backend = {
+ CAIRO_SURFACE_TYPE_DRM,
+ _cairo_drm_surface_create_similar,
+ radeon_surface_finish,
+
+ radeon_surface_acquire_source_image,
+ radeon_surface_release_source_image,
+ radeon_surface_acquire_dest_image,
+ radeon_surface_release_dest_image,
+
+ NULL, //radeon_surface_clone_similar,
+ NULL, //radeon_surface_composite,
+ NULL, //radeon_surface_fill_rectangles,
+ NULL, //radeon_surface_composite_trapezoids,
+ NULL, //radeon_surface_create_span_renderer,
+ NULL, //radeon_surface_check_span_renderer,
+ NULL, /* copy_page */
+ NULL, /* show_page */
+ _cairo_drm_surface_get_extents,
+ NULL, /* old_show_glyphs */
+ _cairo_drm_surface_get_font_options,
+ radeon_surface_flush,
+ NULL, /* mark_dirty_rectangle */
+ NULL, //radeon_surface_scaled_font_fini,
+ NULL, //radeon_surface_scaled_glyph_fini,
+
+ _cairo_drm_surface_paint,
+ _cairo_drm_surface_mask,
+ _cairo_drm_surface_stroke,
+ _cairo_drm_surface_fill,
+ _cairo_drm_surface_show_glyphs,
+
+ radeon_surface_snapshot,
+
+ NULL, /* is_similar */
+
+ NULL, /* reset */
+};
+
+static void
+radeon_surface_init (radeon_surface_t *surface,
+ cairo_content_t content,
+ cairo_drm_device_t *device)
+{
+ _cairo_surface_init (&surface->base.base, &radeon_surface_backend, content);
+ _cairo_drm_surface_init (&surface->base, device);
+
+ switch (content) {
+ case CAIRO_CONTENT_ALPHA:
+ surface->base.format = CAIRO_FORMAT_A8;
+ break;
+ case CAIRO_CONTENT_COLOR:
+ surface->base.format = CAIRO_FORMAT_RGB24;
+ break;
+ default:
+ ASSERT_NOT_REACHED;
+ case CAIRO_CONTENT_COLOR_ALPHA:
+ surface->base.format = CAIRO_FORMAT_ARGB32;
+ break;
+ }
+}
+
+static cairo_surface_t *
+radeon_surface_create_internal (cairo_drm_device_t *device,
+ cairo_content_t content,
+ int width, int height)
+{
+ radeon_surface_t *surface;
+ cairo_status_t status;
+
+ surface = malloc (sizeof (radeon_surface_t));
+ if (unlikely (surface == NULL))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+
+ radeon_surface_init (surface, content, device);
+
+ if (width && height) {
+ surface->base.width = width;
+ surface->base.height = height;
+
+ surface->base.stride =
+ cairo_format_stride_for_width (surface->base.format, width);
+
+ surface->base.bo = radeon_bo_create (to_radeon_device (device),
+ surface->base.stride * height,
+ RADEON_GEM_DOMAIN_GTT);
+
+ if (unlikely (surface->base.bo == NULL)) {
+ status = _cairo_drm_surface_finish (&surface->base);
+ free (surface);
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+ }
+ }
+
+ return &surface->base.base;
+}
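+
+/*
+ * For example, a 640x480 CAIRO_CONTENT_COLOR_ALPHA surface maps to
+ * CAIRO_FORMAT_ARGB32 with stride 2560, so the code above allocates a
+ * 2560 * 480 == 1228800 byte buffer object in the GTT domain.
+ */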
+
+static cairo_surface_t *
+radeon_surface_create (cairo_drm_device_t *device,
+ cairo_content_t content,
+ int width, int height)
+{
+ return radeon_surface_create_internal (device, content, width, height);
+}
+
+static cairo_surface_t *
+radeon_surface_create_for_name (cairo_drm_device_t *device,
+ unsigned int name,
+ cairo_format_t format,
+ int width, int height, int stride)
+{
+ radeon_surface_t *surface;
+ cairo_status_t status;
+ cairo_content_t content;
+
+ switch (format) {
+ default:
+ case CAIRO_FORMAT_A1:
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
+ case CAIRO_FORMAT_ARGB32:
+ content = CAIRO_CONTENT_COLOR_ALPHA;
+ break;
+ case CAIRO_FORMAT_RGB24:
+ content = CAIRO_CONTENT_COLOR;
+ break;
+ case CAIRO_FORMAT_A8:
+ content = CAIRO_CONTENT_ALPHA;
+ break;
+ }
+
+ if (stride < cairo_format_stride_for_width (format, width))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_STRIDE));
+
+ surface = malloc (sizeof (radeon_surface_t));
+ if (unlikely (surface == NULL))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+
+ radeon_surface_init (surface, content, device);
+
+ if (width && height) {
+ surface->base.width = width;
+ surface->base.height = height;
+ surface->base.stride = stride;
+
+ surface->base.bo = radeon_bo_create_for_name (to_radeon_device (device),
+ name);
+
+ if (unlikely (surface->base.bo == NULL)) {
+ status = _cairo_drm_surface_finish (&surface->base);
+ free (surface);
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_NO_MEMORY));
+ }
+ }
+
+ return &surface->base.base;
+}
+
+static void
+radeon_device_destroy (void *data)
+{
+ radeon_device_t *device = data;
+
+ radeon_device_fini (device);
+
+ free (data);
+}
+
+cairo_drm_device_t *
+_cairo_drm_radeon_device_create (int fd, dev_t dev, int vendor_id, int chip_id)
+{
+ radeon_device_t *device;
+ uint64_t gart_size, vram_size;
+ cairo_status_t status;
+
+ if (! radeon_info (fd, &gart_size, &vram_size))
+ return NULL;
+
+ device = malloc (sizeof (radeon_device_t));
+ if (device == NULL)
+ return _cairo_drm_device_create_in_error (CAIRO_STATUS_NO_MEMORY);
+
+ status = radeon_device_init (device, fd);
+ if (unlikely (status)) {
+ free (device);
+ return _cairo_drm_device_create_in_error (status);
+ }
+
+ device->base.surface.create = radeon_surface_create;
+ device->base.surface.create_for_name = radeon_surface_create_for_name;
+ device->base.surface.create_from_cacheable_image = NULL;
+ device->base.surface.flink = _cairo_drm_surface_flink;
+ device->base.surface.enable_scan_out = NULL;
+
+ device->base.device.throttle = NULL;
+ device->base.device.destroy = radeon_device_destroy;
+
+ device->base.bo.release = radeon_bo_release;
+
+ device->vram_limit = vram_size;
+ device->gart_limit = gart_size;
+
+    return _cairo_drm_device_init (&device->base, fd, dev, MAX_SIZE);
+}
diff --git a/src/drm/cairo-drm-radeon.c b/src/drm/cairo-drm-radeon.c
new file mode 100644
index 00000000..9654be6c
--- /dev/null
+++ b/src/drm/cairo-drm-radeon.c
@@ -0,0 +1,447 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ */
+
+#include "cairoint.h"
+
+#include "cairo-drm-private.h"
+#include "cairo-drm-radeon-private.h"
+#include "cairo-drm-ioctl-private.h"
+
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <errno.h>
+
+#define DRM_RADEON_GEM_INFO 0x1c
+#define DRM_RADEON_GEM_CREATE 0x1d
+#define DRM_RADEON_GEM_MMAP 0x1e
+#define DRM_RADEON_GEM_PREAD 0x21
+#define DRM_RADEON_GEM_PWRITE 0x22
+#define DRM_RADEON_GEM_SET_DOMAIN 0x23
+#define DRM_RADEON_GEM_WAIT_IDLE 0x24
+#define DRM_RADEON_CS 0x26
+#define DRM_RADEON_INFO 0x27
+
+#define DRM_IOCTL_RADEON_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_INFO, struct drm_radeon_gem_info)
+#define DRM_IOCTL_RADEON_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_CREATE, struct drm_radeon_gem_create)
+#define DRM_IOCTL_RADEON_GEM_WAIT_IDLE DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
+#define DRM_IOCTL_RADEON_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_MMAP, struct drm_radeon_gem_mmap)
+#define DRM_IOCTL_RADEON_GEM_PREAD DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PREAD, struct drm_radeon_gem_pread)
+#define DRM_IOCTL_RADEON_GEM_PWRITE DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_PWRITE, struct drm_radeon_gem_pwrite)
+#define DRM_IOCTL_RADEON_GEM_SET_DOMAIN DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_DOMAIN, struct drm_radeon_gem_set_domain)
+//#define DRM_IOCTL_RADEON_CS DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
+
+struct drm_radeon_gem_info {
+ uint64_t gart_size;
+ uint64_t vram_size;
+ uint64_t vram_visible;
+};
+
+#define RADEON_GEM_NO_BACKING_STORE 1
+
+struct drm_radeon_gem_create {
+ uint64_t size;
+ uint64_t alignment;
+ uint32_t handle;
+ uint32_t initial_domain;
+ uint32_t flags;
+};
+
+struct drm_radeon_gem_mmap {
+ uint32_t handle;
+ uint32_t pad;
+ uint64_t offset;
+ uint64_t size;
+ uint64_t addr_ptr;
+};
+
+struct drm_radeon_gem_set_domain {
+ uint32_t handle;
+ uint32_t read_domains;
+ uint32_t write_domain;
+};
+
+struct drm_radeon_gem_wait_idle {
+ uint32_t handle;
+ uint32_t pad;
+};
+
+struct drm_radeon_gem_busy {
+ uint32_t handle;
+ uint32_t busy;
+};
+
+struct drm_radeon_gem_pread {
+ /** Handle for the object being read. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to read from */
+ uint64_t offset;
+ /** Length of data to read */
+ uint64_t size;
+ /** Pointer to write the data into. */
+ /* void *, but pointers are not 32/64 compatible */
+ uint64_t data_ptr;
+};
+
+struct drm_radeon_gem_pwrite {
+ /** Handle for the object being written to. */
+ uint32_t handle;
+ uint32_t pad;
+ /** Offset into the object to write to */
+ uint64_t offset;
+ /** Length of data to write */
+ uint64_t size;
+ /** Pointer to read the data from. */
+ /* void *, but pointers are not 32/64 compatible */
+ uint64_t data_ptr;
+};
+
+#define RADEON_CHUNK_ID_RELOCS 0x01
+#define RADEON_CHUNK_ID_IB 0x02
+
+struct drm_radeon_cs_chunk {
+ uint32_t chunk_id;
+ uint32_t length_dw;
+ uint64_t chunk_data;
+};
+
+struct drm_radeon_cs_reloc {
+ uint32_t handle;
+ uint32_t read_domains;
+ uint32_t write_domain;
+ uint32_t flags;
+};
+
+struct drm_radeon_cs {
+ uint32_t num_chunks;
+ uint32_t cs_id;
+	/* this points to an array of uint64_t pointers to the cs chunks */
+ uint64_t chunks;
+ /* updates to the limits after this CS ioctl */
+ uint64_t gart_limit;
+ uint64_t vram_limit;
+};
+
+#define RADEON_INFO_DEVICE_ID 0x00
+#define RADEON_INFO_NUM_GB_PIPES 0x01
+
+struct drm_radeon_info {
+ uint32_t request;
+ uint32_t pad;
+ uint64_t value;
+};
+
+
+cairo_bool_t
+radeon_info (int fd,
+ uint64_t *gart_size,
+ uint64_t *vram_size)
+{
+ struct drm_radeon_gem_info info;
+ int ret;
+
+ ret = ioctl (fd, DRM_IOCTL_RADEON_GEM_INFO, &info);
+ if (ret == -1)
+ return FALSE;
+
+ if (gart_size != NULL)
+ *gart_size = info.gart_size;
+
+ if (vram_size != NULL)
+ *vram_size = info.vram_size;
+
+ return TRUE;
+}
+
+void
+radeon_bo_write (const radeon_device_t *device,
+ radeon_bo_t *bo,
+ unsigned long offset,
+ unsigned long size,
+ const void *data)
+{
+ struct drm_radeon_gem_pwrite pwrite;
+ int ret;
+
+ memset (&pwrite, 0, sizeof (pwrite));
+ pwrite.handle = bo->base.handle;
+ pwrite.offset = offset;
+ pwrite.size = size;
+ pwrite.data_ptr = (uint64_t) (uintptr_t) data;
+ do {
+ ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_PWRITE, &pwrite);
+ } while (ret == -1 && errno == EINTR);
+
+ /* XXX temporary workaround */
+ if (ret == -1 && errno == ENOSYS) {
+ uint8_t *ptr;
+
+ ptr = radeon_bo_map (device, bo);
+ if (ptr != NULL) {
+ memcpy (ptr + offset, data, size);
+ radeon_bo_unmap (bo);
+ }
+ }
+}
+
+void
+radeon_bo_read (const radeon_device_t *device,
+ radeon_bo_t *bo,
+ unsigned long offset,
+ unsigned long size,
+ void *data)
+{
+ struct drm_radeon_gem_pread pread;
+ int ret;
+
+ memset (&pread, 0, sizeof (pread));
+ pread.handle = bo->base.handle;
+ pread.offset = offset;
+ pread.size = size;
+ pread.data_ptr = (uint64_t) (uintptr_t) data;
+ do {
+ ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_PREAD, &pread);
+ } while (ret == -1 && errno == EINTR);
+
+ /* XXX temporary workaround */
+ if (ret == -1 && errno == ENOSYS) {
+ uint8_t *ptr;
+
+ ptr = radeon_bo_map (device, bo);
+ if (ptr != NULL) {
+ memcpy (data, ptr + offset, size);
+ radeon_bo_unmap (bo);
+ }
+ }
+
+ VG (VALGRIND_MAKE_MEM_DEFINED (data, size));
+}
+
+void
+radeon_bo_wait (const radeon_device_t *device, radeon_bo_t *bo)
+{
+ struct drm_radeon_gem_wait_idle wait;
+ int ret;
+
+ wait.handle = bo->base.handle;
+ do {
+ ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_WAIT_IDLE, &wait);
+ } while (ret == -1 && (errno == EINTR || errno == EBUSY));
+}
+
+void *
+radeon_bo_map (const radeon_device_t *device, radeon_bo_t *bo)
+{
+ struct drm_radeon_gem_mmap mmap_arg;
+ void *ptr;
+ int ret;
+
+ assert (bo->virtual == NULL);
+
+ memset (&mmap_arg, 0, sizeof (mmap_arg));
+ mmap_arg.handle = bo->base.handle;
+ mmap_arg.offset = 0;
+ mmap_arg.size = bo->base.size;
+
+ do {
+ ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_MMAP, &mmap_arg);
+ } while (ret == -1 && errno == EINTR);
+ if (unlikely (ret != 0)) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ return NULL;
+ }
+
+ VG (VALGRIND_MAKE_MEM_DEFINED (&mmap_arg, sizeof (mmap_arg)));
+
+ /* and mmap it */
+ ptr = mmap (0, bo->base.size, PROT_READ | PROT_WRITE,
+ MAP_SHARED, device->base.fd,
+ mmap_arg.addr_ptr);
+ if (unlikely (ptr == MAP_FAILED)) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ return NULL;
+ }
+
+ bo->virtual = ptr;
+
+ /* XXX set_domain? */
+ return bo->virtual;
+}
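+
+/*
+ * Unlike the intel path, no set-domain call is issued here (see the XXX
+ * above); if one turns out to be necessary it would presumably look like
+ *
+ *	struct drm_radeon_gem_set_domain sd;
+ *
+ *	sd.handle       = bo->base.handle;
+ *	sd.read_domains = RADEON_GEM_DOMAIN_CPU;
+ *	sd.write_domain = RADEON_GEM_DOMAIN_CPU;
+ *	ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_SET_DOMAIN, &sd);
+ */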
+
+void
+radeon_bo_unmap (radeon_bo_t *bo)
+{
+ assert (bo->virtual != NULL);
+
+ munmap (bo->virtual, bo->base.size);
+ bo->virtual = NULL;
+}
+
+cairo_drm_bo_t *
+radeon_bo_create (radeon_device_t *device,
+ uint32_t size,
+ uint32_t initial_domain)
+{
+ struct drm_radeon_gem_create create;
+ radeon_bo_t *bo;
+ int ret;
+
+ bo = _cairo_freepool_alloc (&device->bo_pool);
+ if (unlikely (bo == NULL))
+ return NULL;
+
+ create.size = size;
+ create.alignment = 0;
+ create.initial_domain = initial_domain;
+ create.flags = 0;
+ create.handle = 0;
+
+ do {
+ ret = ioctl (device->base.fd, DRM_IOCTL_RADEON_GEM_CREATE, &create);
+ } while (ret == -1 && errno == EINTR);
+ if (ret == -1) {
+ _cairo_freepool_free (&device->bo_pool, bo);
+ return NULL;
+ }
+
+ bo->base.handle = create.handle;
+ bo->base.size = size;
+
+ bo->virtual = NULL;
+
+ bo->in_batch = FALSE;
+ bo->read_domains = 0;
+ bo->write_domain = 0;
+
+ CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
+ return &bo->base;
+}
+
+cairo_drm_bo_t *
+radeon_bo_create_for_name (radeon_device_t *device,
+ uint32_t name)
+{
+ radeon_bo_t *bo;
+ cairo_status_t status;
+
+ bo = _cairo_freepool_alloc (&device->bo_pool);
+ if (unlikely (bo == NULL))
+ return NULL;
+
+ status = _cairo_drm_bo_open_for_name (&device->base, &bo->base, name);
+ if (unlikely (status)) {
+ _cairo_freepool_free (&device->bo_pool, bo);
+ return NULL;
+ }
+
+ bo->virtual = NULL;
+
+ bo->in_batch = FALSE;
+ bo->read_domains = 0;
+ bo->write_domain = 0;
+
+ CAIRO_REFERENCE_COUNT_INIT (&bo->base.ref_count, 1);
+ return &bo->base;
+}
+
+void
+radeon_bo_release (void *_dev, void *_bo)
+{
+ radeon_device_t *device = _dev;
+ radeon_bo_t *bo = _bo;
+
+ _cairo_drm_bo_close (&device->base, &bo->base);
+ _cairo_freepool_free (&device->bo_pool, bo);
+}
+
+cairo_surface_t *
+radeon_bo_get_image (const radeon_device_t *device,
+ radeon_bo_t *bo,
+ const cairo_drm_surface_t *surface)
+{
+ cairo_image_surface_t *image;
+ uint8_t *dst;
+ int size, row;
+
+ image = (cairo_image_surface_t *)
+ cairo_image_surface_create (surface->format,
+ surface->width,
+ surface->height);
+ if (unlikely (image->base.status))
+ return &image->base;
+
+ if (image->stride == surface->stride) {
+ size = surface->stride * surface->height;
+ radeon_bo_read (device, bo, 0, size, image->data);
+ } else {
+ int offset;
+
+ size = surface->width;
+ if (surface->format != CAIRO_FORMAT_A8)
+ size *= 4;
+
+ offset = 0;
+ row = surface->height;
+ dst = image->data;
+ while (row--) {
+ radeon_bo_read (device, bo, offset, size, dst);
+ offset += surface->stride;
+ dst += image->stride;
+ }
+ }
+
+ return &image->base;
+}
+
+static void
+_radeon_device_init_bo_cache (radeon_device_t *device)
+{
+ _cairo_freepool_init (&device->bo_pool, sizeof (radeon_bo_t));
+}
+
+cairo_status_t
+radeon_device_init (radeon_device_t *device, int fd)
+{
+ _radeon_device_init_bo_cache (device);
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+static void
+_radeon_bo_cache_fini (radeon_device_t *device)
+{
+ _cairo_freepool_fini (&device->bo_pool);
+}
+
+void
+radeon_device_fini (radeon_device_t *device)
+{
+ _radeon_bo_cache_fini (device);
+ _cairo_drm_device_fini (&device->base);
+}
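
The radeon bo helpers above all wrap their ioctls in the same restart-on-EINTR loop. As a stand-alone illustration of that pattern (a sketch, not part of the patch; the helper name is hypothetical):

#include <errno.h>
#include <sys/ioctl.h>

/* Sketch only: retry an ioctl that was interrupted by a signal, exactly as
 * the do/while loops in the bo helpers above do inline. */
static int
radeon_ioctl_retry (int fd, unsigned long request, void *arg)
{
    int ret;

    do {
        ret = ioctl (fd, request, arg);
    } while (ret == -1 && errno == EINTR);

    return ret;
}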
diff --git a/src/drm/cairo-drm-surface.c b/src/drm/cairo-drm-surface.c
new file mode 100644
index 00000000..860d0992
--- /dev/null
+++ b/src/drm/cairo-drm-surface.c
@@ -0,0 +1,517 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ * The Original Code is the cairo graphics library.
+ *
+ * The Initial Developer of the Original Code is Chris Wilson.
+ */
+
+#include "cairoint.h"
+
+#include "cairo-drm-private.h"
+#include "cairo-surface-fallback-private.h"
+
+cairo_surface_t *
+_cairo_drm_surface_create_similar (void *abstract_surface,
+ cairo_content_t content,
+ int width,
+ int height)
+{
+ cairo_drm_surface_t *surface = abstract_surface;
+ cairo_drm_device_t *device;
+
+ if (surface->fallback != NULL)
+ return _cairo_image_surface_create_with_content (content,
+ width, height);
+
+ device = surface->device;
+ if (width > device->max_surface_size || height > device->max_surface_size)
+ return NULL;
+
+ return device->surface.create (device, content, width, height);
+}
+
+void
+_cairo_drm_surface_init (cairo_drm_surface_t *surface,
+ cairo_drm_device_t *device)
+{
+ surface->device = cairo_drm_device_reference (device);
+
+ surface->bo = NULL;
+ surface->width = 0;
+ surface->height = 0;
+ surface->stride = 0;
+
+ surface->fallback = NULL;
+ surface->map_count = 0;
+}
+
+cairo_status_t
+_cairo_drm_surface_finish (cairo_drm_surface_t *surface)
+{
+ if (surface->bo != NULL)
+ cairo_drm_bo_destroy (surface->device, surface->bo);
+
+ cairo_drm_device_destroy (surface->device);
+
+ return CAIRO_STATUS_SUCCESS;
+}
+
+void
+_cairo_drm_surface_get_font_options (void *abstract_surface,
+ cairo_font_options_t *options)
+{
+ _cairo_font_options_init_default (options);
+
+ cairo_font_options_set_hint_metrics (options, CAIRO_HINT_METRICS_ON);
+}
+
+cairo_bool_t
+_cairo_drm_surface_get_extents (void *abstract_surface,
+ cairo_rectangle_int_t *rectangle)
+{
+ cairo_drm_surface_t *surface = abstract_surface;
+
+ rectangle->x = 0;
+ rectangle->y = 0;
+ rectangle->width = surface->width;
+ rectangle->height = surface->height;
+
+ return TRUE;
+}
+
+cairo_int_status_t
+_cairo_drm_surface_paint (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ cairo_clip_t *clip)
+{
+ cairo_drm_surface_t *surface = abstract_surface;
+
+ if (surface->fallback != NULL)
+ return _cairo_surface_paint (surface->fallback, op, source, clip);
+
+ return _cairo_surface_fallback_paint (&surface->base, op, source, clip);
+}
+
+cairo_int_status_t
+_cairo_drm_surface_mask (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ const cairo_pattern_t *mask,
+ cairo_clip_t *clip)
+{
+ cairo_drm_surface_t *surface = abstract_surface;
+
+ if (surface->fallback != NULL) {
+ return _cairo_surface_mask (surface->fallback,
+ op, source, mask,
+ clip);
+ }
+
+ return _cairo_surface_fallback_mask (&surface->base,
+ op, source, mask, clip);
+}
+
+cairo_int_status_t
+_cairo_drm_surface_stroke (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ cairo_path_fixed_t *path,
+ cairo_stroke_style_t *style,
+ cairo_matrix_t *ctm,
+ cairo_matrix_t *ctm_inverse,
+ double tolerance,
+ cairo_antialias_t antialias,
+ cairo_clip_t *clip)
+{
+ cairo_drm_surface_t *surface = abstract_surface;
+
+ if (surface->fallback != NULL) {
+ return _cairo_surface_stroke (surface->fallback,
+ op, source,
+ path, style,
+ ctm, ctm_inverse,
+ tolerance, antialias,
+ clip);
+ }
+
+ return _cairo_surface_fallback_stroke (&surface->base, op, source,
+ path, style,
+ ctm, ctm_inverse,
+ tolerance, antialias,
+ clip);
+}
+
+cairo_int_status_t
+_cairo_drm_surface_fill (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ cairo_path_fixed_t *path,
+ cairo_fill_rule_t fill_rule,
+ double tolerance,
+ cairo_antialias_t antialias,
+ cairo_clip_t *clip)
+{
+ cairo_drm_surface_t *surface = abstract_surface;
+
+ if (surface->fallback != NULL) {
+ return _cairo_surface_fill (surface->fallback,
+ op, source,
+ path, fill_rule,
+ tolerance, antialias,
+ clip);
+ }
+
+ return _cairo_surface_fallback_fill (&surface->base, op, source,
+ path, fill_rule,
+ tolerance, antialias,
+ clip);
+}
+
+cairo_int_status_t
+_cairo_drm_surface_show_glyphs (void *abstract_surface,
+ cairo_operator_t op,
+ const cairo_pattern_t *source,
+ cairo_glyph_t *glyphs,
+ int num_glyphs,
+ cairo_scaled_font_t *scaled_font,
+ cairo_clip_t *clip,
+ int *remaining_glyphs)
+{
+ cairo_drm_surface_t *surface = abstract_surface;
+
+ if (surface->fallback != NULL) {
+ *remaining_glyphs = 0;
+ return _cairo_surface_show_text_glyphs (surface->fallback,
+ op, source,
+ NULL, 0,
+ glyphs, num_glyphs,
+ NULL, 0, 0,
+ scaled_font,
+ clip);
+ }
+
+ return _cairo_surface_fallback_show_glyphs (&surface->base,
+ op, source,
+ glyphs, num_glyphs,
+ scaled_font,
+ clip);
+}
+
+
+cairo_surface_t *
+cairo_drm_surface_create (cairo_drm_device_t *device,
+ cairo_content_t content,
+ int width, int height)
+{
+ cairo_surface_t *surface;
+
+ if (! CAIRO_CONTENT_VALID (content))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_CONTENT));
+
+ if (device != NULL && device->status)
+ {
+ surface = _cairo_surface_create_in_error (device->status);
+ }
+ else if (device == NULL ||
+ device->surface.create == NULL ||
+ width == 0 || width > device->max_surface_size ||
+ height == 0 || height > device->max_surface_size)
+ {
+ surface = _cairo_image_surface_create_with_content (content,
+ width, height);
+ }
+ else
+ {
+ surface = device->surface.create (device, content, width, height);
+ }
+
+ return surface;
+}
+
+cairo_surface_t *
+cairo_drm_surface_create_for_name (cairo_drm_device_t *device,
+ unsigned int name,
+ cairo_format_t format,
+ int width, int height, int stride)
+{
+ cairo_surface_t *surface;
+
+ if (! CAIRO_FORMAT_VALID (format))
+ return _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
+
+ if (device != NULL && device->status)
+ {
+ surface = _cairo_surface_create_in_error (device->status);
+ }
+ else if (device == NULL || device->surface.create_for_name == NULL)
+ {
+ /* XXX invalid device! */
+ surface = _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
+ }
+ else if (width == 0 || width > device->max_surface_size ||
+ height == 0 || height > device->max_surface_size)
+ {
+ surface = _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_SIZE));
+ }
+ else
+ {
+ surface = device->surface.create_for_name (device,
+ name, format,
+ width, height, stride);
+ }
+
+ return surface;
+}
+
+cairo_surface_t *
+cairo_drm_surface_create_from_cacheable_image (cairo_drm_device_t *dev,
+ cairo_surface_t *surface)
+{
+ if (surface->status) {
+ surface = _cairo_surface_create_in_error (surface->status);
+ } else if (dev != NULL && dev->status) {
+ surface = _cairo_surface_create_in_error (dev->status);
+ } else if (dev == NULL || dev->surface.create_from_cacheable_image == NULL) {
+ /* XXX invalid device! */
+ surface = _cairo_surface_create_in_error (_cairo_error (CAIRO_STATUS_INVALID_FORMAT));
+ } else {
+ surface = dev->surface.create_from_cacheable_image (dev, surface);
+ }
+
+ return surface;
+}
+
+static cairo_drm_surface_t *
+_cairo_surface_as_drm (cairo_surface_t *abstract_surface)
+{
+ if (unlikely (abstract_surface->status))
+ return NULL;
+
+ if (abstract_surface->type != CAIRO_SURFACE_TYPE_DRM)
+ return NULL;
+
+ return (cairo_drm_surface_t *) abstract_surface;
+}
+
+cairo_status_t
+cairo_drm_surface_enable_scan_out (cairo_surface_t *abstract_surface)
+{
+ cairo_drm_surface_t *surface;
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL)
+ return _cairo_error (CAIRO_STATUS_SURFACE_TYPE_MISMATCH);
+
+ if (surface->device->surface.enable_scan_out == NULL)
+ return CAIRO_STATUS_SUCCESS;
+
+ return surface->device->surface.enable_scan_out (abstract_surface);
+}
+
+cairo_drm_device_t *
+cairo_drm_surface_get_device (cairo_surface_t *abstract_surface)
+{
+ cairo_drm_surface_t *surface;
+
+ if (unlikely (abstract_surface->status))
+ return _cairo_drm_device_create_in_error (abstract_surface->status);
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL) {
+ _cairo_error_throw (CAIRO_STATUS_SURFACE_TYPE_MISMATCH);
+ return NULL;
+ }
+
+ return surface->device;
+}
+
+unsigned int
+cairo_drm_surface_get_handle (cairo_surface_t *abstract_surface)
+{
+ cairo_drm_surface_t *surface;
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL) {
+ _cairo_error_throw (CAIRO_STATUS_SURFACE_TYPE_MISMATCH);
+ return 0;
+ }
+
+ return surface->bo->handle;
+}
+
+cairo_int_status_t
+_cairo_drm_surface_flink (void *abstract_surface)
+{
+ cairo_drm_surface_t *surface = abstract_surface;
+
+ return _cairo_drm_bo_flink (surface->device, surface->bo);
+}
+
+unsigned int
+cairo_drm_surface_get_name (cairo_surface_t *abstract_surface)
+{
+ cairo_drm_surface_t *surface;
+ cairo_status_t status;
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL) {
+ _cairo_error_throw (CAIRO_STATUS_SURFACE_TYPE_MISMATCH);
+ return 0;
+ }
+
+ if (surface->bo->name)
+ return surface->bo->name;
+
+ if (surface->device->surface.flink == NULL)
+ return 0;
+
+ status = surface->device->surface.flink (abstract_surface);
+ if (status) {
+ if (_cairo_status_is_error (status))
+ status = _cairo_surface_set_error (abstract_surface, status);
+
+ return 0;
+ }
+
+ return surface->bo->name;
+}
+
+cairo_format_t
+cairo_drm_surface_get_format (cairo_surface_t *abstract_surface)
+{
+ cairo_drm_surface_t *surface;
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL)
+ return cairo_image_surface_get_format (abstract_surface);
+
+ return surface->format;
+}
+
+int
+cairo_drm_surface_get_width (cairo_surface_t *abstract_surface)
+{
+ cairo_drm_surface_t *surface;
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL)
+ return cairo_image_surface_get_width (abstract_surface);
+
+ return surface->width;
+}
+
+int
+cairo_drm_surface_get_height (cairo_surface_t *abstract_surface)
+{
+ cairo_drm_surface_t *surface;
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL)
+ return cairo_image_surface_get_height (abstract_surface);
+
+ return surface->height;
+}
+
+int
+cairo_drm_surface_get_stride (cairo_surface_t *abstract_surface)
+{
+ cairo_drm_surface_t *surface;
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL)
+ return cairo_image_surface_get_stride (abstract_surface);
+
+ return surface->stride;
+}
+
+/* XXX drm or general surface layer? naming? */
+cairo_surface_t *
+cairo_drm_surface_map (cairo_surface_t *abstract_surface)
+{
+ cairo_drm_surface_t *surface;
+ cairo_rectangle_int_t roi;
+ cairo_image_surface_t *image;
+ cairo_status_t status;
+ void *image_extra;
+
+ if (unlikely (abstract_surface->status))
+ return _cairo_surface_create_in_error (abstract_surface->status);
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL) {
+ if (_cairo_surface_is_image (abstract_surface))
+ return cairo_surface_reference (abstract_surface);
+
+ status = _cairo_surface_set_error (abstract_surface,
+ CAIRO_STATUS_SURFACE_TYPE_MISMATCH);
+ return _cairo_surface_create_in_error (status);
+ }
+
+ roi.x = roi.y = 0;
+ roi.width = surface->width;
+ roi.height = surface->height;
+
+ status = _cairo_surface_acquire_dest_image (abstract_surface,
+ &roi,
+ &image,
+ &roi,
+ &image_extra);
+ if (unlikely (status))
+ return _cairo_surface_create_in_error (status);
+
+ assert (image_extra == NULL);
+
+ surface->map_count++;
+
+ return &image->base;
+}
+
+void
+cairo_drm_surface_unmap (cairo_surface_t *abstract_surface,
+ cairo_surface_t *image)
+{
+ cairo_drm_surface_t *surface;
+
+ surface = _cairo_surface_as_drm (abstract_surface);
+ if (surface == NULL) {
+ if (_cairo_surface_is_image (abstract_surface))
+ cairo_surface_destroy (image);
+ else
+ _cairo_error_throw (CAIRO_STATUS_SURFACE_TYPE_MISMATCH);
+ return;
+ }
+
+    /* XXX assert that the image belongs to this surface,
+     * i.e. image == surface->fallback */
+ cairo_surface_destroy (image);
+
+ assert (surface->map_count > 0);
+ if (--surface->map_count == 0)
+ cairo_surface_flush (&surface->base);
+}
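
As a usage sketch of the mapping API above (not part of the patch; fill_with_red and drm_surface are hypothetical names), drawing through the CPU-visible image returned by cairo_drm_surface_map() looks roughly like this, with ordinary cairo drawing calls in between:

/* Sketch only: drm_surface is assumed to be a surface obtained from
 * cairo_drm_surface_create(). */
static void
fill_with_red (cairo_surface_t *drm_surface)
{
    cairo_surface_t *image;
    cairo_t *cr;

    image = cairo_drm_surface_map (drm_surface);
    if (cairo_surface_status (image))
        return;

    cr = cairo_create (image);
    cairo_set_source_rgb (cr, 1, 0, 0);
    cairo_paint (cr);
    cairo_destroy (cr);

    /* releasing the last mapping flushes the result back to the surface */
    cairo_drm_surface_unmap (drm_surface, image);
}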
diff --git a/src/drm/cairo-drm.c b/src/drm/cairo-drm.c
new file mode 100644
index 00000000..9d227b3b
--- /dev/null
+++ b/src/drm/cairo-drm.c
@@ -0,0 +1,362 @@
+/* Cairo - a vector graphics library with display and print output
+ *
+ * Copyright © 2009 Chris Wilson
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it either under the terms of the GNU Lesser General Public
+ * License version 2.1 as published by the Free Software Foundation
+ * (the "LGPL") or, at your option, under the terms of the Mozilla
+ * Public License Version 1.1 (the "MPL"). If you do not alter this
+ * notice, a recipient may use your version of this file under either
+ * the MPL or the LGPL.
+ *
+ * You should have received a copy of the LGPL along with this library
+ * in the file COPYING-LGPL-2.1; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * You should have received a copy of the MPL along with this library
+ * in the file COPYING-MPL-1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License
+ * Version 1.1 (the "License"); you may not use this file except in
+ * compliance with the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY
+ * OF ANY KIND, either express or implied. See the LGPL or the MPL for
+ * the specific language governing rights and limitations.
+ *
+ * The Original Code is the cairo graphics library.
+ *
+ * The Initial Developer of the Original Code is Chris Wilson.
+ */
+
+#include "cairoint.h"
+
+#include "cairo-drm-private.h"
+
+#define LIBUDEV_I_KNOW_THE_API_IS_SUBJECT_TO_CHANGE
+#include <libudev.h>
+#include <fcntl.h>
+#include <unistd.h> /* open(), close() */
+
+static cairo_drm_device_t *_cairo_drm_known_devices;
+static cairo_drm_device_t *_cairo_drm_default_device;
+
+static const cairo_drm_device_t _nil_device = {
+ CAIRO_REFERENCE_COUNT_INVALID,
+ CAIRO_STATUS_NO_MEMORY
+};
+
+static const cairo_drm_device_t _invalid_device = {
+ CAIRO_REFERENCE_COUNT_INVALID,
+ CAIRO_STATUS_INVALID_CONTENT
+};
+
+cairo_drm_device_t *
+_cairo_drm_device_create_in_error (cairo_status_t status)
+{
+ switch ((int) status) {
+ default:
+ ASSERT_NOT_REACHED;
+ case CAIRO_STATUS_NO_MEMORY:
+ return (cairo_drm_device_t *) &_nil_device;
+ case CAIRO_STATUS_INVALID_CONTENT:
+ return (cairo_drm_device_t *) &_invalid_device;
+ }
+}
+
+static const char *
+get_udev_property (struct udev_device *device, const char *name)
+{
+ struct udev_list_entry *entry;
+
+ udev_list_entry_foreach (entry,
+ udev_device_get_properties_list_entry (device))
+ {
+ if (strcmp (udev_list_entry_get_name (entry), name) == 0)
+ return udev_list_entry_get_value (entry);
+ }
+
+ return NULL;
+}
+
+cairo_drm_device_t *
+_cairo_drm_device_init (cairo_drm_device_t *dev,
+ int fd,
+ dev_t devid,
+ int max_surface_size)
+{
+ CAIRO_REFERENCE_COUNT_INIT (&dev->ref_count, 1);
+ dev->status = CAIRO_STATUS_SUCCESS;
+
+ dev->id = devid;
+ dev->fd = fd;
+
+ dev->max_surface_size = max_surface_size;
+
+ dev->prev = NULL;
+ dev->next = _cairo_drm_known_devices;
+ if (_cairo_drm_known_devices != NULL)
+ _cairo_drm_known_devices->prev = dev;
+ _cairo_drm_known_devices = dev;
+
+ if (_cairo_drm_default_device == NULL)
+ _cairo_drm_default_device = cairo_drm_device_reference (dev);
+
+ return dev;
+}
+
+cairo_drm_device_t *
+cairo_drm_device_get (struct udev_device *device)
+{
+ static const struct dri_driver_entry {
+ uint32_t vendor_id;
+ uint32_t chip_id;
+ cairo_drm_device_create_func_t create_func;
+ } driver_map[] = {
+ { 0x8086, ~0, _cairo_drm_intel_device_create },
+ { 0x1002, ~0, _cairo_drm_radeon_device_create },
+#if CAIRO_HAS_GALLIUM_SURFACE
+ { ~0, ~0, _cairo_drm_gallium_device_create },
+#endif
+ };
+
+ cairo_drm_device_t *dev;
+ dev_t devid;
+ struct udev_device *parent;
+ const char *pci_id;
+ uint32_t vendor_id, chip_id;
+ const char *path;
+ int i, fd;
+
+ devid = udev_device_get_devnum (device);
+
+ CAIRO_MUTEX_LOCK (_cairo_drm_device_mutex);
+ for (dev = _cairo_drm_known_devices; dev != NULL; dev = dev->next) {
+ if (dev->id == devid) {
+ dev = cairo_drm_device_reference (dev);
+ goto DONE;
+ }
+ }
+
+ dev = (cairo_drm_device_t *) &_nil_device;
+ parent = udev_device_get_parent (device);
+    pci_id = get_udev_property (parent, "PCI_ID");
+    if (pci_id == NULL || sscanf (pci_id, "%x:%x", &vendor_id, &chip_id) != 2) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ goto DONE;
+ }
+
+#if CAIRO_HAS_GALLIUM_SURFACE
+ if (getenv ("CAIRO_GALLIUM_FORCE"))
+ {
+ i = ARRAY_LENGTH (driver_map) - 1;
+ }
+ else
+#endif
+ {
+ for (i = 0; i < ARRAY_LENGTH (driver_map); i++) {
+ if (driver_map[i].vendor_id == ~0U)
+ break;
+
+ if (driver_map[i].vendor_id == vendor_id &&
+ (driver_map[i].chip_id == ~0U || driver_map[i].chip_id == chip_id))
+ break;
+ }
+
+ if (i == ARRAY_LENGTH (driver_map)) {
+            /* XXX report "no compatible driver" rather than NO_MEMORY */
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ goto DONE;
+ }
+ }
+
+ path = udev_device_get_devnode (device);
+ if (path == NULL)
+ path = "/dev/dri/card0"; /* XXX buggy udev? */
+
+ fd = open (path, O_RDWR);
+ if (fd == -1) {
+ /* XXX more likely to be a permissions issue... */
+ _cairo_error_throw (CAIRO_STATUS_FILE_NOT_FOUND);
+ goto DONE;
+ }
+
+ dev = driver_map[i].create_func (fd, devid, vendor_id, chip_id);
+ if (dev == NULL)
+ close (fd);
+
+ DONE:
+ CAIRO_MUTEX_UNLOCK (_cairo_drm_device_mutex);
+
+ return dev;
+}
+slim_hidden_def (cairo_drm_device_get);
+
+cairo_drm_device_t *
+cairo_drm_device_get_for_fd (int fd)
+{
+ struct stat st;
+ struct udev *udev;
+ struct udev_device *device;
+ cairo_drm_device_t *dev = NULL;
+
+ if (fstat (fd, &st) < 0 || ! S_ISCHR (st.st_mode)) {
+        /* XXX would prefer CAIRO_STATUS_INVALID_DEVICE */
+        _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ return (cairo_drm_device_t *) &_nil_device;
+ }
+
+ udev = udev_new ();
+
+ device = udev_device_new_from_devnum (udev, 'c', st.st_rdev);
+ if (device != NULL) {
+ dev = cairo_drm_device_get (device);
+ udev_device_unref (device);
+ }
+
+ udev_unref (udev);
+
+ return dev;
+}
+
+cairo_drm_device_t *
+cairo_drm_device_default (void)
+{
+ struct udev *udev;
+ struct udev_enumerate *e;
+ struct udev_list_entry *entry;
+ cairo_drm_device_t *dev;
+
+ /* optimistic atomic pointer read */
+ dev = _cairo_drm_default_device;
+ if (dev != NULL)
+ return dev;
+
+    udev = udev_new ();
+ if (udev == NULL) {
+ _cairo_error_throw (CAIRO_STATUS_NO_MEMORY);
+ return (cairo_drm_device_t *) &_nil_device;
+ }
+
+ e = udev_enumerate_new (udev);
+ udev_enumerate_add_match_subsystem (e, "drm");
+ udev_enumerate_scan_devices (e);
+ udev_list_entry_foreach (entry, udev_enumerate_get_list_entry (e)) {
+ struct udev_device *device;
+
+ device =
+ udev_device_new_from_syspath (udev,
+ udev_list_entry_get_name (entry));
+
+ dev = cairo_drm_device_get (device);
+
+ udev_device_unref (device);
+
+ if (dev != NULL) {
+ if (dev->fd == -1) { /* try again, we may find a usable card */
+ cairo_drm_device_destroy (dev);
+ dev = NULL;
+ } else
+ break;
+ }
+ }
+ udev_enumerate_unref (e);
+ udev_unref (udev);
+
+ cairo_drm_device_destroy (dev); /* owned by _cairo_drm_default_device */
+ return dev;
+}
+slim_hidden_def (cairo_drm_device_default);
+
+void
+_cairo_drm_device_reset_static_data (void)
+{
+ if (_cairo_drm_default_device != NULL) {
+ cairo_drm_device_destroy (_cairo_drm_default_device);
+ _cairo_drm_default_device = NULL;
+ }
+}
+
+cairo_drm_device_t *
+cairo_drm_device_reference (cairo_drm_device_t *device)
+{
+ if (device == NULL ||
+ CAIRO_REFERENCE_COUNT_IS_INVALID (&device->ref_count))
+ {
+ return device;
+ }
+
+ assert (CAIRO_REFERENCE_COUNT_HAS_REFERENCE (&device->ref_count));
+ _cairo_reference_count_inc (&device->ref_count);
+
+ return device;
+}
+slim_hidden_def (cairo_drm_device_reference);
+
+int
+cairo_drm_device_get_fd (cairo_drm_device_t *device)
+{
+ if (device->status)
+ return -1;
+
+ return device->fd;
+}
+
+cairo_status_t
+cairo_drm_device_status (cairo_drm_device_t *device)
+{
+ if (device == NULL)
+ return CAIRO_STATUS_NULL_POINTER;
+
+ return device->status;
+}
+
+void
+_cairo_drm_device_fini (cairo_drm_device_t *device)
+{
+ CAIRO_MUTEX_LOCK (_cairo_drm_device_mutex);
+ if (device->prev != NULL)
+ device->prev->next = device->next;
+ else
+ _cairo_drm_known_devices = device->next;
+ if (device->next != NULL)
+ device->next->prev = device->prev;
+ CAIRO_MUTEX_UNLOCK (_cairo_drm_device_mutex);
+
+ if (device->fd != -1)
+ close (device->fd);
+}
+
+void
+cairo_drm_device_destroy (cairo_drm_device_t *device)
+{
+ if (device == NULL ||
+ CAIRO_REFERENCE_COUNT_IS_INVALID (&device->ref_count))
+ {
+ return;
+ }
+
+ assert (CAIRO_REFERENCE_COUNT_HAS_REFERENCE (&device->ref_count));
+ if (! _cairo_reference_count_dec_and_test (&device->ref_count))
+ return;
+
+ device->device.destroy (device);
+}
+slim_hidden_def (cairo_drm_device_destroy);
+
+void
+cairo_drm_device_throttle (cairo_drm_device_t *dev)
+{
+ cairo_status_t status;
+
+ if (unlikely (dev->status))
+ return;
+
+ if (dev->device.throttle == NULL)
+ return;
+
+ status = dev->device.throttle (dev);
+ if (unlikely (status))
+ _cairo_status_set_error (&dev->status, status);
+}
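
Putting the device and surface entry points together, a caller might use them roughly as follows (a sketch under the assumption that the new cairo-drm.h header is installed alongside cairo.h; create_accelerated_surface is a hypothetical name). The default device is owned by the library, so no reference is dropped here, and cairo_drm_surface_create() itself degrades to a plain image surface when the device cannot handle the request:

#include <cairo.h>
#include <cairo-drm.h>

/* Sketch only: create a DRM-backed surface, falling back to a plain image
 * surface if no usable DRM device is found. */
static cairo_surface_t *
create_accelerated_surface (int width, int height)
{
    cairo_drm_device_t *device;

    device = cairo_drm_device_default ();
    if (cairo_drm_device_status (device))
        return cairo_image_surface_create (CAIRO_FORMAT_ARGB32, width, height);

    return cairo_drm_surface_create (device, CAIRO_CONTENT_COLOR_ALPHA,
                                     width, height);
}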