blob: 03bae200a4fdc97a1f7e366ebb7791a0a370745b (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
|
#include <assert.h>
#include <stdio.h>
#include <gbm.h>
#include "common.h"
static struct gbm g_gbm;
/**
* After we have found a crtc+connector+mode combination,
* we need to actually create a suitable framebuffer that
* we can use with it. There are two ways to do that:
*
* - We create a so-called "dumb buffer"
* This is a buffer that we can mmap() and every driver
* supports this. We can use it for unaccelerated software
* rendering on the CPU.
*
* - We use libgbm to create buffers available for hardware
* accelerated rendering. libgbm is an abstraction layer
* that creates these buffers for each available DRM driver.
* As there is no generic API for this, each driver provides
* its own mechanism to create these buffers. We can then
* use such buffers to create OpenGL contexts with the Mesa3D
* library. (We do this here).
*/
const struct gbm * init_gbm(int drm_fd, int w, int h)
{
	/* Create the per-driver buffer manager on top of the DRM fd. */
	g_gbm.dev = gbm_create_device(drm_fd);
	if (!g_gbm.dev) {
		fprintf(stderr, "cannot create gbm device\n");
		return NULL;
	}

	/*
	 * XRGB8888 is the most widely supported scanout format; the buffers
	 * must be usable both as GL render targets and for CRTC scanout.
	 */
	g_gbm.surface = gbm_surface_create(g_gbm.dev, w, h,
					   GBM_FORMAT_XRGB8888,
					   GBM_BO_USE_RENDERING | GBM_BO_USE_SCANOUT);
	if (!g_gbm.surface) {
		fprintf(stderr, "failed to create gbm surface\n");
		/* Unwind the device so a failed init leaves no live handles. */
		gbm_device_destroy(g_gbm.dev);
		g_gbm.dev = NULL;
		return NULL;
	}

	g_gbm.width = w;
	g_gbm.height = h;
	return &g_gbm;
}
void deinit_gbm()
{
assert(g_gbm.surface && g_gbm.dev);
gbm_surface_destroy(g_gbm.surface);
gbm_device_destroy(g_gbm.dev);
}
|