summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorMarcin Slusarz <marcin.slusarz@gmail.com>2011-06-05 19:57:14 +0200
committerMarcin Slusarz <marcin.slusarz@gmail.com>2011-09-07 14:52:28 +0200
commit84556ed7a14d696bac6b63beafa51ce4ac6cb639 (patch)
treea52705ac5f9dfff8f82e19a181843d6866c638dc
parent0fa8437054924a0463ae9001bd65587b564d46f7 (diff)
core: add support for drm core and nouveau ioctls
-rw-r--r--coregrind/m_syswrap/syswrap-linux.c1066
-rw-r--r--include/vki/vki-linux.h579
2 files changed, 1645 insertions, 0 deletions
diff --git a/coregrind/m_syswrap/syswrap-linux.c b/coregrind/m_syswrap/syswrap-linux.c
index 1cfd7b78..9399cdbf 100644
--- a/coregrind/m_syswrap/syswrap-linux.c
+++ b/coregrind/m_syswrap/syswrap-linux.c
@@ -59,6 +59,7 @@
#include "priv_syswrap-generic.h"
#include "priv_syswrap-linux.h"
+#define max(a,b) ((a)>(b)?(a):(b))
// Run a thread from beginning to end and return the thread's
// scheduler-return-code.
@@ -5022,6 +5023,683 @@ PRE(sys_ioctl)
sizeof(struct vki_sockaddr));
}
break;
+ case VKI_DRM_IOCTL_VERSION:
+ if (ARG3) {
+ struct vki_drm_version *req = (struct vki_drm_version *)ARG3;
+
+ PRE_FIELD_WRITE("ioctl(DRM_VERSION).version_major", req->version_major);
+ PRE_FIELD_WRITE("ioctl(DRM_VERSION).version_minor", req->version_minor);
+ PRE_FIELD_WRITE("ioctl(DRM_VERSION).version_patchlevel", req->version_patchlevel);
+
+ PRE_FIELD_READ("ioctl(DRM_VERSION).name_len", req->name_len);
+ PRE_FIELD_READ("ioctl(DRM_VERSION).date_len", req->date_len);
+ PRE_FIELD_READ("ioctl(DRM_VERSION).desc_len", req->desc_len);
+
+ PRE_MEM_WRITE("ioctl(DRM_VERSION).name[]", (Addr)req->name, req->name_len);
+ PRE_MEM_WRITE("ioctl(DRM_VERSION).date[]", (Addr)req->date, req->date_len);
+ PRE_MEM_WRITE("ioctl(DRM_VERSION).desc[]", (Addr)req->desc, req->desc_len);
+
+ PRE_FIELD_WRITE("ioctl(DRM_VERSION).name_len", req->name_len);
+ PRE_FIELD_WRITE("ioctl(DRM_VERSION).date_len", req->date_len);
+ PRE_FIELD_WRITE("ioctl(DRM_VERSION).desc_len", req->desc_len);
+ }
+ break;
+ case VKI_DRM_IOCTL_GET_UNIQUE:
+ if (ARG3) {
+ struct vki_drm_unique *req = (struct vki_drm_unique *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_UNIQUE).unique_len", req->unique_len);
+ if (req->unique_len) {
+ PRE_FIELD_READ("ioctl(DRM_UNIQUE).unique", req->unique);
+ PRE_MEM_WRITE("ioctl(DRM_UNIQUE).unique[]", (Addr)req->unique, req->unique_len);
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_UNIQUE).unique_len", req->unique_len);
+ }
+ break;
+ case VKI_DRM_IOCTL_GET_MAGIC:
+ if (ARG3) {
+ struct vki_drm_auth *req = (struct vki_drm_auth *)ARG3;
+
+ PRE_FIELD_WRITE("ioctl(DRM_GET_MAGIC).magic", req->magic);
+ }
+ break;
+ case VKI_DRM_IOCTL_IRQ_BUSID:
+ if (ARG3) {
+ struct vki_drm_irq_busid *req = (struct vki_drm_irq_busid *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_IRQ_BUSID).busnum", req->busnum);
+ PRE_FIELD_READ("ioctl(DRM_IRQ_BUSID).devnum", req->devnum);
+ PRE_FIELD_READ("ioctl(DRM_IRQ_BUSID).funcnum", req->funcnum);
+ PRE_FIELD_WRITE("ioctl(DRM_IRQ_BUSID).irq", req->irq);
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_CLOSE:
+ if (ARG3) {
+ struct vki_drm_gem_close *req = (struct vki_drm_gem_close *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_GEM_CLOSE).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_FLINK:
+ if (ARG3) {
+ struct vki_drm_gem_flink *req = (struct vki_drm_gem_flink *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_GEM_FLINK).handle", req->handle);
+ PRE_FIELD_WRITE("ioctl(DRM_GEM_FLINK).name", req->name);
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_OPEN:
+ if (ARG3) {
+ struct vki_drm_gem_open *req = (struct vki_drm_gem_open *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_GEM_OPEN).name", req->name);
+ PRE_FIELD_WRITE("ioctl(DRM_GEM_OPEN).handle", req->handle);
+ PRE_FIELD_WRITE("ioctl(DRM_GEM_OPEN).size", req->size);
+ }
+ break;
+ case VKI_DRM_IOCTL_SET_MASTER:
+ break;
+ case VKI_DRM_IOCTL_DROP_MASTER:
+ break;
+ case VKI_DRM_IOCTL_ADD_CTX:
+ if (ARG3) {
+ struct vki_drm_ctx *req = (struct vki_drm_ctx *)ARG3;
+
+ PRE_FIELD_WRITE("ioctl(DRM_ADD_CTX).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_RM_CTX:
+ if (ARG3) {
+ struct vki_drm_ctx *req = (struct vki_drm_ctx *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_RM_CTX).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_MOD_CTX:
+ break;
+ case VKI_DRM_IOCTL_GET_CTX:
+ if (ARG3) {
+ struct vki_drm_ctx *req = (struct vki_drm_ctx *)ARG3;
+
+ PRE_FIELD_WRITE("ioctl(DRM_GET_CTX).flags", req->flags);
+ }
+ break;
+ case VKI_DRM_IOCTL_SWITCH_CTX:
+ if (ARG3) {
+ struct vki_drm_ctx *req = (struct vki_drm_ctx *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_SWITCH_CTX).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_NEW_CTX:
+ if (ARG3) {
+ struct vki_drm_ctx *req = (struct vki_drm_ctx *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NEW_CTX).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_RES_CTX:
+ if (ARG3) {
+ struct vki_drm_ctx_res *req = (struct vki_drm_ctx_res *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_RES_CTX).count", req->count);
+ if (req->count) {
+ PRE_FIELD_READ("ioctl(DRM_RES_CTX).contexts", req->contexts);
+
+ PRE_MEM_WRITE("ioctl(DRM_RES_CTX).contexts[]", (Addr)req->contexts,
+ req->count * sizeof (req->contexts[0]));
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_RES_CTX).count", req->count);
+ }
+ break;
+ case VKI_DRM_IOCTL_WAIT_VBLANK:
+ if (ARG3) {
+ union vki_drm_wait_vblank *req = (union vki_drm_wait_vblank *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_WAIT_VBLANK).request.type", req->request.type);
+ PRE_FIELD_READ("ioctl(DRM_WAIT_VBLANK).request.sequence", req->request.sequence);
+
+ if (req->request.type & VKI_DRM_VBLANK_EVENT) {
+ PRE_FIELD_READ("ioctl(DRM_WAIT_VBLANK).request.signal", req->request.signal);
+ } else {
+ PRE_FIELD_WRITE("ioctl(DRM_WAIT_VBLANK).reply.tval_sec", req->reply.tval_sec);
+ PRE_FIELD_WRITE("ioctl(DRM_WAIT_VBLANK).reply.tval_usec", req->reply.tval_usec);
+ }
+
+ PRE_FIELD_WRITE("ioctl(DRM_WAIT_VBLANK).reply.sequence", req->reply.sequence);
+ }
+ break;
+ case VKI_DRM_IOCTL_UPDATE_DRAW:
+ break;
+ case VKI_DRM_IOCTL_MODE_GETRESOURCES:
+ if (ARG3) {
+ struct vki_drm_mode_card_res *req = (struct vki_drm_mode_card_res *)ARG3;
+
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETRESOURCES).min_width", req->min_width);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETRESOURCES).max_width", req->max_width);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETRESOURCES).min_height", req->min_height);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETRESOURCES).max_height", req->max_height);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETRESOURCES).count_fbs", req->count_fbs);
+ if (req->count_fbs) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETRESOURCES).fb_id_ptr", req->fb_id_ptr);
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETRESOURCES).fb_id_ptr[]", (Addr)req->fb_id_ptr,
+ req->count_fbs * sizeof (__vki_u32));
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETRESOURCES).count_fbs", req->count_fbs);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETRESOURCES).count_crtcs", req->count_crtcs);
+ if (req->count_crtcs) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETRESOURCES).crtc_id_ptr", req->crtc_id_ptr);
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETRESOURCES).crtc_id_ptr[]", (Addr)req->crtc_id_ptr,
+ req->count_crtcs * sizeof (__vki_u32));
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETRESOURCES).count_crtcs", req->count_crtcs);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETRESOURCES).count_encoders", req->count_encoders);
+ if (req->count_encoders) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETRESOURCES).encoder_id_ptr", req->encoder_id_ptr);
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETRESOURCES).encoder_id_ptr[]", (Addr)req->encoder_id_ptr,
+ req->count_encoders * sizeof (__vki_u32));
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETRESOURCES).count_encoders", req->count_encoders);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETRESOURCES).count_connectors", req->count_connectors);
+ if (req->count_connectors) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETRESOURCES).connector_id_ptr", req->connector_id_ptr);
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETRESOURCES).connector_id_ptr[]", (Addr)req->connector_id_ptr,
+ req->count_connectors * sizeof (__vki_u32));
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETRESOURCES).count_connectors", req->count_connectors);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_GETCRTC):
+ if (ARG3) {
+ struct vki_drm_mode_crtc *req = (struct vki_drm_mode_crtc *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETCRTC).crtc_id", req->crtc_id);
+
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).x", req->x);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).y", req->y);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).gamma_size", req->gamma_size);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).fb_id", req->fb_id);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode_valid", req->mode_valid);
+
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.clock", req->mode.clock);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.hdisplay", req->mode.hdisplay);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.hsync_start", req->mode.hsync_start);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.hsync_end", req->mode.hsync_end);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.htotal", req->mode.htotal);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.hskew", req->mode.hskew);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.vdisplay", req->mode.vdisplay);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.vsync_start", req->mode.vsync_start);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.vsync_end", req->mode.vsync_end);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.vtotal", req->mode.vtotal);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.vscan", req->mode.vscan);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.vrefresh", req->mode.vrefresh);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.flags", req->mode.flags);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.type", req->mode.type);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCRTC).mode.name", req->mode.name);
+
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_SETCRTC):
+ if (ARG3) {
+ struct vki_drm_mode_crtc *req = (struct vki_drm_mode_crtc *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).crtc_id", req->crtc_id);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode_valid", req->mode_valid);
+
+ if (req->mode_valid) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).fb_id", req->fb_id);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.clock", req->mode.clock);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.hdisplay", req->mode.hdisplay);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.hsync_start", req->mode.hsync_start);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.hsync_end", req->mode.hsync_end);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.htotal", req->mode.htotal);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.hskew", req->mode.hskew);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.vdisplay", req->mode.vdisplay);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.vsync_start", req->mode.vsync_start);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.vsync_end", req->mode.vsync_end);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.vtotal", req->mode.vtotal);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.vscan", req->mode.vscan);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.vrefresh", req->mode.vrefresh);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.flags", req->mode.flags);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.type", req->mode.type);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).mode.name", req->mode.name);
+
+ }
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).count_connectors", req->count_connectors);
+ if (req->count_connectors > 0) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).set_connectors_ptr", req->set_connectors_ptr);
+ PRE_MEM_READ("ioctl(DRM_MODE_SETCRTC).set_connectors_ptr[]",
+ (Addr)req->set_connectors_ptr, req->count_connectors * sizeof(__vki_u32));
+ }
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).x", req->x);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETCRTC).y", req->y);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_CURSOR):
+ if (ARG3) {
+ struct vki_drm_mode_cursor *req = (struct vki_drm_mode_cursor *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_CURSOR).flags", req->flags);
+ if (req->flags)
+ PRE_FIELD_READ("ioctl(DRM_MODE_CURSOR).crtc_id", req->crtc_id);
+
+ if (req->flags & VKI_DRM_MODE_CURSOR_BO) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_CURSOR).handle", req->handle);
+ PRE_FIELD_READ("ioctl(DRM_MODE_CURSOR).width", req->width);
+ PRE_FIELD_READ("ioctl(DRM_MODE_CURSOR).height", req->height);
+ }
+
+ if (req->flags & VKI_DRM_MODE_CURSOR_MOVE) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_CURSOR).x", req->x);
+ PRE_FIELD_READ("ioctl(DRM_MODE_CURSOR).y", req->y);
+ }
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_GETGAMMA):
+ if (ARG3) {
+ struct vki_drm_mode_crtc_lut *req = (struct vki_drm_mode_crtc_lut *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETGAMMA).crtc_id", req->crtc_id);
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETGAMMA).gamma_size", req->gamma_size);
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETGAMMA).red", req->red);
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETGAMMA).green", req->green);
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETGAMMA).blue", req->blue);
+
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETGAMMA).red[]", (Addr)req->red, req->gamma_size * sizeof(__vki_u16));
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETGAMMA).green[]", (Addr)req->green, req->gamma_size * sizeof(__vki_u16));
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETGAMMA).blue[]", (Addr)req->blue, req->gamma_size * sizeof(__vki_u16));
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_SETGAMMA):
+ if (ARG3) {
+ struct vki_drm_mode_crtc_lut *req = (struct vki_drm_mode_crtc_lut *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETGAMMA).crtc_id", req->crtc_id);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETGAMMA).gamma_size", req->gamma_size);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETGAMMA).red", req->red);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETGAMMA).green", req->green);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETGAMMA).blue", req->blue);
+
+ PRE_MEM_READ("ioctl(DRM_MODE_SETGAMMA).red[]", (Addr)req->red, req->gamma_size * sizeof(__vki_u16));
+ PRE_MEM_READ("ioctl(DRM_MODE_SETGAMMA).green[]", (Addr)req->green, req->gamma_size * sizeof(__vki_u16));
+ PRE_MEM_READ("ioctl(DRM_MODE_SETGAMMA).blue[]", (Addr)req->blue, req->gamma_size * sizeof(__vki_u16));
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_GETENCODER:
+ if (ARG3) {
+ struct vki_drm_mode_get_encoder *req = (struct vki_drm_mode_get_encoder *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETENCODER).encoder_id", req->encoder_id);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETENCODER).crtc_id", req->crtc_id);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETENCODER).encoder_type", req->encoder_type);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETENCODER).encoder_id", req->encoder_id);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETENCODER).possible_clones", req->possible_clones);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETENCODER).possible_crtcs", req->possible_crtcs);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_GETCONNECTOR:
+ if (ARG3) {
+ struct vki_drm_mode_get_connector *req = (struct vki_drm_mode_get_connector *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETCONNECTOR).connector_id", req->connector_id);
+
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).connector_id", req->connector_id);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).connector_type", req->connector_type);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).connector_type_id", req->connector_type_id);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).mm_width", req->mm_width);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).mm_height", req->mm_height);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).subpixel", req->subpixel);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).connection", req->connection);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).encoder_id", req->encoder_id);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETCONNECTOR).count_modes", req->count_modes);
+ if (req->count_modes > 0) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETCONNECTOR).modes_ptr", req->modes_ptr);
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETCONNECTOR).modes_ptr[]", (Addr)req->modes_ptr,
+ req->count_modes * sizeof(struct vki_drm_mode_modeinfo));
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).count_modes", req->count_modes);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETCONNECTOR).count_props", req->count_props);
+ if (req->count_props > 0) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETCONNECTOR).props_ptr", req->props_ptr);
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETCONNECTOR).prop_values_ptr", req->prop_values_ptr);
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETCONNECTOR).props_ptr[]", (Addr)req->props_ptr,
+ req->count_props * sizeof(__vki_u32));
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETCONNECTOR).prop_values_ptr[]", (Addr)req->prop_values_ptr,
+ req->count_props * sizeof(__vki_u64));
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).count_props", req->count_props);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETCONNECTOR).count_encoders", req->count_encoders);
+ if (req->count_encoders > 0) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETCONNECTOR).encoders_ptr", req->encoders_ptr);
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETCONNECTOR).encoders_ptr[]", (Addr)req->encoders_ptr,
+ req->count_encoders * sizeof(__vki_u32));
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETCONNECTOR).count_encoders", req->count_encoders);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_ATTACHMODE):
+ if (ARG3) {
+ struct vki_drm_mode_mode_cmd *req = (struct vki_drm_mode_mode_cmd *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).connector_id", req->connector_id);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.clock", req->mode.clock);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.hdisplay", req->mode.hdisplay);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.hsync_start", req->mode.hsync_start);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.hsync_end", req->mode.hsync_end);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.htotal", req->mode.htotal);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.hskew", req->mode.hskew);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.vdisplay", req->mode.vdisplay);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.vsync_start", req->mode.vsync_start);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.vsync_end", req->mode.vsync_end);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.vtotal", req->mode.vtotal);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.vscan", req->mode.vscan);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.vrefresh", req->mode.vrefresh);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.flags", req->mode.flags);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.type", req->mode.type);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ATTACHMODE).mode.name", req->mode.name);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_DETACHMODE):
+ if (ARG3) {
+ struct vki_drm_mode_mode_cmd *req = (struct vki_drm_mode_mode_cmd *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).connector_id", req->connector_id);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.clock", req->mode.clock);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.hdisplay", req->mode.hdisplay);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.hsync_start", req->mode.hsync_start);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.hsync_end", req->mode.hsync_end);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.htotal", req->mode.htotal);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.hskew", req->mode.hskew);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.vdisplay", req->mode.vdisplay);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.vsync_start", req->mode.vsync_start);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.vsync_end", req->mode.vsync_end);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.vtotal", req->mode.vtotal);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.vscan", req->mode.vscan);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.vrefresh", req->mode.vrefresh);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.flags", req->mode.flags);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.type", req->mode.type);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DETACHMODE).mode.name", req->mode.name);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_GETPROPERTY:
+ if (ARG3) {
+ struct vki_drm_mode_get_property *req = (struct vki_drm_mode_get_property *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETPROPERTY).prop_id", req->prop_id);
+
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETPROPERTY).name", req->name);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETPROPERTY).flags", req->flags);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETPROPERTY).count_values", req->count_values);
+ // TODO: figure out how many bytes kernel is going to write, based on type of property
+ if (req->count_values)
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETPROPERTY).values_ptr", req->values_ptr);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETPROPERTY).count_values", req->count_values);
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETPROPERTY).count_enum_blobs", req->count_enum_blobs);
+ // TODO: as above
+ if (req->count_enum_blobs) {
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETPROPERTY).enum_blob_ptr", req->enum_blob_ptr);
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETPROPERTY).values_ptr", req->values_ptr);
+ }
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETPROPERTY).count_enum_blobs", req->count_enum_blobs);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_SETPROPERTY):
+ if (ARG3) {
+ struct vki_drm_mode_connector_set_property *req = (struct vki_drm_mode_connector_set_property *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETPROPERTY).connector_id", req->connector_id);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETPROPERTY).prop_id", req->prop_id);
+ PRE_FIELD_READ("ioctl(DRM_MODE_SETPROPERTY).value", req->value);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_GETPROPBLOB:
+ if (ARG3) {
+ struct vki_drm_mode_get_blob *req = (struct vki_drm_mode_get_blob *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETPROPBLOB).blob_id", req->blob_id);
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETPROPBLOB).length", req->length);
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETPROPBLOB).data", req->data);
+
+ PRE_MEM_WRITE("ioctl(DRM_MODE_GETPROPBLOB).data[]", (Addr)req->data, req->length);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETPROPBLOB).length", req->length);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_GETFB:
+ if (ARG3) {
+ struct vki_drm_mode_fb_cmd *req = (struct vki_drm_mode_fb_cmd *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_GETFB).fb_id", req->fb_id);
+
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETFB).height", req->height);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETFB).width", req->width);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETFB).depth", req->depth);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETFB).bpp", req->bpp);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETFB).pitch", req->pitch);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_GETFB).handle", req->handle);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_ADDFB):
+ if (ARG3) {
+ struct vki_drm_mode_fb_cmd *req = (struct vki_drm_mode_fb_cmd *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_ADDFB).width", req->width);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ADDFB).height", req->height);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ADDFB).handle", req->handle);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ADDFB).pitch", req->pitch);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ADDFB).bpp", req->bpp);
+ PRE_FIELD_READ("ioctl(DRM_MODE_ADDFB).depth", req->depth);
+
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_ADDFB).fb_id", req->fb_id);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_RMFB):
+ PRE_MEM_READ("ioctl(DRM_MODE_RMFB)", ARG3, sizeof(unsigned int));
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_PAGE_FLIP):
+ if (ARG3) {
+ struct vki_drm_mode_crtc_page_flip *req = (struct vki_drm_mode_crtc_page_flip *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_PAGE_FLIP).flags", req->flags);
+ PRE_FIELD_READ("ioctl(DRM_MODE_PAGE_FLIP).reserved", req->reserved);
+ PRE_FIELD_READ("ioctl(DRM_MODE_PAGE_FLIP).crtc_id", req->crtc_id);
+ PRE_FIELD_READ("ioctl(DRM_MODE_PAGE_FLIP).fb_id", req->fb_id);
+ PRE_FIELD_READ("ioctl(DRM_MODE_PAGE_FLIP).user_data", req->user_data);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_DIRTYFB):
+ if (ARG3) {
+ struct vki_drm_mode_fb_dirty_cmd *req = (struct vki_drm_mode_fb_dirty_cmd *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_DIRTYFB).fb_id", req->fb_id);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DIRTYFB).num_clips", req->num_clips);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DIRTYFB).flags", req->flags);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DIRTYFB).color", req->color);
+ PRE_FIELD_READ("ioctl(DRM_MODE_DIRTYFB).clips_ptr", req->clips_ptr);
+ PRE_MEM_READ("ioctl(DRM_MODE_DIRTYFB).clips_ptr[]", (Addr)req->clips_ptr,
+ req->num_clips * sizeof(struct vki_drm_clip_rect));
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_CREATE_DUMB:
+ if (ARG3) {
+ struct vki_drm_mode_create_dumb *req = (struct vki_drm_mode_create_dumb *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_CREATE_DUMB).width", req->width);
+ PRE_FIELD_READ("ioctl(DRM_MODE_CREATE_DUMB).height", req->height);
+ PRE_FIELD_READ("ioctl(DRM_MODE_CREATE_DUMB).bpp", req->bpp);
+
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_CREATE_DUMB).pitch", req->pitch);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_CREATE_DUMB).size", req->size);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_CREATE_DUMB).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_MAP_DUMB:
+ if (ARG3) {
+ struct vki_drm_mode_map_dumb *req = (struct vki_drm_mode_map_dumb *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_MAP_DUMB).handle", req->handle);
+ PRE_FIELD_WRITE("ioctl(DRM_MODE_MAP_DUMB).offset", req->offset);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_DESTROY_DUMB:
+ if (ARG3) {
+ struct vki_drm_mode_destroy_dumb *req = (struct vki_drm_mode_destroy_dumb *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_MODE_DESTROY_DUMB).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GETPARAM:
+ if (ARG3) {
+ struct vki_drm_nouveau_getparam *req = (struct vki_drm_nouveau_getparam *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GETPARAM).param", req->param);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GETPARAM).value", req->value);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_SETPARAM:
+ if (ARG3) {
+ struct vki_drm_nouveau_setparam *req = (struct vki_drm_nouveau_setparam *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_SETPARAM).param", req->param);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_SETPARAM).value", req->value);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC:
+ if (ARG3) {
+ struct vki_drm_nouveau_channel_alloc *req = (struct vki_drm_nouveau_channel_alloc *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_CHANNEL_ALLOC).fb_ctxdma_handle", req->fb_ctxdma_handle);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_CHANNEL_ALLOC).tt_ctxdma_handle", req->tt_ctxdma_handle);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_CHANNEL_ALLOC).channel", req->channel);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_CHANNEL_ALLOC).pushbuf_domains", req->pushbuf_domains);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_CHANNEL_ALLOC).nr_subchan", req->nr_subchan);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_CHANNEL_ALLOC).subchan[0].handle", req->subchan[0].handle);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_CHANNEL_ALLOC).subchan[0].grclass", req->subchan[0].grclass);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_CHANNEL_ALLOC).notifier_handle", req->notifier_handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_CHANNEL_FREE:
+ if (ARG3) {
+ struct vki_drm_nouveau_channel_free *req = (struct vki_drm_nouveau_channel_free *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_CHANNEL_FREE).channel", req->channel);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GROBJ_ALLOC:
+ if (ARG3) {
+ struct vki_drm_nouveau_grobj_alloc *req = (struct vki_drm_nouveau_grobj_alloc *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GROBJ_ALLOC).handle", req->handle);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GROBJ_ALLOC).channel", req->channel);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GROBJ_ALLOC).class", req->class);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC:
+ if (ARG3) {
+ struct vki_drm_nouveau_notifierobj_alloc *req = (struct vki_drm_nouveau_notifierobj_alloc *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC).channel", req->channel);
+			PRE_FIELD_READ("ioctl(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC).handle", req->handle);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC).size", req->size);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_NOTIFIEROBJ_ALLOC).offset", req->offset);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GPUOBJ_FREE:
+ if (ARG3) {
+ struct vki_drm_nouveau_gpuobj_free *req = (struct vki_drm_nouveau_gpuobj_free *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GPUOBJ_FREE).channel", req->channel);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GPUOBJ_FREE).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GEM_NEW:
+ if (ARG3) {
+ struct vki_drm_nouveau_gem_new *req = (struct vki_drm_nouveau_gem_new *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_NEW).info.tile_flags", req->info.tile_flags);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_NEW).channel_hint", req->channel_hint);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_NEW).info.size", req->info.size);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_NEW).align", req->align);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_NEW).info.domain", req->info.domain);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_NEW).info.tile_mode", req->info.tile_mode);
+
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_NEW).info.domain", req->info.domain);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_NEW).info.size", req->info.size);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_NEW).info.offset", req->info.offset);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_NEW).info.map_handle", req->info.map_handle);
+			PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_NEW).info.tile_mode", req->info.tile_mode);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_NEW).info.tile_flags", req->info.tile_flags);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_NEW).info.handle", req->info.handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GEM_PUSHBUF:
+ if (ARG3) {
+ struct vki_drm_nouveau_gem_pushbuf *req = (struct vki_drm_nouveau_gem_pushbuf *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).channel", req->channel);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).nr_push", req->nr_push);
+ if (req->nr_push) {
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).push", req->push);
+ PRE_MEM_READ("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).push[]", (Addr)req->push,
+ req->nr_push * sizeof(struct vki_drm_nouveau_gem_pushbuf_push));
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).nr_buffers", req->nr_buffers);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).buffers", req->buffers);
+ if (req->nr_buffers)
+ PRE_MEM_READ("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).buffers[]", (Addr)req->buffers,
+ req->nr_buffers * sizeof(struct vki_drm_nouveau_gem_pushbuf_bo));
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).suffix0", req->suffix0);
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).nr_relocs", req->nr_relocs);
+ }
+
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).vram_available", req->vram_available);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).gart_available", req->gart_available);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).suffix0", req->suffix0);
+ PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_PUSHBUF).suffix1", req->suffix1);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GEM_CPU_PREP:
+ if (ARG3) {
+ struct vki_drm_nouveau_gem_cpu_prep *req = (struct vki_drm_nouveau_gem_cpu_prep *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_CPU_PREP).flags", req->flags);
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_CPU_PREP).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GEM_CPU_FINI:
+ if (ARG3) {
+ struct vki_drm_nouveau_gem_cpu_fini *req = (struct vki_drm_nouveau_gem_cpu_fini *)ARG3;
+
+ PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_CPU_FINI).handle", req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GEM_INFO:
+ if (ARG3) {
+ struct vki_drm_nouveau_gem_new *req = (struct vki_drm_nouveau_gem_new *)ARG3;
+
+			PRE_FIELD_READ("ioctl(DRM_NOUVEAU_GEM_INFO).info.handle", req->info.handle);
+
+			PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_INFO).info.domain", req->info.domain);
+			PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_INFO).info.size", req->info.size);
+			PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_INFO).info.offset", req->info.offset);
+			PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_INFO).info.map_handle", req->info.map_handle);
+			PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_INFO).info.tile_mode", req->info.tile_mode);
+			PRE_FIELD_WRITE("ioctl(DRM_NOUVEAU_GEM_INFO).info.tile_flags", req->info.tile_flags);
+ }
+ break;
default:
/* EVIOC* are variable length and return size written on success */
@@ -5837,6 +6515,394 @@ POST(sys_ioctl)
sizeof(struct vki_sockaddr));
}
break;
+ case VKI_DRM_IOCTL_VERSION:
+ if (ARG3) {
+ struct vki_drm_version *req = (struct vki_drm_version *)ARG3;
+
+ POST_FIELD_WRITE(req->version_major);
+ POST_FIELD_WRITE(req->version_minor);
+ POST_FIELD_WRITE(req->version_patchlevel);
+
+ if (req->name_len && req->name)
+ POST_MEM_WRITE((Addr)req->name, req->name_len);
+ if (req->date_len && req->date)
+ POST_MEM_WRITE((Addr)req->date, req->date_len);
+ if (req->desc_len && req->desc)
+ POST_MEM_WRITE((Addr)req->desc, req->desc_len);
+
+ POST_FIELD_WRITE(req->name_len);
+ POST_FIELD_WRITE(req->date_len);
+ POST_FIELD_WRITE(req->desc_len);
+ }
+ break;
+ case VKI_DRM_IOCTL_GET_UNIQUE:
+ if (ARG3) {
+ struct vki_drm_unique *req = (struct vki_drm_unique *)ARG3;
+
+ if (req->unique_len && req->unique)
+ POST_MEM_WRITE((Addr)req->unique, req->unique_len);
+ POST_FIELD_WRITE(req->unique_len);
+ }
+ break;
+ case VKI_DRM_IOCTL_GET_MAGIC:
+ if (ARG3) {
+ struct vki_drm_auth *req = (struct vki_drm_auth *)ARG3;
+
+ POST_FIELD_WRITE(req->magic);
+ }
+ break;
+ case VKI_DRM_IOCTL_IRQ_BUSID:
+ if (ARG3) {
+ struct vki_drm_irq_busid *req = (struct vki_drm_irq_busid *)ARG3;
+
+ POST_FIELD_WRITE(req->irq);
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_CLOSE:
+ break;
+ case VKI_DRM_IOCTL_GEM_FLINK:
+ if (ARG3) {
+ struct vki_drm_gem_flink *req = (struct vki_drm_gem_flink *)ARG3;
+
+ POST_FIELD_WRITE(req->name);
+ }
+ break;
+ case VKI_DRM_IOCTL_GEM_OPEN:
+ if (ARG3) {
+ struct vki_drm_gem_open *req = (struct vki_drm_gem_open *)ARG3;
+
+ POST_FIELD_WRITE(req->handle);
+ POST_FIELD_WRITE(req->size);
+ }
+ break;
+ case VKI_DRM_IOCTL_SET_MASTER:
+ break;
+ case VKI_DRM_IOCTL_DROP_MASTER:
+ break;
+ case VKI_DRM_IOCTL_ADD_CTX:
+ if (ARG3) {
+ struct vki_drm_ctx *req = (struct vki_drm_ctx *)ARG3;
+
+ POST_FIELD_WRITE(req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_RM_CTX:
+ break;
+ case VKI_DRM_IOCTL_MOD_CTX:
+ break;
+ case VKI_DRM_IOCTL_GET_CTX:
+ if (ARG3) {
+ struct vki_drm_ctx *req = (struct vki_drm_ctx *)ARG3;
+
+ POST_FIELD_WRITE(req->flags);
+ }
+ break;
+ case VKI_DRM_IOCTL_SWITCH_CTX:
+ break;
+ case VKI_DRM_IOCTL_NEW_CTX:
+ break;
+ case VKI_DRM_IOCTL_RES_CTX:
+ if (ARG3) {
+ struct vki_drm_ctx_res *req = (struct vki_drm_ctx_res *)ARG3;
+
+ if (req->count && req->contexts) {
+ POST_MEM_WRITE((Addr)req->contexts,
+ req->count * sizeof (req->contexts[0]));
+ }
+ POST_FIELD_WRITE(req->count);
+ }
+ break;
+ case VKI_DRM_IOCTL_WAIT_VBLANK:
+ if (ARG3) {
+ union vki_drm_wait_vblank *req = (union vki_drm_wait_vblank *)ARG3;
+
+ if (!(req->request.type & VKI_DRM_VBLANK_EVENT)) {
+ POST_FIELD_WRITE(req->reply.tval_sec);
+ POST_FIELD_WRITE(req->reply.tval_usec);
+ }
+
+ POST_FIELD_WRITE(req->reply.sequence);
+ }
+ break;
+ case VKI_DRM_IOCTL_UPDATE_DRAW:
+ break;
+ case VKI_DRM_IOCTL_MODE_GETRESOURCES:
+ if (ARG3) {
+ struct vki_drm_mode_card_res *req = (struct vki_drm_mode_card_res *)ARG3;
+
+ POST_FIELD_WRITE(req->min_width);
+ POST_FIELD_WRITE(req->max_width);
+ POST_FIELD_WRITE(req->min_height);
+ POST_FIELD_WRITE(req->max_height);
+
+ // everything below is not quite true - kernel always writes to
+ // count_*, but writes to *_ptr only if initial count_* is bigger
+ // than needed; we don't have initial count_* in POST ioctl, so it's
+ // impossible to tell whether kernel wrote to *_ptr or not;
+ // so assume "null *_ptr" means "query count_*"
+ POST_FIELD_WRITE(req->count_fbs);
+ if (req->count_fbs && req->fb_id_ptr)
+ POST_MEM_WRITE((Addr)req->fb_id_ptr, req->count_fbs * sizeof (__vki_u32));
+
+ POST_FIELD_WRITE(req->count_crtcs);
+ if (req->count_crtcs && req->crtc_id_ptr)
+ POST_MEM_WRITE((Addr)req->crtc_id_ptr, req->count_crtcs * sizeof (__vki_u32));
+
+ POST_FIELD_WRITE(req->count_encoders);
+ if (req->count_encoders && req->encoder_id_ptr)
+ POST_MEM_WRITE((Addr)req->encoder_id_ptr, req->count_encoders * sizeof (__vki_u32));
+
+ POST_FIELD_WRITE(req->count_connectors);
+ if (req->count_connectors && req->connector_id_ptr)
+ POST_MEM_WRITE((Addr)req->connector_id_ptr, req->count_connectors * sizeof (__vki_u32));
+ }
+ break;
+   VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_GETCRTC):
+      if (ARG3) {
+         struct vki_drm_mode_crtc *req = (struct vki_drm_mode_crtc *)ARG3;
+
+         POST_FIELD_WRITE(req->x);
+         POST_FIELD_WRITE(req->y);
+         POST_FIELD_WRITE(req->gamma_size);
+         POST_FIELD_WRITE(req->fb_id);
+         POST_FIELD_WRITE(req->mode_valid);
+
+         if (req->mode_valid) {
+            POST_FIELD_WRITE(req->mode.clock);
+            POST_FIELD_WRITE(req->mode.hdisplay);
+            POST_FIELD_WRITE(req->mode.hsync_start);
+            POST_FIELD_WRITE(req->mode.hsync_end);
+            POST_FIELD_WRITE(req->mode.htotal);
+            POST_FIELD_WRITE(req->mode.hskew);
+            POST_FIELD_WRITE(req->mode.vdisplay);
+            POST_FIELD_WRITE(req->mode.vsync_start);
+            POST_FIELD_WRITE(req->mode.vsync_end);
+            POST_FIELD_WRITE(req->mode.vtotal);
+            POST_FIELD_WRITE(req->mode.vscan);
+            POST_FIELD_WRITE(req->mode.vrefresh);
+            POST_FIELD_WRITE(req->mode.flags);
+            POST_FIELD_WRITE(req->mode.type);
+            POST_FIELD_WRITE(req->mode.name);
+         }
+      }
+      break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_SETCRTC):
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_CURSOR):
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_GETGAMMA):
+ if (ARG3) {
+ struct vki_drm_mode_crtc_lut *req = (struct vki_drm_mode_crtc_lut *)ARG3;
+
+ POST_MEM_WRITE((Addr)req->red, req->gamma_size * sizeof(__vki_u16));
+ POST_MEM_WRITE((Addr)req->green, req->gamma_size * sizeof(__vki_u16));
+ POST_MEM_WRITE((Addr)req->blue, req->gamma_size * sizeof(__vki_u16));
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_SETGAMMA):
+ break;
+ case VKI_DRM_IOCTL_MODE_GETENCODER:
+ if (ARG3) {
+ struct vki_drm_mode_get_encoder *req = (struct vki_drm_mode_get_encoder *)ARG3;
+
+ POST_FIELD_WRITE(req->crtc_id);
+ POST_FIELD_WRITE(req->encoder_type);
+ POST_FIELD_WRITE(req->encoder_id);
+ POST_FIELD_WRITE(req->possible_clones);
+ POST_FIELD_WRITE(req->possible_crtcs);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_GETCONNECTOR:
+ if (ARG3) {
+ struct vki_drm_mode_get_connector *req = (struct vki_drm_mode_get_connector *)ARG3;
+
+ POST_FIELD_WRITE(req->connector_id);
+ POST_FIELD_WRITE(req->connector_type);
+ POST_FIELD_WRITE(req->connector_type_id);
+ POST_FIELD_WRITE(req->mm_width);
+ POST_FIELD_WRITE(req->mm_height);
+ POST_FIELD_WRITE(req->subpixel);
+ POST_FIELD_WRITE(req->connection);
+ POST_FIELD_WRITE(req->encoder_id);
+ POST_FIELD_WRITE(req->count_modes);
+ POST_FIELD_WRITE(req->count_props);
+ POST_FIELD_WRITE(req->count_encoders);
+
+ // see comment near VKI_DRM_IOCTL_MODE_GETRESOURCES
+ if (req->count_modes > 0 && req->modes_ptr)
+ POST_MEM_WRITE((Addr)req->modes_ptr, req->count_modes * sizeof(struct vki_drm_mode_modeinfo));
+
+ if (req->count_props > 0) {
+ if (req->props_ptr)
+ POST_MEM_WRITE((Addr)req->props_ptr, req->count_props * sizeof(__vki_u32));
+ if (req->prop_values_ptr)
+ POST_MEM_WRITE((Addr)req->prop_values_ptr, req->count_props * sizeof(__vki_u64));
+ }
+
+ if (req->count_encoders > 0 && req->encoders_ptr)
+ POST_MEM_WRITE((Addr)req->encoders_ptr, req->count_encoders * sizeof(__vki_u32));
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_ATTACHMODE):
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_DETACHMODE):
+ break;
+   case VKI_DRM_IOCTL_MODE_GETPROPERTY:
+      if (ARG3) {
+         struct vki_drm_mode_get_property *req = (struct vki_drm_mode_get_property *)ARG3;
+
+         POST_FIELD_WRITE(req->name);
+         POST_FIELD_WRITE(req->flags);
+         POST_FIELD_WRITE(req->count_values);
+
+         // this API is misdesigned: you can't tell exactly how many bytes
+         // it wrote without knowing the type of the property,
+         // so let's assume the maximum possible
+
+         // additionally see comment near VKI_DRM_IOCTL_MODE_GETRESOURCES
+
+         if ((req->count_values || req->count_enum_blobs) && req->values_ptr)
+            POST_MEM_WRITE((Addr)req->values_ptr,
+                  max(req->count_values * sizeof(__vki_u64),
+                      req->count_enum_blobs * sizeof(__vki_u32)));
+
+         POST_FIELD_WRITE(req->count_enum_blobs);
+
+         if (req->count_enum_blobs && req->enum_blob_ptr) {
+            POST_MEM_WRITE((Addr)req->enum_blob_ptr,
+                  req->count_enum_blobs *
+                  max(sizeof(struct vki_drm_mode_property_enum), sizeof(__vki_u32)));
+         }
+      }
+      break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_SETPROPERTY):
+ break;
+ case VKI_DRM_IOCTL_MODE_GETPROPBLOB:
+ if (ARG3) {
+ struct vki_drm_mode_get_blob *req = (struct vki_drm_mode_get_blob *)ARG3;
+
+ POST_MEM_WRITE((Addr)req->data, req->length);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_GETFB:
+ if (ARG3) {
+ struct vki_drm_mode_fb_cmd *req = (struct vki_drm_mode_fb_cmd *)ARG3;
+
+ POST_FIELD_WRITE(req->height);
+ POST_FIELD_WRITE(req->width);
+ POST_FIELD_WRITE(req->depth);
+ POST_FIELD_WRITE(req->bpp);
+ POST_FIELD_WRITE(req->pitch);
+ POST_FIELD_WRITE(req->handle);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_ADDFB):
+ if (ARG3) {
+ struct vki_drm_mode_fb_cmd *req = (struct vki_drm_mode_fb_cmd *)ARG3;
+
+ POST_FIELD_WRITE(req->fb_id);
+ }
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_RMFB):
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_PAGE_FLIP):
+ break;
+ VKI_DRM_IOCTL_DOUBLE(VKI_DRM_IOCTL_MODE_DIRTYFB):
+ break;
+ case VKI_DRM_IOCTL_MODE_CREATE_DUMB:
+ if (ARG3) {
+ struct vki_drm_mode_create_dumb *req = (struct vki_drm_mode_create_dumb *)ARG3;
+
+ POST_FIELD_WRITE(req->pitch);
+ POST_FIELD_WRITE(req->size);
+ POST_FIELD_WRITE(req->handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_MAP_DUMB:
+ if (ARG3) {
+ struct vki_drm_mode_map_dumb *req = (struct vki_drm_mode_map_dumb *)ARG3;
+
+ POST_FIELD_WRITE(req->offset);
+ }
+ break;
+ case VKI_DRM_IOCTL_MODE_DESTROY_DUMB:
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GETPARAM:
+ if (ARG3) {
+ struct vki_drm_nouveau_getparam *req = (struct vki_drm_nouveau_getparam *)ARG3;
+
+ POST_FIELD_WRITE(req->value);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_SETPARAM:
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC:
+ if (ARG3) {
+ struct vki_drm_nouveau_channel_alloc *req = (struct vki_drm_nouveau_channel_alloc *)ARG3;
+ int i;
+
+ POST_FIELD_WRITE(req->channel);
+ POST_FIELD_WRITE(req->pushbuf_domains);
+ POST_FIELD_WRITE(req->nr_subchan);
+ for (i = 0; i < req->nr_subchan; ++i) {
+ POST_FIELD_WRITE(req->subchan[i].handle);
+ POST_FIELD_WRITE(req->subchan[i].grclass);
+ }
+ POST_FIELD_WRITE(req->notifier_handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_CHANNEL_FREE:
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GROBJ_ALLOC:
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC:
+ if (ARG3) {
+ struct vki_drm_nouveau_notifierobj_alloc *req = (struct vki_drm_nouveau_notifierobj_alloc *)ARG3;
+
+ POST_FIELD_WRITE(req->offset);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GPUOBJ_FREE:
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GEM_NEW:
+ if (ARG3) {
+ struct vki_drm_nouveau_gem_new *req = (struct vki_drm_nouveau_gem_new *)ARG3;
+
+ POST_FIELD_WRITE(req->info.domain);
+ POST_FIELD_WRITE(req->info.size);
+ POST_FIELD_WRITE(req->info.offset);
+ POST_FIELD_WRITE(req->info.map_handle);
+ POST_FIELD_WRITE(req->info.tile_mode);
+ POST_FIELD_WRITE(req->info.tile_flags);
+ POST_FIELD_WRITE(req->info.handle);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GEM_PUSHBUF:
+ if (ARG3) {
+ struct vki_drm_nouveau_gem_pushbuf *req = (struct vki_drm_nouveau_gem_pushbuf *)ARG3;
+
+ POST_FIELD_WRITE(req->vram_available);
+ POST_FIELD_WRITE(req->gart_available);
+ POST_FIELD_WRITE(req->suffix0);
+ POST_FIELD_WRITE(req->suffix1);
+ }
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GEM_CPU_PREP:
+ break;
+ case VKI_DRM_IOCTL_NOUVEAU_GEM_CPU_FINI:
+ break;
+   case VKI_DRM_IOCTL_NOUVEAU_GEM_INFO:
+      if (ARG3) {
+         struct vki_drm_nouveau_gem_info *req = (struct vki_drm_nouveau_gem_info *)ARG3;
+
+         POST_FIELD_WRITE(req->domain);
+         POST_FIELD_WRITE(req->size);
+         POST_FIELD_WRITE(req->offset);
+         POST_FIELD_WRITE(req->map_handle);
+         POST_FIELD_WRITE(req->tile_mode);
+         POST_FIELD_WRITE(req->tile_flags);
+      }
+      break;
default:
/* EVIOC* are variable length and return size written on success */
diff --git a/include/vki/vki-linux.h b/include/vki/vki-linux.h
index a7861a0b..b2fb895e 100644
--- a/include/vki/vki-linux.h
+++ b/include/vki/vki-linux.h
@@ -2761,6 +2761,585 @@ struct vki_getcpu_cache {
#define VKI_FIOQSIZE 0x5460 /* Value differs on some platforms */
#endif
+// drm
+
+// drm_mode.h
+#define VKI_DRM_DISPLAY_MODE_LEN 32
+#define VKI_DRM_PROP_NAME_LEN 32
+
+struct vki_drm_mode_modeinfo {
+ __vki_u32 clock;
+ __vki_u16 hdisplay, hsync_start, hsync_end, htotal, hskew;
+ __vki_u16 vdisplay, vsync_start, vsync_end, vtotal, vscan;
+
+ __vki_u32 vrefresh;
+
+ __vki_u32 flags;
+ __vki_u32 type;
+ char name[VKI_DRM_DISPLAY_MODE_LEN];
+};
+
+struct vki_drm_mode_card_res {
+ __vki_u64 fb_id_ptr;
+ __vki_u64 crtc_id_ptr;
+ __vki_u64 connector_id_ptr;
+ __vki_u64 encoder_id_ptr;
+ __vki_u32 count_fbs;
+ __vki_u32 count_crtcs;
+ __vki_u32 count_connectors;
+ __vki_u32 count_encoders;
+ __vki_u32 min_width, max_width;
+ __vki_u32 min_height, max_height;
+};
+
+struct vki_drm_mode_crtc {
+ __vki_u64 set_connectors_ptr;
+ __vki_u32 count_connectors;
+
+ __vki_u32 crtc_id; /**< Id */
+ __vki_u32 fb_id; /**< Id of framebuffer */
+
+	__vki_u32 x, y; /**< Position on the framebuffer */
+
+ __vki_u32 gamma_size;
+ __vki_u32 mode_valid;
+ struct vki_drm_mode_modeinfo mode;
+};
+
+struct vki_drm_mode_get_encoder {
+ __vki_u32 encoder_id;
+ __vki_u32 encoder_type;
+
+ __vki_u32 crtc_id; /**< Id of crtc */
+
+ __vki_u32 possible_crtcs;
+ __vki_u32 possible_clones;
+};
+
+struct vki_drm_mode_get_connector {
+ __vki_u64 encoders_ptr;
+ __vki_u64 modes_ptr;
+ __vki_u64 props_ptr;
+ __vki_u64 prop_values_ptr;
+
+ __vki_u32 count_modes;
+ __vki_u32 count_props;
+ __vki_u32 count_encoders;
+
+ __vki_u32 encoder_id; /**< Current Encoder */
+ __vki_u32 connector_id; /**< Id */
+ __vki_u32 connector_type;
+ __vki_u32 connector_type_id;
+
+ __vki_u32 connection;
+ __vki_u32 mm_width, mm_height; /**< HxW in millimeters */
+ __vki_u32 subpixel;
+};
+
+struct vki_drm_mode_property_enum {
+ __vki_u64 value;
+ char name[VKI_DRM_PROP_NAME_LEN];
+};
+
+struct vki_drm_mode_get_property {
+ __vki_u64 values_ptr; /* values and blob lengths */
+ __vki_u64 enum_blob_ptr; /* enum and blob id ptrs */
+
+ __vki_u32 prop_id;
+ __vki_u32 flags;
+ char name[VKI_DRM_PROP_NAME_LEN];
+
+ __vki_u32 count_values;
+ __vki_u32 count_enum_blobs;
+};
+
+struct vki_drm_mode_connector_set_property {
+ __vki_u64 value;
+ __vki_u32 prop_id;
+ __vki_u32 connector_id;
+};
+
+struct vki_drm_mode_get_blob {
+ __vki_u32 blob_id;
+ __vki_u32 length;
+ __vki_u64 data;
+};
+
+struct vki_drm_mode_fb_cmd {
+ __vki_u32 fb_id;
+ __vki_u32 width, height;
+ __vki_u32 pitch;
+ __vki_u32 bpp;
+ __vki_u32 depth;
+ /* driver specific handle */
+ __vki_u32 handle;
+};
+
+struct vki_drm_mode_fb_dirty_cmd {
+ __vki_u32 fb_id;
+ __vki_u32 flags;
+ __vki_u32 color;
+ __vki_u32 num_clips;
+ __vki_u64 clips_ptr;
+};
+
+struct vki_drm_mode_mode_cmd {
+ __vki_u32 connector_id;
+ struct vki_drm_mode_modeinfo mode;
+};
+
+#define VKI_DRM_MODE_CURSOR_BO (1<<0)
+#define VKI_DRM_MODE_CURSOR_MOVE (1<<1)
+
+struct vki_drm_mode_cursor {
+ __vki_u32 flags;
+ __vki_u32 crtc_id;
+ __vki_s32 x;
+ __vki_s32 y;
+ __vki_u32 width;
+ __vki_u32 height;
+ /* driver specific handle */
+ __vki_u32 handle;
+};
+
+struct vki_drm_mode_crtc_lut {
+ __vki_u32 crtc_id;
+ __vki_u32 gamma_size;
+
+ /* pointers to arrays */
+ __vki_u64 red;
+ __vki_u64 green;
+ __vki_u64 blue;
+};
+
+struct vki_drm_mode_crtc_page_flip {
+ __vki_u32 crtc_id;
+ __vki_u32 fb_id;
+ __vki_u32 flags;
+ __vki_u32 reserved;
+ __vki_u64 user_data;
+};
+
+/* create a dumb scanout buffer */
+struct vki_drm_mode_create_dumb {
+ __vki_u32 height;
+ __vki_u32 width;
+ __vki_u32 bpp;
+ __vki_u32 flags;
+ /* handle, pitch, size will be returned */
+ __vki_u32 handle;
+ __vki_u32 pitch;
+ __vki_u64 size;
+};
+
+/* set up for mmap of a dumb scanout buffer */
+struct vki_drm_mode_map_dumb {
+ /** Handle for the object being mapped. */
+ __vki_u32 handle;
+ __vki_u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __vki_u64 offset;
+};
+
+struct vki_drm_mode_destroy_dumb {
+ __vki_u32 handle;
+};
+
+
+// drm.h
+
+typedef unsigned int vki_drm_drawable_t;
+typedef unsigned int vki_drm_magic_t;
+
+struct vki_drm_clip_rect {
+ unsigned short x1;
+ unsigned short y1;
+ unsigned short x2;
+ unsigned short y2;
+};
+
+struct vki_drm_version {
+ int version_major; /**< Major version */
+ int version_minor; /**< Minor version */
+ int version_patchlevel; /**< Patch level */
+ vki_size_t name_len; /**< Length of name buffer */
+ char *name; /**< Name of driver */
+ vki_size_t date_len; /**< Length of date buffer */
+ char *date; /**< User-space buffer to hold date */
+ vki_size_t desc_len; /**< Length of desc buffer */
+ char *desc; /**< User-space buffer to hold desc */
+};
+
+struct vki_drm_unique {
+ vki_size_t unique_len; /**< Length of unique */
+ char *unique; /**< Unique name for driver instantiation */
+};
+
+struct vki_drm_update_draw {
+ vki_drm_drawable_t handle;
+ unsigned int type;
+ unsigned int num;
+ unsigned long long data;
+};
+
+struct vki_drm_auth {
+ vki_drm_magic_t magic;
+};
+
+struct vki_drm_irq_busid {
+ int irq; /**< IRQ number */
+ int busnum; /**< bus number */
+ int devnum; /**< device number */
+ int funcnum; /**< function number */
+};
+
+/** DRM_IOCTL_GEM_CLOSE ioctl argument type */
+struct vki_drm_gem_close {
+ /** Handle of the object to be closed. */
+ __vki_u32 handle;
+ __vki_u32 pad;
+};
+
+/** DRM_IOCTL_GEM_FLINK ioctl argument type */
+struct vki_drm_gem_flink {
+ /** Handle for the object being named */
+ __vki_u32 handle;
+
+ /** Returned global name */
+ __vki_u32 name;
+};
+
+/** DRM_IOCTL_GEM_OPEN ioctl argument type */
+struct vki_drm_gem_open {
+ /** Name of object being opened */
+ __vki_u32 name;
+
+ /** Returned handle for the object */
+ __vki_u32 handle;
+
+ /** Returned size of the object */
+ __vki_u64 size;
+};
+
+// xf86drm.h
+#define VKI_DRM_IOCTL_NR(n)	_VKI_IOC_NR(n)
+#define VKI_DRM_IOC_VOID	_VKI_IOC_NONE
+#define VKI_DRM_IOC_READ	_VKI_IOC_READ
+#define VKI_DRM_IOC_WRITE	_VKI_IOC_WRITE
+#define VKI_DRM_IOC_READWRITE	(_VKI_IOC_READ|_VKI_IOC_WRITE)
+#define VKI_DRM_IOC(dir, group, nr, size) _VKI_IOC(dir, group, nr, size)
+
+enum vki_drm_vblank_seq_type {
+ VKI_DRM_VBLANK_ABSOLUTE = 0x0, /**< Wait for specific vblank sequence number */
+ VKI_DRM_VBLANK_RELATIVE = 0x1, /**< Wait for given number of vblanks */
+ VKI_DRM_VBLANK_EVENT = 0x4000000, /**< Send event instead of blocking */
+ VKI_DRM_VBLANK_FLIP = 0x8000000, /**< Scheduled buffer swap should flip */
+ VKI_DRM_VBLANK_NEXTONMISS = 0x10000000, /**< If missed, wait for next vblank */
+ VKI_DRM_VBLANK_SECONDARY = 0x20000000, /**< Secondary display controller */
+ VKI_DRM_VBLANK_SIGNAL = 0x40000000 /**< Send signal instead of blocking, unsupported */
+};
+
+struct vki_drm_wait_vblank_request {
+ enum vki_drm_vblank_seq_type type;
+ unsigned int sequence;
+ unsigned long signal;
+};
+
+struct vki_drm_wait_vblank_reply {
+ enum vki_drm_vblank_seq_type type;
+ unsigned int sequence;
+ long tval_sec;
+ long tval_usec;
+};
+
+/**
+ * DRM_IOCTL_WAIT_VBLANK ioctl argument type.
+ *
+ * \sa drmWaitVBlank().
+ */
+union vki_drm_wait_vblank {
+ struct vki_drm_wait_vblank_request request;
+ struct vki_drm_wait_vblank_reply reply;
+};
+
+//drm.h
+typedef unsigned int vki_drm_context_t;
+enum vki_drm_ctx_flags {
+ VKI_DRM_CONTEXT_PRESERVED = 0x01,
+ VKI_DRM_CONTEXT_2DONLY = 0x02
+};
+struct vki_drm_ctx {
+ vki_drm_context_t handle;
+ enum vki_drm_ctx_flags flags;
+};
+
+struct vki_drm_ctx_res {
+ int count;
+ struct vki_drm_ctx *contexts;
+};
+
+/* Because of a bug in libdrm, some ioctl numbers were passed as int; when
+ * cast to ulong the most significant bit was sign-extended into the upper
+ * word. Kernel drm only sees the lower part, so nobody noticed. */
+#if VG_WORDSIZE == 8
+#define VKI_DRM_IOCTL_DOUBLE(X) case (unsigned long)(int)(X): case X
+#else
+#define VKI_DRM_IOCTL_DOUBLE(X) case X
+#endif
+
+#define VKI_DRM_IOCTL_BASE 'd'
+#define VKI_DRM_IO(nr) _VKI_IO(VKI_DRM_IOCTL_BASE,nr)
+#define VKI_DRM_IOR(nr,type) _VKI_IOR(VKI_DRM_IOCTL_BASE,nr,type)
+#define VKI_DRM_IOW(nr,type) _VKI_IOW(VKI_DRM_IOCTL_BASE,nr,type)
+#define VKI_DRM_IOWR(nr,type) _VKI_IOWR(VKI_DRM_IOCTL_BASE,nr,type)
+
+#define VKI_DRM_IOCTL_VERSION VKI_DRM_IOWR(0x00, struct vki_drm_version)
+#define VKI_DRM_IOCTL_GET_UNIQUE VKI_DRM_IOWR(0x01, struct vki_drm_unique)
+#define VKI_DRM_IOCTL_GET_MAGIC VKI_DRM_IOR( 0x02, struct vki_drm_auth)
+#define VKI_DRM_IOCTL_IRQ_BUSID VKI_DRM_IOWR(0x03, struct vki_drm_irq_busid)
+//#define VKI_DRM_IOCTL_GET_MAP VKI_DRM_IOWR(0x04, struct vki_drm_map)
+//#define VKI_DRM_IOCTL_GET_CLIENT VKI_DRM_IOWR(0x05, struct vki_drm_client)
+//#define VKI_DRM_IOCTL_GET_STATS VKI_DRM_IOR( 0x06, struct vki_drm_stats)
+//#define VKI_DRM_IOCTL_SET_VERSION VKI_DRM_IOWR(0x07, struct vki_drm_set_version)
+//#define VKI_DRM_IOCTL_MODESET_CTL VKI_DRM_IOW(0x08, struct vki_drm_modeset_ctl)
+#define VKI_DRM_IOCTL_GEM_CLOSE VKI_DRM_IOW (0x09, struct vki_drm_gem_close)
+#define VKI_DRM_IOCTL_GEM_FLINK VKI_DRM_IOWR(0x0a, struct vki_drm_gem_flink)
+#define VKI_DRM_IOCTL_GEM_OPEN VKI_DRM_IOWR(0x0b, struct vki_drm_gem_open)
+//#define VKI_DRM_IOCTL_GET_CAP VKI_DRM_IOWR(0x0c, struct vki_drm_get_cap)
+
+//#define VKI_DRM_IOCTL_SET_UNIQUE VKI_DRM_IOW( 0x10, struct vki_drm_unique)
+//#define VKI_DRM_IOCTL_AUTH_MAGIC VKI_DRM_IOW( 0x11, struct vki_drm_auth)
+//#define VKI_DRM_IOCTL_BLOCK VKI_DRM_IOWR(0x12, struct vki_drm_block)
+//#define VKI_DRM_IOCTL_UNBLOCK VKI_DRM_IOWR(0x13, struct vki_drm_block)
+//#define VKI_DRM_IOCTL_CONTROL VKI_DRM_IOW( 0x14, struct vki_drm_control)
+//#define VKI_DRM_IOCTL_ADD_MAP VKI_DRM_IOWR(0x15, struct vki_drm_map)
+//#define VKI_DRM_IOCTL_ADD_BUFS VKI_DRM_IOWR(0x16, struct vki_drm_buf_desc)
+//#define VKI_DRM_IOCTL_MARK_BUFS VKI_DRM_IOW( 0x17, struct vki_drm_buf_desc)
+//#define VKI_DRM_IOCTL_INFO_BUFS VKI_DRM_IOWR(0x18, struct vki_drm_buf_info)
+//#define VKI_DRM_IOCTL_MAP_BUFS VKI_DRM_IOWR(0x19, struct vki_drm_buf_map)
+//#define VKI_DRM_IOCTL_FREE_BUFS VKI_DRM_IOW( 0x1a, struct vki_drm_buf_free)
+
+//#define VKI_DRM_IOCTL_RM_MAP VKI_DRM_IOW( 0x1b, struct vki_drm_map)
+
+//#define VKI_DRM_IOCTL_SET_SAREA_CTX VKI_DRM_IOW( 0x1c, struct vki_drm_ctx_priv_map)
+//#define VKI_DRM_IOCTL_GET_SAREA_CTX VKI_DRM_IOWR(0x1d, struct vki_drm_ctx_priv_map)
+
+#define VKI_DRM_IOCTL_SET_MASTER VKI_DRM_IO(0x1e)
+#define VKI_DRM_IOCTL_DROP_MASTER VKI_DRM_IO(0x1f)
+
+#define VKI_DRM_IOCTL_ADD_CTX VKI_DRM_IOWR(0x20, struct vki_drm_ctx)
+#define VKI_DRM_IOCTL_RM_CTX VKI_DRM_IOWR(0x21, struct vki_drm_ctx)
+#define VKI_DRM_IOCTL_MOD_CTX VKI_DRM_IOW( 0x22, struct vki_drm_ctx)
+#define VKI_DRM_IOCTL_GET_CTX VKI_DRM_IOWR(0x23, struct vki_drm_ctx)
+#define VKI_DRM_IOCTL_SWITCH_CTX VKI_DRM_IOW( 0x24, struct vki_drm_ctx)
+#define VKI_DRM_IOCTL_NEW_CTX VKI_DRM_IOW( 0x25, struct vki_drm_ctx)
+#define VKI_DRM_IOCTL_RES_CTX VKI_DRM_IOWR(0x26, struct vki_drm_ctx_res)
+//#define VKI_DRM_IOCTL_ADD_DRAW VKI_DRM_IOWR(0x27, struct vki_drm_draw)
+//#define VKI_DRM_IOCTL_RM_DRAW VKI_DRM_IOWR(0x28, struct vki_drm_draw)
+//#define VKI_DRM_IOCTL_DMA VKI_DRM_IOWR(0x29, struct vki_drm_dma)
+//#define VKI_DRM_IOCTL_LOCK VKI_DRM_IOW( 0x2a, struct vki_drm_lock)
+//#define VKI_DRM_IOCTL_UNLOCK VKI_DRM_IOW( 0x2b, struct vki_drm_lock)
+//#define VKI_DRM_IOCTL_FINISH VKI_DRM_IOW( 0x2c, struct vki_drm_lock)
+
+//#define VKI_DRM_IOCTL_AGP_ACQUIRE VKI_DRM_IO( 0x30)
+//#define VKI_DRM_IOCTL_AGP_RELEASE VKI_DRM_IO( 0x31)
+//#define VKI_DRM_IOCTL_AGP_ENABLE VKI_DRM_IOW( 0x32, struct vki_drm_agp_mode)
+//#define VKI_DRM_IOCTL_AGP_INFO VKI_DRM_IOR( 0x33, struct vki_drm_agp_info)
+//#define VKI_DRM_IOCTL_AGP_ALLOC VKI_DRM_IOWR(0x34, struct vki_drm_agp_buffer)
+//#define VKI_DRM_IOCTL_AGP_FREE VKI_DRM_IOW( 0x35, struct vki_drm_agp_buffer)
+//#define VKI_DRM_IOCTL_AGP_BIND VKI_DRM_IOW( 0x36, struct vki_drm_agp_binding)
+//#define VKI_DRM_IOCTL_AGP_UNBIND VKI_DRM_IOW( 0x37, struct vki_drm_agp_binding)
+
+//#define VKI_DRM_IOCTL_SG_ALLOC VKI_DRM_IOWR(0x38, struct vki_drm_scatter_gather)
+//#define VKI_DRM_IOCTL_SG_FREE VKI_DRM_IOW( 0x39, struct vki_drm_scatter_gather)
+
+#define VKI_DRM_IOCTL_WAIT_VBLANK VKI_DRM_IOWR(0x3a, union vki_drm_wait_vblank)
+
+#define VKI_DRM_IOCTL_UPDATE_DRAW VKI_DRM_IOW(0x3f, struct vki_drm_update_draw)
+
+#define VKI_DRM_IOCTL_MODE_GETRESOURCES VKI_DRM_IOWR(0xA0, struct vki_drm_mode_card_res)
+#define VKI_DRM_IOCTL_MODE_GETCRTC VKI_DRM_IOWR(0xA1, struct vki_drm_mode_crtc)
+#define VKI_DRM_IOCTL_MODE_SETCRTC VKI_DRM_IOWR(0xA2, struct vki_drm_mode_crtc)
+#define VKI_DRM_IOCTL_MODE_CURSOR VKI_DRM_IOWR(0xA3, struct vki_drm_mode_cursor)
+#define VKI_DRM_IOCTL_MODE_GETGAMMA VKI_DRM_IOWR(0xA4, struct vki_drm_mode_crtc_lut)
+#define VKI_DRM_IOCTL_MODE_SETGAMMA VKI_DRM_IOWR(0xA5, struct vki_drm_mode_crtc_lut)
+#define VKI_DRM_IOCTL_MODE_GETENCODER VKI_DRM_IOWR(0xA6, struct vki_drm_mode_get_encoder)
+#define VKI_DRM_IOCTL_MODE_GETCONNECTOR VKI_DRM_IOWR(0xA7, struct vki_drm_mode_get_connector)
+#define VKI_DRM_IOCTL_MODE_ATTACHMODE VKI_DRM_IOWR(0xA8, struct vki_drm_mode_mode_cmd)
+#define VKI_DRM_IOCTL_MODE_DETACHMODE VKI_DRM_IOWR(0xA9, struct vki_drm_mode_mode_cmd)
+
+#define VKI_DRM_IOCTL_MODE_GETPROPERTY VKI_DRM_IOWR(0xAA, struct vki_drm_mode_get_property)
+#define VKI_DRM_IOCTL_MODE_SETPROPERTY VKI_DRM_IOWR(0xAB, struct vki_drm_mode_connector_set_property)
+#define VKI_DRM_IOCTL_MODE_GETPROPBLOB VKI_DRM_IOWR(0xAC, struct vki_drm_mode_get_blob)
+#define VKI_DRM_IOCTL_MODE_GETFB VKI_DRM_IOWR(0xAD, struct vki_drm_mode_fb_cmd)
+#define VKI_DRM_IOCTL_MODE_ADDFB VKI_DRM_IOWR(0xAE, struct vki_drm_mode_fb_cmd)
+#define VKI_DRM_IOCTL_MODE_RMFB VKI_DRM_IOWR(0xAF, unsigned int)
+#define VKI_DRM_IOCTL_MODE_PAGE_FLIP VKI_DRM_IOWR(0xB0, struct vki_drm_mode_crtc_page_flip)
+#define VKI_DRM_IOCTL_MODE_DIRTYFB VKI_DRM_IOWR(0xB1, struct vki_drm_mode_fb_dirty_cmd)
+
+#define VKI_DRM_IOCTL_MODE_CREATE_DUMB VKI_DRM_IOWR(0xB2, struct vki_drm_mode_create_dumb)
+#define VKI_DRM_IOCTL_MODE_MAP_DUMB VKI_DRM_IOWR(0xB3, struct vki_drm_mode_map_dumb)
+#define VKI_DRM_IOCTL_MODE_DESTROY_DUMB VKI_DRM_IOWR(0xB4, struct vki_drm_mode_destroy_dumb)
+
+#define VKI_DRM_COMMAND_BASE 0x40
+//#define VKI_DRM_COMMAND_END 0xA0
+
+// xf86drm.c: drmCommandWriteRead
+#define VKI_DRM_COMMAND(dir, index, size) \
+ VKI_DRM_IOC(dir, VKI_DRM_IOCTL_BASE, VKI_DRM_COMMAND_BASE + index, size)
+
+#define VKI_DRM_COMMAND_RW(index, size) \
+ VKI_DRM_COMMAND(VKI_DRM_IOC_READ | VKI_DRM_IOC_WRITE, index, size)
+
+#define VKI_DRM_COMMAND_W(index, size) \
+ VKI_DRM_COMMAND(VKI_DRM_IOC_WRITE, index, size)
+
+#define VKI_DRM_COMMAND_R(index, size) \
+ VKI_DRM_COMMAND(VKI_DRM_IOC_READ, index, size)
+
+// nouveau_drm.h
+#define VKI_DRM_NOUVEAU_GETPARAM 0x00
+#define VKI_DRM_NOUVEAU_SETPARAM 0x01
+#define VKI_DRM_NOUVEAU_CHANNEL_ALLOC 0x02
+#define VKI_DRM_NOUVEAU_CHANNEL_FREE 0x03
+#define VKI_DRM_NOUVEAU_GROBJ_ALLOC 0x04
+#define VKI_DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05
+#define VKI_DRM_NOUVEAU_GPUOBJ_FREE 0x06
+#define VKI_DRM_NOUVEAU_GEM_NEW 0x40
+#define VKI_DRM_NOUVEAU_GEM_PUSHBUF 0x41
+#define VKI_DRM_NOUVEAU_GEM_CPU_PREP 0x42
+#define VKI_DRM_NOUVEAU_GEM_CPU_FINI 0x43
+#define VKI_DRM_NOUVEAU_GEM_INFO 0x44
+
+struct vki_drm_nouveau_channel_alloc {
+ __vki_u32 fb_ctxdma_handle;
+ __vki_u32 tt_ctxdma_handle;
+
+ int channel;
+ __vki_u32 pushbuf_domains;
+
+ /* Notifier memory */
+ __vki_u32 notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ __vki_u32 handle;
+ __vki_u32 grclass;
+ } subchan[8];
+ __vki_u32 nr_subchan;
+};
+
+struct vki_drm_nouveau_channel_free {
+ int channel;
+};
+
+struct vki_drm_nouveau_grobj_alloc {
+ int channel;
+ __vki_u32 handle;
+ int class;
+};
+
+struct vki_drm_nouveau_notifierobj_alloc {
+ __vki_u32 channel;
+ __vki_u32 handle;
+ __vki_u32 size;
+ __vki_u32 offset;
+};
+
+struct vki_drm_nouveau_gpuobj_free {
+ int channel;
+ __vki_u32 handle;
+};
+
+struct vki_drm_nouveau_getparam {
+ __vki_u64 param;
+ __vki_u64 value;
+};
+
+struct vki_drm_nouveau_setparam {
+ __vki_u64 param;
+ __vki_u64 value;
+};
+
+struct vki_drm_nouveau_gem_info {
+ __vki_u32 handle;
+ __vki_u32 domain;
+ __vki_u64 size;
+ __vki_u64 offset;
+ __vki_u64 map_handle;
+ __vki_u32 tile_mode;
+ __vki_u32 tile_flags;
+};
+
+struct vki_drm_nouveau_gem_new {
+ struct vki_drm_nouveau_gem_info info;
+ __vki_u32 channel_hint;
+ __vki_u32 align;
+};
+
+
+struct vki_drm_nouveau_gem_pushbuf_bo_presumed {
+ __vki_u32 valid;
+ __vki_u32 domain;
+ __vki_u64 offset;
+};
+
+struct vki_drm_nouveau_gem_pushbuf_bo {
+ __vki_u64 user_priv;
+ __vki_u32 handle;
+ __vki_u32 read_domains;
+ __vki_u32 write_domains;
+ __vki_u32 valid_domains;
+ struct vki_drm_nouveau_gem_pushbuf_bo_presumed presumed;
+};
+
+struct vki_drm_nouveau_gem_pushbuf_push {
+ __vki_u32 bo_index;
+ __vki_u32 pad;
+ __vki_u64 offset;
+ __vki_u64 length;
+};
+
+struct vki_drm_nouveau_gem_pushbuf {
+ __vki_u32 channel;
+ __vki_u32 nr_buffers;
+ __vki_u64 buffers;
+ __vki_u32 nr_relocs;
+ __vki_u32 nr_push;
+ __vki_u64 relocs;
+ __vki_u64 push;
+ __vki_u32 suffix0;
+ __vki_u32 suffix1;
+ __vki_u64 vram_available;
+ __vki_u64 gart_available;
+};
+
+struct vki_drm_nouveau_gem_cpu_prep {
+ __vki_u32 handle;
+ __vki_u32 flags;
+};
+
+struct vki_drm_nouveau_gem_cpu_fini {
+ __vki_u32 handle;
+};
+
+#define VKI_DRM_IOCTL_NOUVEAU_GETPARAM VKI_DRM_COMMAND_RW(VKI_DRM_NOUVEAU_GETPARAM, sizeof(struct vki_drm_nouveau_getparam))
+#define VKI_DRM_IOCTL_NOUVEAU_SETPARAM VKI_DRM_COMMAND_RW(VKI_DRM_NOUVEAU_SETPARAM, sizeof(struct vki_drm_nouveau_setparam))
+#define VKI_DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC VKI_DRM_COMMAND_RW(VKI_DRM_NOUVEAU_CHANNEL_ALLOC, sizeof(struct vki_drm_nouveau_channel_alloc))
+#define VKI_DRM_IOCTL_NOUVEAU_CHANNEL_FREE VKI_DRM_COMMAND_W (VKI_DRM_NOUVEAU_CHANNEL_FREE, sizeof(struct vki_drm_nouveau_channel_free))
+#define VKI_DRM_IOCTL_NOUVEAU_GROBJ_ALLOC VKI_DRM_COMMAND_W (VKI_DRM_NOUVEAU_GROBJ_ALLOC, sizeof(struct vki_drm_nouveau_grobj_alloc))
+#define VKI_DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC VKI_DRM_COMMAND_RW(VKI_DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, sizeof(struct vki_drm_nouveau_notifierobj_alloc))
+#define VKI_DRM_IOCTL_NOUVEAU_GPUOBJ_FREE VKI_DRM_COMMAND_W (VKI_DRM_NOUVEAU_GPUOBJ_FREE, sizeof(struct vki_drm_nouveau_gpuobj_free))
+#define VKI_DRM_IOCTL_NOUVEAU_GEM_NEW VKI_DRM_COMMAND_RW(VKI_DRM_NOUVEAU_GEM_NEW, sizeof(struct vki_drm_nouveau_gem_new))
+#define VKI_DRM_IOCTL_NOUVEAU_GEM_PUSHBUF VKI_DRM_COMMAND_RW(VKI_DRM_NOUVEAU_GEM_PUSHBUF, sizeof(struct vki_drm_nouveau_gem_pushbuf))
+#define VKI_DRM_IOCTL_NOUVEAU_GEM_CPU_PREP VKI_DRM_COMMAND_W (VKI_DRM_NOUVEAU_GEM_CPU_PREP, sizeof(struct vki_drm_nouveau_gem_cpu_prep))
+#define VKI_DRM_IOCTL_NOUVEAU_GEM_CPU_FINI VKI_DRM_COMMAND_W (VKI_DRM_NOUVEAU_GEM_CPU_FINI, sizeof(struct vki_drm_nouveau_gem_cpu_fini))
+#define VKI_DRM_IOCTL_NOUVEAU_GEM_INFO VKI_DRM_COMMAND_RW(VKI_DRM_NOUVEAU_GEM_INFO, sizeof(struct vki_drm_nouveau_gem_info))
+
#endif // __VKI_LINUX_H
/*--------------------------------------------------------------------*/