Diffstat (limited to 'include/uapi')
-rw-r--r--  include/uapi/drm/drm.h                 72
-rw-r--r--  include/uapi/drm/drm_fourcc.h          10
-rw-r--r--  include/uapi/drm/drm_mode.h            45
-rw-r--r--  include/uapi/drm/habanalabs_accel.h    28
-rw-r--r--  include/uapi/drm/ivpu_accel.h           2
-rw-r--r--  include/uapi/drm/msm_drm.h              3
-rw-r--r--  include/uapi/drm/pvr_drm.h           1295
-rw-r--r--  include/uapi/drm/qaic_accel.h           5
-rw-r--r--  include/uapi/drm/v3d_drm.h            245
-rw-r--r--  include/uapi/drm/virtgpu_drm.h          2
-rw-r--r--  include/uapi/drm/xe_drm.h            1347
-rw-r--r--  include/uapi/linux/sync_file.h         22
12 files changed, 3057 insertions, 19 deletions
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index de723566c5ae..16122819edfe 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -713,7 +713,8 @@ struct drm_gem_open {
/**
* DRM_CAP_ASYNC_PAGE_FLIP
*
- * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC.
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for legacy
+ * page-flips.
*/
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
/**
@@ -773,6 +774,13 @@ struct drm_gem_open {
* :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
+/**
+ * DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP
+ *
+ * If set to 1, the driver supports &DRM_MODE_PAGE_FLIP_ASYNC for atomic
+ * commits.
+ */
+#define DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP 0x15
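A minimal sketch of probing this capability from userspace, assuming drm_fd is an already-open DRM file descriptor:

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Returns 1 if atomic async page-flips are supported, 0 otherwise. */
static int supports_atomic_async_flip(int drm_fd)
{
	struct drm_get_cap cap = {
		.capability = DRM_CAP_ATOMIC_ASYNC_PAGE_FLIP,
	};

	if (ioctl(drm_fd, DRM_IOCTL_GET_CAP, &cap) != 0)
		return 0; /* older kernels reject unknown caps */
	return cap.value == 1;
}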
/* DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -842,6 +850,31 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+/**
+ * DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT
+ *
+ * Drivers for para-virtualized hardware (e.g. vmwgfx, qxl, virtio and
+ * virtualbox) have additional restrictions for cursor planes (thus
+ * making cursor planes on those drivers not truly universal), e.g.
+ * they need cursor planes to act like one would expect from a mouse
+ * cursor and have correctly set hotspot properties.
+ * If this client cap is not set, the DRM core will hide cursor planes on
+ * those virtualized drivers because not setting it implies that the
+ * client is not capable of dealing with those extra restrictions.
+ * Clients which do set the cursor hotspot and treat the cursor plane
+ * like a mouse cursor should set this property.
+ * The client must enable &DRM_CLIENT_CAP_ATOMIC first.
+ *
+ * Setting this property on drivers which do not special case
+ * cursor planes (i.e. non-virtualized drivers) will return
+ * EOPNOTSUPP, which can be used by userspace to gauge
+ * requirements of the hardware/drivers they're running on.
+ *
+ * This capability is always supported for atomic-capable virtualized
+ * drivers starting from kernel version 6.6.
+ */
+#define DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT 6
+
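A minimal opt-in sketch, assuming &DRM_CLIENT_CAP_ATOMIC has already been enabled on the same drm_fd:

#include <sys/ioctl.h>
#include <drm/drm.h>

static int enable_cursor_hotspot(int drm_fd)
{
	struct drm_set_client_cap cap = {
		.capability = DRM_CLIENT_CAP_CURSOR_PLANE_HOTSPOT,
		.value = 1,
	};

	/* EOPNOTSUPP here just means a non-virtualized driver. */
	return ioctl(drm_fd, DRM_IOCTL_SET_CLIENT_CAP, &cap);
}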
/* DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
@@ -893,6 +926,7 @@ struct drm_syncobj_transfer {
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL (1 << 0)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT (1 << 1)
#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE (1 << 2) /* wait for time point to become available */
+#define DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE (1 << 3) /* set fence deadline to deadline_nsec */
struct drm_syncobj_wait {
__u64 handles;
/* absolute timeout */
@@ -901,6 +935,14 @@ struct drm_syncobj_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec: fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
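A sketch of a wait that attaches a deadline hint to the backing fences (handles[] holds caller-provided syncobj handles; both the timeout and the deadline are absolute CLOCK_MONOTONIC values):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>

static int syncobj_wait_with_deadline(int drm_fd, uint32_t *handles,
				      uint32_t count, int64_t timeout_ns,
				      uint64_t deadline_ns)
{
	struct drm_syncobj_wait wait = {
		.handles = (uintptr_t)handles,
		.timeout_nsec = timeout_ns,
		.count_handles = count,
		.flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
			 DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE,
		.deadline_nsec = deadline_ns,
	};

	return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
}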
struct drm_syncobj_timeline_wait {
@@ -913,6 +955,14 @@ struct drm_syncobj_timeline_wait {
__u32 flags;
__u32 first_signaled; /* only valid when not waiting all */
__u32 pad;
+ /**
+ * @deadline_nsec: fence deadline hint
+ *
+ * Deadline hint, in absolute CLOCK_MONOTONIC, to set on backing
+ * fence(s) if the DRM_SYNCOBJ_WAIT_FLAGS_WAIT_DEADLINE flag is
+ * set.
+ */
+ __u64 deadline_nsec;
};
/**
@@ -1218,6 +1268,26 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+/**
+ * DRM_IOCTL_MODE_CLOSEFB - Close a framebuffer.
+ *
+ * This closes a framebuffer previously added via ADDFB/ADDFB2. The IOCTL
+ * argument is a framebuffer object ID.
+ *
+ * This IOCTL is similar to &DRM_IOCTL_MODE_RMFB, except it doesn't disable
+ * planes and CRTCs. As long as the framebuffer is used by a plane, it's kept
+ * alive. When the plane no longer uses the framebuffer (because the
+ * framebuffer is replaced with another one, or the plane is disabled), the
+ * framebuffer is cleaned up.
+ *
+ * This is useful to implement flicker-free transitions between two processes.
+ *
+ * Depending on the threat model, user-space may want to ensure that the
+ * framebuffer doesn't expose any sensitive user information: closed
+ * framebuffers attached to a plane can be read back by the next DRM master.
+ */
+#define DRM_IOCTL_MODE_CLOSEFB DRM_IOWR(0xD0, struct drm_mode_closefb)
+
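A sketch of handing a framebuffer over without blanking the screen, where fb_id was returned earlier by ADDFB2:

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/drm.h>
#include <drm/drm_mode.h>

static int closefb_keep_scanout(int drm_fd, uint32_t fb_id)
{
	struct drm_mode_closefb closefb = {
		.fb_id = fb_id, /* .pad stays zero */
	};

	return ioctl(drm_fd, DRM_IOCTL_MODE_CLOSEFB, &closefb);
}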
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 3151f1fc7ebb..84d502e42961 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -54,7 +54,7 @@ extern "C" {
* Format modifiers may change any property of the buffer, including the number
* of planes and/or the required allocation size. Format modifiers are
* vendor-namespaced, and as such the relationship between a fourcc code and a
- * modifier is specific to the modifer being used. For example, some modifiers
+ * modifier is specific to the modifier being used. For example, some modifiers
* may preserve meaning - such as number of planes - from the fourcc code,
* whereas others may not.
*
@@ -79,7 +79,7 @@ extern "C" {
* format.
* - Higher-level programs interfacing with KMS/GBM/EGL/Vulkan/etc: these users
* see modifiers as opaque tokens they can check for equality and intersect.
- * These users musn't need to know to reason about the modifier value
+ * These users mustn't need to know to reason about the modifier value
* (i.e. they are not expected to extract information out of the modifier).
*
* Vendors should document their modifier usage in as much detail as
@@ -540,7 +540,7 @@ extern "C" {
* This is a tiled layout using 4Kb tiles in row-major layout.
* Within the tile pixels are laid out in 16 256 byte units / sub-tiles which
* are arranged in four groups (two wide, two high) with column-major layout.
- * Each group therefore consits out of four 256 byte units, which are also laid
+ * Each group therefore consists out of four 256 byte units, which are also laid
* out as 2x2 column-major.
* 256 byte units are made out of four 64 byte blocks of pixels, producing
* either a square block or a 2:1 unit.
@@ -1103,7 +1103,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
*/
/*
- * The top 4 bits (out of the 56 bits alloted for specifying vendor specific
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor specific
* modifiers) denote the category for modifiers. Currently we have three
* categories of modifiers ie AFBC, MISC and AFRC. We can have a maximum of
* sixteen different categories.
@@ -1419,7 +1419,7 @@ drm_fourcc_canonicalize_nvidia_format_mod(__u64 modifier)
* Amlogic FBC Memory Saving mode
*
* Indicates the storage is packed when pixel size is multiple of word
- * boudaries, i.e. 8bit should be stored in this mode to save allocation
+ * boundaries, i.e. 8bit should be stored in this mode to save allocation
* memory.
*
* This mode reduces body layout to 3072 bytes per 64x32 superblock with
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 128d09138ceb..7040e7ea80c7 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -36,10 +36,10 @@ extern "C" {
/**
* DOC: overview
*
- * DRM exposes many UAPI and structure definition to have a consistent
- * and standardized interface with user.
+ * DRM exposes many UAPI and structure definitions to have a consistent
+ * and standardized interface with users.
* Userspace can refer to these structure definitions and UAPI formats
- * to communicate to driver
+ * to communicate to drivers.
*/
#define DRM_CONNECTOR_NAME_LEN 32
@@ -540,7 +540,7 @@ struct drm_mode_get_connector {
/* the PROP_ATOMIC flag is used to hide properties from userspace that
* is not aware of atomic properties. This is mostly to work around
* older userspace (DDX drivers) that read/write each prop they find,
- * witout being aware that this could be triggering a lengthy modeset.
+ * without being aware that this could be triggering a lengthy modeset.
*/
#define DRM_MODE_PROP_ATOMIC 0x80000000
@@ -664,7 +664,7 @@ struct drm_mode_fb_cmd {
};
#define DRM_MODE_FB_INTERLACED (1<<0) /* for interlaced framebuffers */
-#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifer[] */
+#define DRM_MODE_FB_MODIFIERS (1<<1) /* enables ->modifier[] */
/**
* struct drm_mode_fb_cmd2 - Frame-buffer metadata.
@@ -846,6 +846,14 @@ struct drm_color_ctm {
__u64 matrix[9];
};
+struct drm_color_ctm_3x4 {
+ /*
+ * Conversion matrix with 3x4 dimensions in S31.32 sign-magnitude
+ * (not two's complement!) format.
+ */
+ __u64 matrix[12];
+};
+
struct drm_color_lut {
/*
* Values are mapped linearly to 0.0 - 1.0 range, with 0x0 == 0.0 and
@@ -881,8 +889,8 @@ struct hdr_metadata_infoframe {
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
- * @display_primaries.x: X cordinate of color primary.
- * @display_primaries.y: Y cordinate of color primary.
+ * @display_primaries.x: X coordinate of color primary.
+ * @display_primaries.y: Y coordinate of color primary.
*/
struct {
__u16 x, y;
@@ -892,8 +900,8 @@ struct hdr_metadata_infoframe {
* These are coded as unsigned 16-bit values in units of
* 0.00002, where 0x0000 represents zero and 0xC350
* represents 1.0000.
- * @white_point.x: X cordinate of whitepoint of color primary.
- * @white_point.y: Y cordinate of whitepoint of color primary.
+ * @white_point.x: X coordinate of whitepoint of color primary.
+ * @white_point.y: Y coordinate of whitepoint of color primary.
*/
struct {
__u16 x, y;
@@ -957,6 +965,15 @@ struct hdr_output_metadata {
* Request that the page-flip is performed as soon as possible, ie. with no
* delay due to waiting for vblank. This may cause tearing to be visible on
* the screen.
+ *
+ * When used with atomic uAPI, the driver will return an error if the hardware
+ * doesn't support performing an asynchronous page-flip for this update.
+ * User-space should handle this, e.g. by falling back to a regular page-flip.
+ *
+ * Note, some hardware might need to perform one last synchronous page-flip
+ * before being able to switch to asynchronous page-flips. As an exception,
+ * the driver will return success even though that first page-flip is not
+ * asynchronous.
*/
#define DRM_MODE_PAGE_FLIP_ASYNC 0x02
#define DRM_MODE_PAGE_FLIP_TARGET_ABSOLUTE 0x4
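A sketch of the fallback pattern described above, using libdrm's atomic helpers (drmModeAtomicCommit and the already-populated request req come from libdrm, not from this header):

#include <xf86drm.h>
#include <xf86drmMode.h>

static int commit_prefer_async(int drm_fd, drmModeAtomicReq *req)
{
	int ret = drmModeAtomicCommit(drm_fd, req,
				      DRM_MODE_PAGE_FLIP_ASYNC |
				      DRM_MODE_PAGE_FLIP_EVENT, NULL);
	if (ret < 0)
		/* Async flip rejected for this update: fall back to a
		 * vblank-synchronized page-flip. */
		ret = drmModeAtomicCommit(drm_fd, req,
					  DRM_MODE_PAGE_FLIP_EVENT, NULL);
	return ret;
}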
@@ -1323,6 +1340,16 @@ struct drm_mode_rect {
__s32 y2;
};
+/**
+ * struct drm_mode_closefb
+ * @fb_id: Framebuffer ID.
+ * @pad: Must be zero.
+ */
+struct drm_mode_closefb {
+ __u32 fb_id;
+ __u32 pad;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/habanalabs_accel.h b/include/uapi/drm/habanalabs_accel.h
index 347c7b62e60e..a512dc4cffd0 100644
--- a/include/uapi/drm/habanalabs_accel.h
+++ b/include/uapi/drm/habanalabs_accel.h
@@ -846,6 +846,7 @@ enum hl_server_type {
#define HL_INFO_HW_ERR_EVENT 36
#define HL_INFO_FW_ERR_EVENT 37
#define HL_INFO_USER_ENGINE_ERR_EVENT 38
+#define HL_INFO_DEV_SIGNED 40
#define HL_INFO_VERSION_MAX_LEN 128
#define HL_INFO_CARD_NAME_MAX_LEN 16
@@ -1256,6 +1257,7 @@ struct hl_info_dev_memalloc_page_sizes {
#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+#define SEC_DEV_INFO_BUF_SZ 5120
/*
* struct hl_info_sec_attest - attestation report of the boot
@@ -1290,6 +1292,32 @@ struct hl_info_sec_attest {
__u8 pad0[2];
};
+/*
+ * struct hl_info_signed - device information signed by a secured device.
+ * @nonce: number used only once. A random number provided by the host; this is also passed
+ *         to the quote command as qualifying data.
+ * @pub_data_len: length of the public data (bytes)
+ * @certificate_len: length of the certificate (bytes)
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @public_data: public key info signed info data (outPublic + name + qualifiedName)
+ * @certificate: certificate for the signing key
+ * @info_sig: signature of the info + nonce data.
+ * @dev_info_len: length of device info (bytes)
+ * @dev_info: device info as byte array.
+ */
+struct hl_info_signed {
+ __u32 nonce;
+ __u16 pub_data_len;
+ __u16 certificate_len;
+ __u8 info_sig_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __u16 dev_info_len;
+ __u8 dev_info[SEC_DEV_INFO_BUF_SZ];
+ __u8 pad[2];
+};
+
/**
* struct hl_page_fault_info - page fault information.
* @timestamp: timestamp of page fault.
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 262db0c3beee..de1944e42c65 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -196,7 +196,7 @@ struct drm_ivpu_bo_create {
*
* %DRM_IVPU_BO_UNCACHED:
*
- * Allocated BO will not be cached on host side nor snooped on the VPU side.
+ * Not supported. Use DRM_IVPU_BO_WC instead.
*
* %DRM_IVPU_BO_WC:
*
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 6c34272a13fd..d8a6b3472760 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -86,6 +86,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CMDLINE 0x0d /* WO: override for task cmdline */
#define MSM_PARAM_VA_START 0x0e /* RO: start of valid GPU iova range */
#define MSM_PARAM_VA_SIZE 0x0f /* RO: size of valid GPU iova range (bytes) */
+#define MSM_PARAM_HIGHEST_BANK_BIT 0x10 /* RO */
/* For backwards compat. The original support for preemption was based on
* a single ring per priority level so # of priority levels equals the #
@@ -139,6 +140,8 @@ struct drm_msm_gem_new {
#define MSM_INFO_GET_NAME 0x03 /* get debug name, returned by pointer */
#define MSM_INFO_SET_IOVA 0x04 /* set the iova, passed by value */
#define MSM_INFO_GET_FLAGS 0x05 /* get the MSM_BO_x flags */
+#define MSM_INFO_SET_METADATA 0x06 /* set userspace metadata */
+#define MSM_INFO_GET_METADATA 0x07 /* get userspace metadata */
struct drm_msm_gem_info {
__u32 handle; /* in */
diff --git a/include/uapi/drm/pvr_drm.h b/include/uapi/drm/pvr_drm.h
new file mode 100644
index 000000000000..ccf6c2112468
--- /dev/null
+++ b/include/uapi/drm/pvr_drm.h
@@ -0,0 +1,1295 @@
+/* SPDX-License-Identifier: (GPL-2.0-only WITH Linux-syscall-note) OR MIT */
+/* Copyright (c) 2023 Imagination Technologies Ltd. */
+
+#ifndef PVR_DRM_UAPI_H
+#define PVR_DRM_UAPI_H
+
+#include "drm.h"
+
+#include <linux/const.h>
+#include <linux/types.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/**
+ * DOC: PowerVR UAPI
+ *
+ * The PowerVR IOCTL argument structs have a few limitations in place, in
+ * addition to the standard kernel restrictions:
+ *
+ * - All members must be type-aligned.
+ * - The overall struct must be padded to 64-bit alignment.
+ * - Explicit padding is almost always required. This takes the form of
+ * ``_padding_[x]`` members of sufficient size to pad to the next power-of-two
+ * alignment, where [x] is the offset into the struct in hexadecimal. Arrays
+ * are never used for alignment. Padding fields must be zeroed; this is
+ * always checked.
+ * - Unions may only appear as the last member of a struct.
+ * - Individual union members may grow in the future. The space between the
+ * end of a union member and the end of its containing union is considered
+ * "implicit padding" and must be zeroed. This is always checked.
+ *
+ * In addition to the IOCTL argument structs, the PowerVR UAPI makes use of
+ * DEV_QUERY argument structs. These are used to fetch information about the
+ * device and runtime. These structs are subject to the same rules set out
+ * above.
+ */
+
+/**
+ * struct drm_pvr_obj_array - Container used to pass arrays of objects
+ *
+ * It is not unusual to have to extend objects to pass new parameters, and the DRM
+ * ioctl infrastructure supports that by padding ioctl arguments with zeros
+ * when the data passed by userspace is smaller than the struct defined in the
+ * drm_ioctl_desc, thus keeping things backward compatible. This type applies
+ * the same concept to indirect objects passed through arrays referenced
+ * from the main ioctl arguments structure: the stride basically defines the size
+ * of the object passed by userspace, which allows the kernel driver to pad with
+ * zeros when it's smaller than the size of the object it expects.
+ *
+ * Use ``DRM_PVR_OBJ_ARRAY()`` to fill object array fields, unless you
+ * have a very good reason not to.
+ */
+struct drm_pvr_obj_array {
+ /** @stride: Stride of object struct. Used for versioning. */
+ __u32 stride;
+
+ /** @count: Number of objects in the array. */
+ __u32 count;
+
+ /** @array: User pointer to an array of objects. */
+ __u64 array;
+};
+
+/**
+ * DRM_PVR_OBJ_ARRAY() - Helper macro for filling &struct drm_pvr_obj_array.
+ * @cnt: Number of elements pointed to by @ptr.
+ * @ptr: Pointer to start of a C array.
+ *
+ * Return: Literal of type &struct drm_pvr_obj_array.
+ */
+#define DRM_PVR_OBJ_ARRAY(cnt, ptr) \
+ { .stride = sizeof((ptr)[0]), .count = (cnt), .array = (__u64)(uintptr_t)(ptr) }
+
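For illustration, filling an object array field from a local C array (here a hypothetical sync-op list; the struct is defined later in this header):

struct drm_pvr_sync_op sync_ops[2]; /* filled in by the caller */
struct drm_pvr_obj_array arr = DRM_PVR_OBJ_ARRAY(2, sync_ops);

/* arr.stride == sizeof(struct drm_pvr_sync_op), arr.count == 2, and
 * arr.array holds the user pointer to sync_ops[0]. */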
+/**
+ * DOC: PowerVR IOCTL interface
+ */
+
+/**
+ * PVR_IOCTL() - Build a PowerVR IOCTL number
+ * @_ioctl: An incrementing id for this IOCTL. Added to %DRM_COMMAND_BASE.
+ * @_mode: Must be one of %DRM_IOR, %DRM_IOW or %DRM_IOWR.
+ * @_data: The type of the args struct passed by this IOCTL.
+ *
+ * The struct referred to by @_data must have a ``drm_pvr_ioctl_`` prefix and an
+ * ``_args`` suffix. They are therefore omitted from @_data.
+ *
+ * This should only be used to build the constants described below; it should
+ * never be used to call an IOCTL directly.
+ *
+ * Return: An IOCTL number to be passed to ioctl() from userspace.
+ */
+#define PVR_IOCTL(_ioctl, _mode, _data) \
+ _mode(DRM_COMMAND_BASE + (_ioctl), struct drm_pvr_ioctl_##_data##_args)
+
+#define DRM_IOCTL_PVR_DEV_QUERY PVR_IOCTL(0x00, DRM_IOWR, dev_query)
+#define DRM_IOCTL_PVR_CREATE_BO PVR_IOCTL(0x01, DRM_IOWR, create_bo)
+#define DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET PVR_IOCTL(0x02, DRM_IOWR, get_bo_mmap_offset)
+#define DRM_IOCTL_PVR_CREATE_VM_CONTEXT PVR_IOCTL(0x03, DRM_IOWR, create_vm_context)
+#define DRM_IOCTL_PVR_DESTROY_VM_CONTEXT PVR_IOCTL(0x04, DRM_IOW, destroy_vm_context)
+#define DRM_IOCTL_PVR_VM_MAP PVR_IOCTL(0x05, DRM_IOW, vm_map)
+#define DRM_IOCTL_PVR_VM_UNMAP PVR_IOCTL(0x06, DRM_IOW, vm_unmap)
+#define DRM_IOCTL_PVR_CREATE_CONTEXT PVR_IOCTL(0x07, DRM_IOWR, create_context)
+#define DRM_IOCTL_PVR_DESTROY_CONTEXT PVR_IOCTL(0x08, DRM_IOW, destroy_context)
+#define DRM_IOCTL_PVR_CREATE_FREE_LIST PVR_IOCTL(0x09, DRM_IOWR, create_free_list)
+#define DRM_IOCTL_PVR_DESTROY_FREE_LIST PVR_IOCTL(0x0a, DRM_IOW, destroy_free_list)
+#define DRM_IOCTL_PVR_CREATE_HWRT_DATASET PVR_IOCTL(0x0b, DRM_IOWR, create_hwrt_dataset)
+#define DRM_IOCTL_PVR_DESTROY_HWRT_DATASET PVR_IOCTL(0x0c, DRM_IOW, destroy_hwrt_dataset)
+#define DRM_IOCTL_PVR_SUBMIT_JOBS PVR_IOCTL(0x0d, DRM_IOW, submit_jobs)
+
+/**
+ * DOC: PowerVR IOCTL DEV_QUERY interface
+ */
+
+/**
+ * struct drm_pvr_dev_query_gpu_info - Container used to fetch information about
+ * the graphics processor.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_GPU_INFO_GET.
+ */
+struct drm_pvr_dev_query_gpu_info {
+ /**
+ * @gpu_id: GPU identifier.
+ *
+ * For all currently supported GPUs this is the BVNC encoded as a 64-bit
+ * value as follows:
+ *
+ * +--------+--------+--------+-------+
+ * | 63..48 | 47..32 | 31..16 | 15..0 |
+ * +========+========+========+=======+
+ * | B | V | N | C |
+ * +--------+--------+--------+-------+
+ */
+ __u64 gpu_id;
+
+ /**
+ * @num_phantoms: Number of Phantoms present.
+ */
+ __u32 num_phantoms;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * struct drm_pvr_dev_query_runtime_info - Container used to fetch information
+ * about the graphics runtime.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET.
+ */
+struct drm_pvr_dev_query_runtime_info {
+ /**
+ * @free_list_min_pages: Minimum allowed free list size,
+ * in PM physical pages.
+ */
+ __u64 free_list_min_pages;
+
+ /**
+ * @free_list_max_pages: Maximum allowed free list size,
+ * in PM physical pages.
+ */
+ __u64 free_list_max_pages;
+
+ /**
+ * @common_store_alloc_region_size: Size of the Allocation
+ * Region within the Common Store used for coefficient and shared
+ * registers, in dwords.
+ */
+ __u32 common_store_alloc_region_size;
+
+ /**
+ * @common_store_partition_space_size: Size of the
+ * Partition Space within the Common Store for output buffers, in
+ * dwords.
+ */
+ __u32 common_store_partition_space_size;
+
+ /**
+ * @max_coeffs: Maximum coefficients, in dwords.
+ */
+ __u32 max_coeffs;
+
+ /**
+ * @cdm_max_local_mem_size_regs: Maximum amount of local
+ * memory available to a compute kernel, in dwords.
+ */
+ __u32 cdm_max_local_mem_size_regs;
+};
+
+/**
+ * struct drm_pvr_dev_query_quirks - Container used to fetch information about
+ * hardware fixes for which the device may require support in the user mode
+ * driver.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_QUIRKS_GET.
+ */
+struct drm_pvr_dev_query_quirks {
+ /**
+ * @quirks: A userspace address for the hardware quirks __u32 array.
+ *
+ * The first @musthave_count items in the list are quirks that the
+ * client must support for this device. If userspace does not support
+ * all these quirks then functionality is not guaranteed and client
+ * initialisation must fail.
+ * The remaining quirks in the list affect userspace and the kernel or
+ * firmware. They are disabled by default and require userspace to
+ * opt-in. The opt-in mechanism depends on the quirk.
+ */
+ __u64 quirks;
+
+ /** @count: Length of @quirks (number of __u32). */
+ __u16 count;
+
+ /**
+ * @musthave_count: The number of entries in @quirks that are
+ * mandatory, starting at index 0.
+ */
+ __u16 musthave_count;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * struct drm_pvr_dev_query_enhancements - Container used to fetch information
+ * about optional enhancements supported by the device that require support in
+ * the user mode driver.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_ENHANCEMENTS_GET.
+ */
+struct drm_pvr_dev_query_enhancements {
+ /**
+ * @enhancements: A userspace address for the hardware enhancements
+ * __u32 array.
+ *
+ * These enhancements affect userspace and the kernel or firmware. They
+ * are disabled by default and require userspace to opt-in. The opt-in
+ * mechanism depends on the enhancement.
+ */
+ __u64 enhancements;
+
+ /** @count: Length of @enhancements (number of __u32). */
+ __u16 count;
+
+ /** @_padding_a: Reserved. This field must be zeroed. */
+ __u16 _padding_a;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+};
+
+/**
+ * enum drm_pvr_heap_id - Array index for heap info data returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * For compatibility reasons all indices will be present in the returned array,
+ * however some heaps may not be present. These are indicated where
+ * &struct drm_pvr_heap.size is set to zero.
+ */
+enum drm_pvr_heap_id {
+ /** @DRM_PVR_HEAP_GENERAL: General purpose heap. */
+ DRM_PVR_HEAP_GENERAL = 0,
+ /** @DRM_PVR_HEAP_PDS_CODE_DATA: PDS code and data heap. */
+ DRM_PVR_HEAP_PDS_CODE_DATA,
+ /** @DRM_PVR_HEAP_USC_CODE: USC code heap. */
+ DRM_PVR_HEAP_USC_CODE,
+ /** @DRM_PVR_HEAP_RGNHDR: Region header heap. Only used if GPU has BRN63142. */
+ DRM_PVR_HEAP_RGNHDR,
+ /** @DRM_PVR_HEAP_VIS_TEST: Visibility test heap. */
+ DRM_PVR_HEAP_VIS_TEST,
+ /** @DRM_PVR_HEAP_TRANSFER_FRAG: Transfer fragment heap. */
+ DRM_PVR_HEAP_TRANSFER_FRAG,
+
+ /**
+ * @DRM_PVR_HEAP_COUNT: The number of heaps returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * More heaps may be added, so this also serves as the copy limit when
+ * sent by the caller.
+ */
+ DRM_PVR_HEAP_COUNT
+ /* Please only add additional heaps above DRM_PVR_HEAP_COUNT! */
+};
+
+/**
+ * struct drm_pvr_heap - Container holding information about a single heap.
+ *
+ * This will always be fetched as an array.
+ */
+struct drm_pvr_heap {
+ /** @base: Base address of heap. */
+ __u64 base;
+
+ /** @size: Size of heap, in bytes. Will be 0 if the heap is not present. */
+ __u64 size;
+
+ /** @flags: Flags for this heap. Currently always 0. */
+ __u32 flags;
+
+ /** @page_size_log2: Log2 of page size. */
+ __u32 page_size_log2;
+};
+
+/**
+ * struct drm_pvr_dev_query_heap_info - Container used to fetch information
+ * about heaps supported by the device driver.
+ *
+ * Please note all driver-supported heaps will be returned up to &heaps.count.
+ * Some heaps will not be present in all devices, which will be indicated by
+ * &struct drm_pvr_heap.size being set to zero.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+struct drm_pvr_dev_query_heap_info {
+ /**
+ * @heaps: Array of &struct drm_pvr_heap. If pointer is NULL, the count
+ * and stride will be updated with those known to the driver version, to
+ * facilitate allocation by the caller.
+ */
+ struct drm_pvr_obj_array heaps;
+};
+
+/**
+ * enum drm_pvr_static_data_area_usage - Array index for static data area info
+ * returned by %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
+ *
+ * For compatibility reasons all indices will be present in the returned array,
+ * however some areas may not be present. These are indicated where
+ * &struct drm_pvr_static_data_area.size is set to zero.
+ */
+enum drm_pvr_static_data_area_usage {
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_EOT: End of Tile PDS program code segment.
+ *
+ * The End of Tile PDS task runs at completion of a tile during a fragment job, and is
+ * responsible for emitting the tile to the Pixel Back End.
+ */
+ DRM_PVR_STATIC_DATA_AREA_EOT = 0,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_FENCE: MCU fence area, used during cache flush and
+ * invalidation.
+ *
+ * This must point to valid physical memory but the contents otherwise are not used.
+ */
+ DRM_PVR_STATIC_DATA_AREA_FENCE,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_VDM_SYNC: VDM sync program.
+ *
+ * The VDM sync program is used to synchronise multiple areas of the GPU hardware.
+ */
+ DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
+
+ /**
+ * @DRM_PVR_STATIC_DATA_AREA_YUV_CSC: YUV coefficients.
+ *
+ * Area contains up to 16 slots with stride of 64 bytes. Each is a 3x4 matrix of u16 fixed
+ * point numbers, with 1 sign bit, 2 integer bits and 13 fractional bits.
+ *
+ * The slots are:
+ * 0 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_RGB_IDENTITY_KHR
+ * 1 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (full range)
+ * 2 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_IDENTITY_KHR (conformant range)
+ * 3 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (full range)
+ * 4 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range)
+ * 5 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (full range)
+ * 6 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range)
+ * 7 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (full range)
+ * 8 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range)
+ * 9 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_601_KHR (conformant range, 10 bit)
+ * 10 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_709_KHR (conformant range, 10 bit)
+ * 11 = VK_SAMPLER_YCBCR_MODEL_CONVERSION_YCBCR_2020_KHR (conformant range, 10 bit)
+ * 14 = Identity (biased)
+ * 15 = Identity
+ */
+ DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
+};
+
+/**
+ * struct drm_pvr_static_data_area - Container holding information about a
+ * single static data area.
+ *
+ * This will always be fetched as an array.
+ */
+struct drm_pvr_static_data_area {
+ /**
+ * @area_usage: Usage of static data area.
+ * See &enum drm_pvr_static_data_area_usage.
+ */
+ __u16 area_usage;
+
+ /**
+ * @location_heap_id: Array index of the heap where this static data
+ * area is located. The heap array is fetched using
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+ __u16 location_heap_id;
+
+ /** @size: Size of static data area. Not present if set to zero. */
+ __u32 size;
+
+ /** @offset: Offset of static data area from start of heap. */
+ __u64 offset;
+};
+
+/**
+ * struct drm_pvr_dev_query_static_data_areas - Container used to fetch
+ * information about the static data areas in heaps supported by the device
+ * driver.
+ *
+ * Please note all driver-supported static data areas will be returned up to
+ * &static_data_areas.count. Some will not be present for all devices, which
+ * will be indicated by &struct drm_pvr_static_data_area.size being set to zero.
+ *
+ * Further, some heaps will not be present either. See &struct
+ * drm_pvr_dev_query_heap_info.
+ *
+ * When fetching this type &struct drm_pvr_ioctl_dev_query_args.type must be set
+ * to %DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET.
+ */
+struct drm_pvr_dev_query_static_data_areas {
+ /**
+ * @static_data_areas: Array of &struct drm_pvr_static_data_area. If
+ * pointer is NULL, the count and stride will be updated with those
+ * known to the driver version, to facilitate allocation by the caller.
+ */
+ struct drm_pvr_obj_array static_data_areas;
+};
+
+/**
+ * enum drm_pvr_dev_query - For use with &drm_pvr_ioctl_dev_query_args.type to
+ * indicate the type of the receiving container.
+ *
+ * Append only. Do not reorder.
+ */
+enum drm_pvr_dev_query {
+ /**
+ * @DRM_PVR_DEV_QUERY_GPU_INFO_GET: The dev query args contain a pointer
+ * to &struct drm_pvr_dev_query_gpu_info.
+ */
+ DRM_PVR_DEV_QUERY_GPU_INFO_GET = 0,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_runtime_info.
+ */
+ DRM_PVR_DEV_QUERY_RUNTIME_INFO_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_QUIRKS_GET: The dev query args contain a pointer
+ * to &struct drm_pvr_dev_query_quirks.
+ */
+ DRM_PVR_DEV_QUERY_QUIRKS_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_enhancements.
+ */
+ DRM_PVR_DEV_QUERY_ENHANCEMENTS_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_HEAP_INFO_GET: The dev query args contain a
+ * pointer to &struct drm_pvr_dev_query_heap_info.
+ */
+ DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
+
+ /**
+ * @DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET: The dev query args contain
+ * a pointer to &struct drm_pvr_dev_query_static_data_areas.
+ */
+ DRM_PVR_DEV_QUERY_STATIC_DATA_AREAS_GET,
+};
+
+/**
+ * struct drm_pvr_ioctl_dev_query_args - Arguments for %DRM_IOCTL_PVR_DEV_QUERY.
+ */
+struct drm_pvr_ioctl_dev_query_args {
+ /**
+ * @type: Type of query and output struct. See &enum drm_pvr_dev_query.
+ */
+ __u32 type;
+
+ /**
+ * @size: Size of the receiving struct, see @type.
+ *
+ * After a successful call this will be updated to the written byte
+ * length.
+ * Can also be used to get the minimum byte length (see @pointer).
+ * This allows additional fields to be appended to the structs in
+ * future.
+ */
+ __u32 size;
+
+ /**
+ * @pointer: Pointer to struct @type.
+ *
+ * Must be large enough to contain @size bytes.
+ * If pointer is NULL, the expected size will be returned in the @size
+ * field, but no other data will be written.
+ */
+ __u64 pointer;
+};
+
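A sketch of the two-pass pattern this enables, fetching the heap array (pvr_fd is an assumed open PowerVR render node; the later sketches in this file reuse these includes):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/pvr_drm.h>

/* heaps[] must provide DRM_PVR_HEAP_COUNT entries. */
static int pvr_query_heaps(int pvr_fd, struct drm_pvr_heap *heaps)
{
	struct drm_pvr_dev_query_heap_info heap_info = { 0 };
	struct drm_pvr_ioctl_dev_query_args args = {
		.type = DRM_PVR_DEV_QUERY_HEAP_INFO_GET,
		.size = sizeof(heap_info),
		.pointer = (uintptr_t)&heap_info,
	};

	/* First pass: the array pointer is NULL, so the driver only fills
	 * in the count and stride it knows about. */
	if (ioctl(pvr_fd, DRM_IOCTL_PVR_DEV_QUERY, &args))
		return -1;

	/* Second pass: provide storage, capped at what this header knows. */
	heap_info.heaps = DRM_PVR_OBJ_ARRAY(DRM_PVR_HEAP_COUNT, heaps);
	return ioctl(pvr_fd, DRM_IOCTL_PVR_DEV_QUERY, &args);
}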
+/**
+ * DOC: PowerVR IOCTL CREATE_BO interface
+ */
+
+/**
+ * DOC: Flags for CREATE_BO
+ *
+ * We use "device" to refer to the GPU here because of the ambiguity between CPU and GPU in some
+ * fonts.
+ *
+ * Device mapping options
+ * :DRM_PVR_BO_BYPASS_DEVICE_CACHE: Specify that device accesses to this memory will bypass the
+ * cache. This is used for buffers that will either be regularly updated by the CPU (eg free
+ * lists) or will be accessed only once and are therefore not worth caching (eg partial render
+ * buffers).
+ * By default, the device flushes its memory caches after every job, so this is not normally
+ * required for coherency.
+ * :DRM_PVR_BO_PM_FW_PROTECT: Specify that only the Parameter Manager (PM) and/or firmware
+ * processor should be allowed to access this memory when mapped to the device. It is not
+ * valid to specify this flag with DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS.
+ *
+ * CPU mapping options
+ * :DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS: Allow userspace to map and access the contents of this
+ * memory. It is not valid to specify this flag with DRM_PVR_BO_PM_FW_PROTECT.
+ */
+#define DRM_PVR_BO_BYPASS_DEVICE_CACHE _BITULL(0)
+#define DRM_PVR_BO_PM_FW_PROTECT _BITULL(1)
+#define DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS _BITULL(2)
+/* Bits 3..63 are reserved. */
+
+#define DRM_PVR_BO_FLAGS_MASK (DRM_PVR_BO_BYPASS_DEVICE_CACHE | DRM_PVR_BO_PM_FW_PROTECT | \
+ DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS)
+
+/**
+ * struct drm_pvr_ioctl_create_bo_args - Arguments for %DRM_IOCTL_PVR_CREATE_BO
+ */
+struct drm_pvr_ioctl_create_bo_args {
+ /**
+ * @size: [IN] Size of buffer object to create. This must be page size
+ * aligned.
+ */
+ __u64 size;
+
+ /**
+ * @handle: [OUT] GEM handle of the new buffer object for use in
+ * userspace.
+ */
+ __u32 handle;
+
+ /** @_padding_c: Reserved. This field must be zeroed. */
+ __u32 _padding_c;
+
+ /**
+ * @flags: [IN] Options which will affect the behaviour of this
+ * creation operation and future mapping operations on the created
+ * object. This field must be a valid combination of ``DRM_PVR_BO_*``
+ * values, with all bits marked as reserved set to zero.
+ */
+ __u64 flags;
+};
+
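A sketch creating a CPU-mappable buffer (the caller is responsible for page-aligning size):

static int pvr_create_mappable_bo(int pvr_fd, __u64 size, __u32 *handle)
{
	struct drm_pvr_ioctl_create_bo_args args = {
		.size = size, /* page-aligned by the caller */
		.flags = DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS,
	};

	if (ioctl(pvr_fd, DRM_IOCTL_PVR_CREATE_BO, &args))
		return -1;
	*handle = args.handle;
	return 0;
}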
+/**
+ * DOC: PowerVR IOCTL GET_BO_MMAP_OFFSET interface
+ */
+
+/**
+ * struct drm_pvr_ioctl_get_bo_mmap_offset_args - Arguments for
+ * %DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET
+ *
+ * Like other DRM drivers, the "mmap" IOCTL doesn't actually map any memory.
+ * Instead, it allocates a fake offset which refers to the specified buffer
+ * object. This offset can be used with a real mmap call on the DRM device
+ * itself.
+ */
+struct drm_pvr_ioctl_get_bo_mmap_offset_args {
+ /** @handle: [IN] GEM handle of the buffer object to be mapped. */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+
+ /** @offset: [OUT] Fake offset to use in the real mmap call. */
+ __u64 offset;
+};
+
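Continuing the sketch above, fetching the fake offset and doing the real mmap:

#include <sys/mman.h>

static void *pvr_map_bo(int pvr_fd, __u32 handle, size_t size)
{
	struct drm_pvr_ioctl_get_bo_mmap_offset_args args = {
		.handle = handle,
	};

	if (ioctl(pvr_fd, DRM_IOCTL_PVR_GET_BO_MMAP_OFFSET, &args))
		return MAP_FAILED;
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    pvr_fd, args.offset);
}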
+/**
+ * DOC: PowerVR IOCTL CREATE_VM_CONTEXT and DESTROY_VM_CONTEXT interfaces
+ */
+
+/**
+ * struct drm_pvr_ioctl_create_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_create_vm_context_args {
+ /** @handle: [OUT] Handle for new VM context. */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_vm_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_VM_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_vm_context_args {
+ /**
+ * @handle: [IN] Handle for VM context to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL VM_MAP and VM_UNMAP interfaces
+ *
+ * The VM UAPI allows userspace to create buffer object mappings in GPU virtual address space.
+ *
+ * The client is responsible for managing GPU address space. It should allocate mappings within
+ * the heaps returned by %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ *
+ * %DRM_IOCTL_PVR_VM_MAP creates a new mapping. The client provides the target virtual address for
+ * the mapping. Size and offset within the mapped buffer object can be specified, so the client can
+ * partially map a buffer.
+ *
+ * %DRM_IOCTL_PVR_VM_UNMAP removes a mapping. The entire mapping will be removed from GPU address
+ * space only if the size of the mapping matches that known to the driver.
+ */
+
+/**
+ * struct drm_pvr_ioctl_vm_map_args - Arguments for %DRM_IOCTL_PVR_VM_MAP.
+ */
+struct drm_pvr_ioctl_vm_map_args {
+ /**
+ * @vm_context_handle: [IN] Handle for VM context for this mapping to
+ * exist in.
+ */
+ __u32 vm_context_handle;
+
+ /** @flags: [IN] Flags which affect this mapping. Currently always 0. */
+ __u32 flags;
+
+ /**
+ * @device_addr: [IN] Requested device-virtual address for the mapping.
+ * This must be non-zero and aligned to the device page size for the
+ * heap containing the requested address. It is an error to specify an
+ * address which is not contained within one of the heaps returned by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET.
+ */
+ __u64 device_addr;
+
+ /**
+ * @handle: [IN] Handle of the target buffer object. This must be a
+ * valid handle returned by %DRM_IOCTL_PVR_CREATE_BO.
+ */
+ __u32 handle;
+
+ /** @_padding_14: Reserved. This field must be zeroed. */
+ __u32 _padding_14;
+
+ /**
+ * @offset: [IN] Offset into the target bo from which to begin the
+ * mapping.
+ */
+ __u64 offset;
+
+ /**
+ * @size: [IN] Size of the requested mapping. Must be aligned to
+ * the device page size for the heap containing the requested address,
+ * as well as the host page size. When added to @device_addr, the
+ * result must not overflow the heap which contains @device_addr (i.e.
+ * the range specified by @device_addr and @size must be completely
+ * contained within a single heap specified by
+ * %DRM_PVR_DEV_QUERY_HEAP_INFO_GET).
+ */
+ __u64 size;
+};
+
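A sketch mapping a whole buffer object, assuming the caller has reserved gpu_va inside one of the queried heaps and respects its page-size alignment:

static int pvr_map_whole_bo(int pvr_fd, __u32 vm_ctx, __u32 bo_handle,
			    __u64 gpu_va, __u64 size)
{
	struct drm_pvr_ioctl_vm_map_args args = {
		.vm_context_handle = vm_ctx,
		.device_addr = gpu_va, /* inside a single heap */
		.handle = bo_handle,
		.offset = 0, /* map from the start of the BO */
		.size = size,
	};

	return ioctl(pvr_fd, DRM_IOCTL_PVR_VM_MAP, &args);
}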
+/**
+ * struct drm_pvr_ioctl_vm_unmap_args - Arguments for %DRM_IOCTL_PVR_VM_UNMAP.
+ */
+struct drm_pvr_ioctl_vm_unmap_args {
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that this mapping
+ * exists in.
+ */
+ __u32 vm_context_handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+
+ /**
+ * @device_addr: [IN] Device-virtual address at the start of the target
+ * mapping. This must be non-zero.
+ */
+ __u64 device_addr;
+
+ /**
+ * @size: Size in bytes of the target mapping. This must be non-zero.
+ */
+ __u64 size;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_CONTEXT and DESTROY_CONTEXT interfaces
+ */
+
+/**
+ * enum drm_pvr_ctx_priority - Arguments for
+ * &drm_pvr_ioctl_create_context_args.priority
+ */
+enum drm_pvr_ctx_priority {
+ /** @DRM_PVR_CTX_PRIORITY_LOW: Priority below normal. */
+ DRM_PVR_CTX_PRIORITY_LOW = -512,
+
+ /** @DRM_PVR_CTX_PRIORITY_NORMAL: Normal priority. */
+ DRM_PVR_CTX_PRIORITY_NORMAL = 0,
+
+ /**
+ * @DRM_PVR_CTX_PRIORITY_HIGH: Priority above normal.
+ * Note this requires ``CAP_SYS_NICE`` or ``DRM_MASTER``.
+ */
+ DRM_PVR_CTX_PRIORITY_HIGH = 512,
+};
+
+/**
+ * enum drm_pvr_ctx_type - Arguments for
+ * &struct drm_pvr_ioctl_create_context_args.type
+ */
+enum drm_pvr_ctx_type {
+ /**
+ * @DRM_PVR_CTX_TYPE_RENDER: Render context.
+ */
+ DRM_PVR_CTX_TYPE_RENDER = 0,
+
+ /**
+ * @DRM_PVR_CTX_TYPE_COMPUTE: Compute context.
+ */
+ DRM_PVR_CTX_TYPE_COMPUTE,
+
+ /**
+ * @DRM_PVR_CTX_TYPE_TRANSFER_FRAG: Transfer context for fragment data
+ * master.
+ */
+ DRM_PVR_CTX_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_ioctl_create_context_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_CONTEXT
+ */
+struct drm_pvr_ioctl_create_context_args {
+ /**
+ * @type: [IN] Type of context to create.
+ *
+ * This must be one of the values defined by &enum drm_pvr_ctx_type.
+ */
+ __u32 type;
+
+ /** @flags: [IN] Flags for context. */
+ __u32 flags;
+
+ /**
+ * @priority: [IN] Priority of new context.
+ *
+ * This must be one of the values defined by &enum drm_pvr_ctx_priority.
+ */
+ __s32 priority;
+
+ /** @handle: [OUT] Handle for new context. */
+ __u32 handle;
+
+ /**
+ * @static_context_state: [IN] Pointer to static context state stream.
+ */
+ __u64 static_context_state;
+
+ /**
+ * @static_context_state_len: [IN] Length of static context state, in bytes.
+ */
+ __u32 static_context_state_len;
+
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that this context is
+ * associated with.
+ */
+ __u32 vm_context_handle;
+
+ /**
+ * @callstack_addr: [IN] Address for initial call stack pointer. Only valid
+ * if @type is %DRM_PVR_CTX_TYPE_RENDER, otherwise must be 0.
+ */
+ __u64 callstack_addr;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_context_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_CONTEXT
+ */
+struct drm_pvr_ioctl_destroy_context_args {
+ /**
+ * @handle: [IN] Handle for context to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_FREE_LIST and DESTROY_FREE_LIST interfaces
+ */
+
+/**
+ * struct drm_pvr_ioctl_create_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_FREE_LIST
+ *
+ * Free list arguments have the following constraints:
+ *
+ * - @max_num_pages must be greater than zero.
+ * - @grow_threshold must be between 0 and 100.
+ * - @grow_num_pages must be less than or equal to @max_num_pages.
+ * - @initial_num_pages, @max_num_pages and @grow_num_pages must be multiples
+ *   of 4.
+ * - When @grow_num_pages is 0, @initial_num_pages must be equal to
+ *   @max_num_pages.
+ * - When @grow_num_pages is non-zero, @initial_num_pages must be less than
+ *   @max_num_pages.
+ */
+struct drm_pvr_ioctl_create_free_list_args {
+ /**
+ * @free_list_gpu_addr: [IN] Address of GPU mapping of buffer object
+ * containing memory to be used by free list.
+ *
+ * The mapped region of the buffer object must be at least
+ * @max_num_pages * ``sizeof(__u32)``.
+ *
+ * The buffer object must have been created with
+ * %DRM_PVR_BO_PM_FW_PROTECT set and
+ * %DRM_PVR_BO_ALLOW_CPU_USERSPACE_ACCESS not set.
+ */
+ __u64 free_list_gpu_addr;
+
+ /** @initial_num_pages: [IN] Pages initially allocated to free list. */
+ __u32 initial_num_pages;
+
+ /** @max_num_pages: [IN] Maximum number of pages in free list. */
+ __u32 max_num_pages;
+
+ /** @grow_num_pages: [IN] Pages to grow free list by per request. */
+ __u32 grow_num_pages;
+
+ /**
+ * @grow_threshold: [IN] Percentage of FL memory used that should
+ * trigger a new grow request.
+ */
+ __u32 grow_threshold;
+
+ /**
+ * @vm_context_handle: [IN] Handle for VM context that the free list buffer
+ * object is mapped in.
+ */
+ __u32 vm_context_handle;
+
+ /**
+ * @handle: [OUT] Handle for created free list.
+ */
+ __u32 handle;
+};
+
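A sketch of the simplest legal configuration, a fixed-size free list (grow_num_pages == 0 forces initial_num_pages == max_num_pages):

static int pvr_create_fixed_free_list(int pvr_fd, __u32 vm_ctx,
				      __u64 fl_gpu_va, __u32 pages,
				      __u32 *handle)
{
	struct drm_pvr_ioctl_create_free_list_args args = {
		.free_list_gpu_addr = fl_gpu_va,
		.initial_num_pages = pages, /* multiple of 4 */
		.max_num_pages = pages,
		.grow_num_pages = 0,
		.grow_threshold = 0,
		.vm_context_handle = vm_ctx,
	};

	if (ioctl(pvr_fd, DRM_IOCTL_PVR_CREATE_FREE_LIST, &args))
		return -1;
	*handle = args.handle;
	return 0;
}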
+/**
+ * struct drm_pvr_ioctl_destroy_free_list_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_FREE_LIST
+ */
+struct drm_pvr_ioctl_destroy_free_list_args {
+ /**
+ * @handle: [IN] Handle for free list to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL CREATE_HWRT_DATASET and DESTROY_HWRT_DATASET interfaces
+ */
+
+/**
+ * struct drm_pvr_create_hwrt_geom_data_args - Geometry data arguments used for
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.geom_data_args.
+ */
+struct drm_pvr_create_hwrt_geom_data_args {
+ /** @tpc_dev_addr: [IN] Tail pointer cache GPU virtual address. */
+ __u64 tpc_dev_addr;
+
+ /** @tpc_size: [IN] Size of TPC, in bytes. */
+ __u32 tpc_size;
+
+ /** @tpc_stride: [IN] Stride between layers in TPC, in pages */
+ __u32 tpc_stride;
+
+ /** @vheap_table_dev_addr: [IN] VHEAP table GPU virtual address. */
+ __u64 vheap_table_dev_addr;
+
+ /** @rtc_dev_addr: [IN] Render Target Cache virtual address. */
+ __u64 rtc_dev_addr;
+};
+
+/**
+ * struct drm_pvr_create_hwrt_rt_data_args - Render target arguments used for
+ * &struct drm_pvr_ioctl_create_hwrt_dataset_args.rt_data_args.
+ */
+struct drm_pvr_create_hwrt_rt_data_args {
+ /** @pm_mlist_dev_addr: [IN] PM MLIST GPU virtual address. */
+ __u64 pm_mlist_dev_addr;
+
+ /** @macrotile_array_dev_addr: [IN] Macrotile array GPU virtual address. */
+ __u64 macrotile_array_dev_addr;
+
+ /** @region_header_dev_addr: [IN] Region header array GPU virtual address. */
+ __u64 region_header_dev_addr;
+};
+
+#define PVR_DRM_HWRT_FREE_LIST_LOCAL 0
+#define PVR_DRM_HWRT_FREE_LIST_GLOBAL 1U
+
+/**
+ * struct drm_pvr_ioctl_create_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_CREATE_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_create_hwrt_dataset_args {
+ /** @geom_data_args: [IN] Geometry data arguments. */
+ struct drm_pvr_create_hwrt_geom_data_args geom_data_args;
+
+ /**
+ * @rt_data_args: [IN] Array of render target arguments.
+ *
+ * Each entry in this array represents a render target in a double buffered
+ * setup.
+ */
+ struct drm_pvr_create_hwrt_rt_data_args rt_data_args[2];
+
+ /**
+ * @free_list_handles: [IN] Array of free list handles.
+ *
+ * The free list referenced by free_list_handles[PVR_DRM_HWRT_FREE_LIST_LOCAL]
+ * must have an initial size of at least that reported by
+ * &drm_pvr_dev_query_runtime_info.free_list_min_pages.
+ */
+ __u32 free_list_handles[2];
+
+ /** @width: [IN] Width in pixels. */
+ __u32 width;
+
+ /** @height: [IN] Height in pixels. */
+ __u32 height;
+
+ /** @samples: [IN] Number of samples. */
+ __u32 samples;
+
+ /** @layers: [IN] Number of layers. */
+ __u32 layers;
+
+ /** @isp_merge_lower_x: [IN] Lower X coefficient for triangle merging. */
+ __u32 isp_merge_lower_x;
+
+ /** @isp_merge_lower_y: [IN] Lower Y coefficient for triangle merging. */
+ __u32 isp_merge_lower_y;
+
+ /** @isp_merge_scale_x: [IN] Scale X coefficient for triangle merging. */
+ __u32 isp_merge_scale_x;
+
+ /** @isp_merge_scale_y: [IN] Scale Y coefficient for triangle merging. */
+ __u32 isp_merge_scale_y;
+
+ /** @isp_merge_upper_x: [IN] Upper X coefficient for triangle merging. */
+ __u32 isp_merge_upper_x;
+
+ /** @isp_merge_upper_y: [IN] Upper Y coefficient for triangle merging. */
+ __u32 isp_merge_upper_y;
+
+ /**
+ * @region_header_size: [IN] Size of region header array. This common field is used by
+ * both render targets in this data set.
+ *
+ * The units for this field differ depending on what version of the simple internal
+ * parameter format the device uses. If format 2 is in use then this is interpreted as the
+ * number of region headers. For other formats it is interpreted as the size in dwords.
+ */
+ __u32 region_header_size;
+
+ /**
+ * @handle: [OUT] Handle for created HWRT dataset.
+ */
+ __u32 handle;
+};
+
+/**
+ * struct drm_pvr_ioctl_destroy_hwrt_dataset_args - Arguments for
+ * %DRM_IOCTL_PVR_DESTROY_HWRT_DATASET
+ */
+struct drm_pvr_ioctl_destroy_hwrt_dataset_args {
+ /**
+ * @handle: [IN] Handle for HWRT dataset to be destroyed.
+ */
+ __u32 handle;
+
+ /** @_padding_4: Reserved. This field must be zeroed. */
+ __u32 _padding_4;
+};
+
+/**
+ * DOC: PowerVR IOCTL SUBMIT_JOBS interface
+ */
+
+/**
+ * DOC: Flags for the drm_pvr_sync_op object.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK
+ *
+ * Handle type mask for the drm_pvr_sync_op::flags field.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ
+ *
+ * Indicates the handle passed in drm_pvr_sync_op::handle is a syncobj handle.
+ * This is the default type.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ
+ *
+ * Indicates the handle passed in drm_pvr_sync_op::handle is a timeline syncobj handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_SIGNAL
+ *
+ * Signal operation requested. The out-fence bound to the job will be attached to
+ * the syncobj whose handle is passed in drm_pvr_sync_op::handle.
+ *
+ * .. c:macro:: DRM_PVR_SYNC_OP_FLAG_WAIT
+ *
+ * Wait operation requested. The job will wait for this particular syncobj or syncobj
+ * point to be signaled before being started.
+ * This is the default operation.
+ */
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK 0xf
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ 0
+#define DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ 1
+#define DRM_PVR_SYNC_OP_FLAG_SIGNAL _BITULL(31)
+#define DRM_PVR_SYNC_OP_FLAG_WAIT 0
+
+#define DRM_PVR_SYNC_OP_FLAGS_MASK (DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_MASK | \
+ DRM_PVR_SYNC_OP_FLAG_SIGNAL)
+
+/**
+ * struct drm_pvr_sync_op - Object describing a sync operation
+ */
+struct drm_pvr_sync_op {
+ /** @handle: Handle of sync object. */
+ __u32 handle;
+
+ /** @flags: Combination of ``DRM_PVR_SYNC_OP_FLAG_`` flags. */
+ __u32 flags;
+
+ /** @value: Timeline value for this drm_syncobj. MBZ for a binary syncobj. */
+ __u64 value;
+};
+
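For illustration, a wait on a binary syncobj plus a signal on a timeline point (wait_syncobj and signal_timeline are caller-provided handles):

struct drm_pvr_sync_op sync_ops[] = {
	{ /* wait (the default op) on a binary syncobj (the default type) */
		.handle = wait_syncobj,
		.flags = DRM_PVR_SYNC_OP_FLAG_WAIT |
			 DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_SYNCOBJ,
		.value = 0, /* MBZ for a binary syncobj */
	},
	{ /* signal point 5 of a timeline syncobj on job completion */
		.handle = signal_timeline,
		.flags = DRM_PVR_SYNC_OP_FLAG_SIGNAL |
			 DRM_PVR_SYNC_OP_FLAG_HANDLE_TYPE_TIMELINE_SYNCOBJ,
		.value = 5,
	},
};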
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl geometry command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST
+ *
+ * Indicates whether this is the first command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST
+ *
+ * Indicates whether this is the last command to be issued for a render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the geometry cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_GEOM_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_GEOM_CMD_FIRST | \
+ DRM_PVR_SUBMIT_JOB_GEOM_CMD_LAST | \
+ DRM_PVR_SUBMIT_JOB_GEOM_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl fragment command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE
+ *
+ * Use a single core in a multi-core setup.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER
+ *
+ * Indicates whether a depth buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER
+ *
+ * Indicates whether a stencil buffer is present.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP
+ *
+ * Disallow compute jobs from overlapping with this render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS
+ *
+ * Indicates whether this render produces visibility results.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER
+ *
+ * Indicates whether partial renders write to a scratch buffer instead of
+ * the final surface. It also forces the full screen copy expected to be
+ * present on the last render after all partial renders have completed.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE
+ *
+ * Disable pixel merging for this render.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the fragment cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER _BITULL(2)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP _BITULL(3)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER _BITULL(4)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS _BITULL(5)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER _BITULL(6)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE _BITULL(7)
+#define DRM_PVR_SUBMIT_JOB_FRAG_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_FRAG_CMD_SINGLE_CORE | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_DEPTHBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_STENCILBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_PREVENT_CDM_OVERLAP | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_SCRATCHBUFFER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_GET_VIS_RESULTS | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_PARTIAL_RENDER | \
+ DRM_PVR_SUBMIT_JOB_FRAG_CMD_DISABLE_PIXELMERGE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl compute command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP
+ *
+ * Disallow other jobs from overlapping with this compute job.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the compute cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP _BITULL(0)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE _BITULL(1)
+#define DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_FLAGS_MASK \
+ (DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_PREVENT_ALL_OVERLAP | \
+ DRM_PVR_SUBMIT_JOB_COMPUTE_CMD_SINGLE_CORE)
+
+/**
+ * DOC: Flags for SUBMIT_JOB ioctl transfer command.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+ *
+ * Forces the job to use a single core in a multi-core device.
+ *
+ * .. c:macro:: DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK
+ *
+ * Logical OR of all the transfer cmd flags.
+ */
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE _BITULL(0)
+
+#define DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_FLAGS_MASK \
+ DRM_PVR_SUBMIT_JOB_TRANSFER_CMD_SINGLE_CORE
+
+/**
+ * enum drm_pvr_job_type - Arguments for &struct drm_pvr_job.job_type
+ */
+enum drm_pvr_job_type {
+ /** @DRM_PVR_JOB_TYPE_GEOMETRY: Job type is geometry. */
+ DRM_PVR_JOB_TYPE_GEOMETRY = 0,
+
+ /** @DRM_PVR_JOB_TYPE_FRAGMENT: Job type is fragment. */
+ DRM_PVR_JOB_TYPE_FRAGMENT,
+
+ /** @DRM_PVR_JOB_TYPE_COMPUTE: Job type is compute. */
+ DRM_PVR_JOB_TYPE_COMPUTE,
+
+ /** @DRM_PVR_JOB_TYPE_TRANSFER_FRAG: Job type is a fragment transfer. */
+ DRM_PVR_JOB_TYPE_TRANSFER_FRAG,
+};
+
+/**
+ * struct drm_pvr_hwrt_data_ref - Reference HWRT data
+ */
+struct drm_pvr_hwrt_data_ref {
+ /** @set_handle: HWRT data set handle. */
+ __u32 set_handle;
+
+ /** @data_index: Index of the HWRT data inside the data set. */
+ __u32 data_index;
+};
+
+/**
+ * struct drm_pvr_job - Job arguments passed to the %DRM_IOCTL_PVR_SUBMIT_JOBS ioctl
+ */
+struct drm_pvr_job {
+ /**
+ * @type: [IN] Type of job being submitted
+ *
+ * This must be one of the values defined by &enum drm_pvr_job_type.
+ */
+ __u32 type;
+
+ /**
+ * @context_handle: [IN] Context handle.
+ *
+ * When @type is %DRM_PVR_JOB_TYPE_GEOMETRY, %DRM_PVR_JOB_TYPE_FRAGMENT,
+ * %DRM_PVR_JOB_TYPE_COMPUTE or %DRM_PVR_JOB_TYPE_TRANSFER_FRAG, this must
+ * be a valid handle returned by %DRM_IOCTL_PVR_CREATE_CONTEXT. The type of
+ * context must be compatible with the type of job being submitted.
+ */
+ __u32 context_handle;
+
+ /**
+ * @flags: [IN] Flags for command.
+ *
+ * These are job-dependent. See the ``DRM_PVR_SUBMIT_JOB_*`` flags above.
+ */
+ __u32 flags;
+
+ /**
+ * @cmd_stream_len: [IN] Length of command stream, in bytes.
+ */
+ __u32 cmd_stream_len;
+
+ /**
+ * @cmd_stream: [IN] Pointer to command stream for command.
+ *
+ * The command stream must be u64-aligned.
+ */
+ __u64 cmd_stream;
+
+ /** @sync_ops: [IN] Fragment sync operations. */
+ struct drm_pvr_obj_array sync_ops;
+
+ /**
+ * @hwrt: [IN] HWRT data used by render jobs (geometry or fragment).
+ *
+ * Must be zero for non-render jobs.
+ */
+ struct drm_pvr_hwrt_data_ref hwrt;
+};
+
+/**
+ * struct drm_pvr_ioctl_submit_jobs_args - Arguments for %DRM_IOCTL_PVR_SUBMIT_JOB
+ *
+ * If the syscall returns an error, it is important to check the value of
+ * @jobs.count. This indicates the index into @jobs.array where the
+ * error occurred.
+ */
+struct drm_pvr_ioctl_submit_jobs_args {
+ /** @jobs: [IN] Array of jobs to submit. */
+ struct drm_pvr_obj_array jobs;
+};
+
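Tying the earlier sketches together, submitting a single compute job (the command stream itself is built elsewhere):

static int pvr_submit_compute(int pvr_fd, __u32 compute_ctx,
			      const void *stream, __u32 stream_len,
			      struct drm_pvr_sync_op *ops, __u32 n_ops)
{
	struct drm_pvr_job job = {
		.type = DRM_PVR_JOB_TYPE_COMPUTE,
		.context_handle = compute_ctx,
		.cmd_stream = (uintptr_t)stream, /* must be u64-aligned */
		.cmd_stream_len = stream_len,
		.sync_ops = DRM_PVR_OBJ_ARRAY(n_ops, ops),
		/* .hwrt stays zero: this is not a render job */
	};
	struct drm_pvr_ioctl_submit_jobs_args args = {
		.jobs = DRM_PVR_OBJ_ARRAY(1, &job),
	};

	return ioctl(pvr_fd, DRM_IOCTL_PVR_SUBMIT_JOBS, &args);
}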
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* PVR_DRM_UAPI_H */
diff --git a/include/uapi/drm/qaic_accel.h b/include/uapi/drm/qaic_accel.h
index 43ac5d864512..9dab32316aee 100644
--- a/include/uapi/drm/qaic_accel.h
+++ b/include/uapi/drm/qaic_accel.h
@@ -287,8 +287,9 @@ struct qaic_execute_entry {
* struct qaic_partial_execute_entry - Defines a BO to resize and submit.
* @handle: In. GEM handle of the BO to commit to the device.
* @dir: In. Direction of data. 1 = to device, 2 = from device.
- * @resize: In. New size of the BO. Must be <= the original BO size. 0 is
- * short for no resize.
+ * @resize: In. New size of the BO. Must be <= the original BO size.
+ *          A @resize of 0 is interpreted as meaning no DMA transfer is
+ *          involved.
*/
struct qaic_partial_execute_entry {
__u32 handle;
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 3dfc0af8756a..dce1835eced4 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -41,6 +41,7 @@ extern "C" {
#define DRM_V3D_PERFMON_CREATE 0x08
#define DRM_V3D_PERFMON_DESTROY 0x09
#define DRM_V3D_PERFMON_GET_VALUES 0x0a
+#define DRM_V3D_SUBMIT_CPU 0x0b
#define DRM_IOCTL_V3D_SUBMIT_CL DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
@@ -56,6 +57,7 @@ extern "C" {
struct drm_v3d_perfmon_destroy)
#define DRM_IOCTL_V3D_PERFMON_GET_VALUES DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_PERFMON_GET_VALUES, \
struct drm_v3d_perfmon_get_values)
+#define DRM_IOCTL_V3D_SUBMIT_CPU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CPU, struct drm_v3d_submit_cpu)
#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
#define DRM_V3D_SUBMIT_EXTENSION 0x02
@@ -69,7 +71,13 @@ extern "C" {
struct drm_v3d_extension {
__u64 next;
__u32 id;
-#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_MULTI_SYNC 0x01
+#define DRM_V3D_EXT_ID_CPU_INDIRECT_CSD 0x02
+#define DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY 0x03
+#define DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY 0x04
+#define DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY 0x05
+#define DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY 0x06
+#define DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY 0x07
__u32 flags; /* mbz */
};
@@ -93,6 +101,7 @@ enum v3d_queue {
V3D_TFU,
V3D_CSD,
V3D_CACHE_CLEAN,
+ V3D_CPU,
};
/**
@@ -276,6 +285,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
DRM_V3D_PARAM_SUPPORTS_PERFMON,
DRM_V3D_PARAM_SUPPORTS_MULTISYNC_EXT,
+ DRM_V3D_PARAM_SUPPORTS_CPU_QUEUE,
};
struct drm_v3d_get_param {
@@ -319,6 +329,11 @@ struct drm_v3d_submit_tfu {
/* Pointer to an array of ioctl extensions*/
__u64 extensions;
+
+ struct {
+ __u32 ioc;
+ __u32 pad;
+ } v71;
};
/* Submits a compute shader for dispatch. This job will block on any
@@ -356,6 +371,234 @@ struct drm_v3d_submit_csd {
__u32 pad;
};
+/**
+ * struct drm_v3d_indirect_csd - ioctl extension for the CPU job to create an
+ * indirect CSD
+ *
+ * When an extension with the DRM_V3D_EXT_ID_CPU_INDIRECT_CSD id is defined, it
+ * points to this extension to define an indirect CSD submission. It creates a
+ * CPU job linked to a CSD job. The CPU job waits for the indirect CSD
+ * dependencies and, once they are signaled, it updates the CSD job
+ * configuration before allowing the CSD job to execute.
+ */
+struct drm_v3d_indirect_csd {
+ struct drm_v3d_extension base;
+
+ /* Indirect CSD */
+ struct drm_v3d_submit_csd submit;
+
+	/* Handle of the indirect BO, which must also be attached to the
+	 * indirect CSD job.
+ */
+ __u32 indirect;
+
+ /* Offset within the BO where the workgroup counts are stored */
+ __u32 offset;
+
+	/* Workgroup size */
+ __u32 wg_size;
+
+ /* Indices of the uniforms with the workgroup dispatch counts
+ * in the uniform stream. If the uniform rewrite is not needed,
+ * the offset must be 0xffffffff.
+ */
+ __u32 wg_uniform_offsets[3];
+};
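+
+/* Illustrative sketch (not part of the uAPI): filling an indirect CSD
+ * extension. `csd_args` is a prepared struct drm_v3d_submit_csd and `wg_bo`
+ * the handle of the BO holding the workgroup counts; both are assumptions of
+ * this example. The extension is then pointed to by the extensions field of
+ * struct drm_v3d_submit_cpu (defined below), with `wg_bo` as its only BO.
+ *
+ *    struct drm_v3d_indirect_csd indirect = {
+ *        .base = { .id = DRM_V3D_EXT_ID_CPU_INDIRECT_CSD },
+ *        .submit = csd_args,
+ *        .indirect = wg_bo,
+ *        .offset = 0,
+ *        .wg_size = 64,
+ *        .wg_uniform_offsets = { 0xffffffff, 0xffffffff, 0xffffffff },
+ *    };
+ */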
+
+/**
+ * struct drm_v3d_timestamp_query - ioctl extension for the CPU job to calculate
+ * a timestamp query
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY is defined, it points to
+ * this extension to define a timestamp query submission. This CPU job will
+ * calculate the timestamp query and update the query value within the
+ * timestamp BO. Moreover, it will signal the timestamp syncobj to indicate
+ * query availability.
+ */
+struct drm_v3d_timestamp_query {
+ struct drm_v3d_extension base;
+
+ /* Array of queries' offsets within the timestamp BO for their value */
+ __u64 offsets;
+
+	/* Array of timestamp syncobjs to indicate each query's availability */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* mbz */
+ __u32 pad;
+};
+
+/**
+ * struct drm_v3d_reset_timestamp_query - ioctl extension for the CPU job to
+ * reset timestamp queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a reset timestamp submission. This CPU
+ * job will reset the timestamp queries based on the value offset of the
+ * first query. Moreover, it will reset the timestamp syncobjs to clear
+ * query availability.
+ */
+struct drm_v3d_reset_timestamp_query {
+ struct drm_v3d_extension base;
+
+	/* Array of timestamp syncobjs to indicate each query's availability */
+ __u64 syncs;
+
+ /* Offset of the first query within the timestamp BO for its value */
+ __u32 offset;
+
+ /* Number of queries */
+ __u32 count;
+};
+
+/**
+ * struct drm_v3d_copy_timestamp_query - ioctl extension for the CPU job to copy
+ * query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY is defined, it
+ * points to this extension to define a copy timestamp query submission. This
+ * CPU job will copy the timestamp queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_timestamp_query {
+ struct drm_v3d_extension base;
+
+	/* Defines whether the values are written to the buffer as 64 or 32 bits */
+	__u8 do_64bit;
+
+	/* Defines whether a value is written even if the query is not available */
+	__u8 do_partial;
+
+	/* Defines whether the availability bit is written to the buffer */
+	__u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Array of queries' offsets within the timestamp BO for their value */
+ __u64 offsets;
+
+	/* Array of timestamp syncobjs to indicate each query's availability */
+ __u64 syncs;
+};
+
+/**
+ * struct drm_v3d_reset_performance_query - ioctl extension for the CPU job to
+ * reset performance queries
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a reset performance submission. This CPU
+ * job will reset the performance queries by resetting the values of the
+ * performance monitors. Moreover, it will reset the syncobj to reset query
+ * availability.
+ */
+struct drm_v3d_reset_performance_query {
+ struct drm_v3d_extension base;
+
+	/* Array of performance query syncobjs to indicate each query's availability */
+ __u64 syncs;
+
+ /* Number of queries */
+ __u32 count;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+/**
+ * struct drm_v3d_copy_performance_query - ioctl extension for the CPU job to copy
+ * performance query results to a buffer
+ *
+ * When an extension DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY is defined, it
+ * points to this extension to define a copy performance query submission. This
+ * CPU job will copy the performance queries results to a BO with the offset
+ * and stride defined in the extension.
+ */
+struct drm_v3d_copy_performance_query {
+ struct drm_v3d_extension base;
+
+	/* Defines whether the values are written to the buffer as 64 or 32 bits */
+	__u8 do_64bit;
+
+	/* Defines whether a value is written even if the query is not available */
+	__u8 do_partial;
+
+	/* Defines whether the availability bit is written to the buffer */
+	__u8 availability_bit;
+
+ /* mbz */
+ __u8 pad;
+
+ /* Offset of the buffer in the BO */
+ __u32 offset;
+
+ /* Stride of the buffer in the BO */
+ __u32 stride;
+
+ /* Number of performance monitors */
+ __u32 nperfmons;
+
+ /* Number of performance counters related to this query pool */
+ __u32 ncounters;
+
+ /* Number of queries */
+ __u32 count;
+
+	/* Array of performance query syncobjs to indicate each query's availability */
+ __u64 syncs;
+
+ /* Array of u64 user-pointers that point to an array of kperfmon_ids */
+ __u64 kperfmon_ids;
+};
+
+struct drm_v3d_submit_cpu {
+ /* Pointer to a u32 array of the BOs that are referenced by the job.
+ *
+ * For DRM_V3D_EXT_ID_CPU_INDIRECT_CSD, it must contain only one BO,
+ * that contains the workgroup counts.
+ *
+	 * For DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY, it must contain only one BO,
+ * that will contain the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_TIMESTAMP_QUERY, it must contain only
+ * one BO, that contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_TIMESTAMP_QUERY, it must contain two
+ * BOs. The first is the BO where the timestamp queries will be written
+ * to. The second is the BO that contains the timestamp.
+ *
+ * For DRM_V3D_EXT_ID_CPU_RESET_PERFORMANCE_QUERY, it must contain no
+ * BOs.
+ *
+ * For DRM_V3D_EXT_ID_CPU_COPY_PERFORMANCE_QUERY, it must contain one
+ * BO, where the performance queries will be written.
+ */
+ __u64 bo_handles;
+
+ /* Number of BO handles passed in (size is that times 4). */
+ __u32 bo_handle_count;
+
+ __u32 flags;
+
+	/* Pointer to an array of ioctl extensions */
+ __u64 extensions;
+};
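+
+/* Illustrative sketch (not part of the uAPI): a CPU job recording one
+ * timestamp query. `ts_bo` is the handle of the timestamp BO and `sync` a
+ * syncobj handle, both created beforehand; treating the offsets/syncs
+ * entries as u32 values is an assumption of this example.
+ *
+ *    __u32 offset = 0, syncobj = sync;
+ *    struct drm_v3d_timestamp_query query = {
+ *        .base = { .id = DRM_V3D_EXT_ID_CPU_TIMESTAMP_QUERY },
+ *        .offsets = (__u64)(uintptr_t)&offset,
+ *        .syncs = (__u64)(uintptr_t)&syncobj,
+ *        .count = 1,
+ *    };
+ *    struct drm_v3d_submit_cpu job = {
+ *        .bo_handles = (__u64)(uintptr_t)&ts_bo,
+ *        .bo_handle_count = 1,
+ *        .extensions = (__u64)(uintptr_t)&query,
+ *    };
+ *    ioctl(fd, DRM_IOCTL_V3D_SUBMIT_CPU, &job);
+ */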
+
enum {
V3D_PERFCNT_FEP_VALID_PRIMTS_NO_PIXELS,
V3D_PERFCNT_FEP_VALID_PRIMS,
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index b1d0e56565bc..c2ce71987e9b 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -97,6 +97,7 @@ struct drm_virtgpu_execbuffer {
#define VIRTGPU_PARAM_CROSS_DEVICE 5 /* Cross virtio-device resource sharing */
#define VIRTGPU_PARAM_CONTEXT_INIT 6 /* DRM_VIRTGPU_CONTEXT_INIT */
#define VIRTGPU_PARAM_SUPPORTED_CAPSET_IDs 7 /* Bitmask of supported capability set ids */
+#define VIRTGPU_PARAM_EXPLICIT_DEBUG_NAME 8 /* Ability to set debug name from userspace */
struct drm_virtgpu_getparam {
__u64 param;
@@ -198,6 +199,7 @@ struct drm_virtgpu_resource_create_blob {
#define VIRTGPU_CONTEXT_PARAM_CAPSET_ID 0x0001
#define VIRTGPU_CONTEXT_PARAM_NUM_RINGS 0x0002
#define VIRTGPU_CONTEXT_PARAM_POLL_RINGS_MASK 0x0003
+#define VIRTGPU_CONTEXT_PARAM_DEBUG_NAME 0x0004
struct drm_virtgpu_context_set_param {
__u64 param;
__u64 value;
diff --git a/include/uapi/drm/xe_drm.h b/include/uapi/drm/xe_drm.h
new file mode 100644
index 000000000000..9fa3ae324731
--- /dev/null
+++ b/include/uapi/drm/xe_drm.h
@@ -0,0 +1,1347 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#ifndef _UAPI_XE_DRM_H_
+#define _UAPI_XE_DRM_H_
+
+#include "drm.h"
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/*
+ * Please note that modifications to all structs defined here are
+ * subject to backwards-compatibility constraints.
+ * Sections in this file are organized as follows:
+ * 1. IOCTL definition
+ * 2. Extension definition and helper structs
+ * 3. IOCTL query structs, in the order of the query entries.
+ * 4. The rest of the IOCTL structs, in the order of IOCTL declaration.
+ */
+
+/**
+ * DOC: Xe Device Block Diagram
+ *
+ * The diagram below represents a high-level simplification of a discrete
+ * GPU supported by the Xe driver. It shows some device components which
+ * are necessary to understand this API, as well as how they relate to
+ * each other. This diagram does not represent real hardware::
+ *
+ * ┌──────────────────────────────────────────────────────────────────┐
+ * │ ┌──────────────────────────────────────────────────┐ ┌─────────┐ │
+ * │ │ ┌───────────────────────┐ ┌─────┐ │ │ ┌─────┐ │ │
+ * │ │ │ VRAM0 ├───┤ ... │ │ │ │VRAM1│ │ │
+ * │ │ └───────────┬───────────┘ └─GT1─┘ │ │ └──┬──┘ │ │
+ * │ │ ┌──────────────────┴───────────────────────────┐ │ │ ┌──┴──┐ │ │
+ * │ │ │ ┌─────────────────────┐ ┌─────────────────┐ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │RCS0 │ │BCS0 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VCS0 │ │VCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │VECS0│ │VECS1│ │ │ │ │ │ ... │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ┌──┐ ┌──┐ ┌──┐ ┌──┐ │ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │EU│ │EU│ │EU│ │EU│ │ │ │CCS0 │ │CCS1 │ │ │ │ │ │ │ │ │
+ * │ │ │ │ └──┘ └──┘ └──┘ └──┘ │ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ └─────────DSS─────────┘ │ ┌─────┐ ┌─────┐ │ │ │ │ │ │ │ │
+ * │ │ │ │ │CCS2 │ │CCS3 │ │ │ │ │ │ │ │ │
+ * │ │ │ ┌─────┐ ┌─────┐ ┌─────┐ │ └─────┘ └─────┘ │ │ │ │ │ │ │ │
+ * │ │ │ │ ... │ │ ... │ │ ... │ │ │ │ │ │ │ │ │ │
+ * │ │ │ └─DSS─┘ └─DSS─┘ └─DSS─┘ └─────Engines─────┘ │ │ │ │ │ │ │
+ * │ │ └───────────────────────────GT0────────────────┘ │ │ └─GT2─┘ │ │
+ * │ └────────────────────────────Tile0─────────────────┘ └─ Tile1──┘ │
+ * └─────────────────────────────Device0───────┬──────────────────────┘
+ * │
+ * ───────────────────────┴────────── PCI bus
+ */
+
+/**
+ * DOC: Xe uAPI Overview
+ *
+ * This section aims to describe Xe's IOCTL entries and structs, as well as
+ * other Xe-related uAPI such as uevents and PMU (Platform Monitoring Unit)
+ * entries and their usage.
+ *
+ * List of supported IOCTLs:
+ * - &DRM_IOCTL_XE_DEVICE_QUERY
+ * - &DRM_IOCTL_XE_GEM_CREATE
+ * - &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ * - &DRM_IOCTL_XE_VM_CREATE
+ * - &DRM_IOCTL_XE_VM_DESTROY
+ * - &DRM_IOCTL_XE_VM_BIND
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ * - &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ * - &DRM_IOCTL_XE_EXEC
+ * - &DRM_IOCTL_XE_WAIT_USER_FENCE
+ */
+
+/*
+ * xe specific ioctls.
+ *
+ * The device specific ioctl range is [DRM_COMMAND_BASE, DRM_COMMAND_END),
+ * i.e. [0x40, 0xa0) with 0xa0 excluded. The numbers below are defined as
+ * offsets against DRM_COMMAND_BASE and must lie within [0x0, 0x60).
+ */
+#define DRM_XE_DEVICE_QUERY 0x00
+#define DRM_XE_GEM_CREATE 0x01
+#define DRM_XE_GEM_MMAP_OFFSET 0x02
+#define DRM_XE_VM_CREATE 0x03
+#define DRM_XE_VM_DESTROY 0x04
+#define DRM_XE_VM_BIND 0x05
+#define DRM_XE_EXEC_QUEUE_CREATE 0x06
+#define DRM_XE_EXEC_QUEUE_DESTROY 0x07
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY 0x08
+#define DRM_XE_EXEC 0x09
+#define DRM_XE_WAIT_USER_FENCE 0x0a
+/* Must be kept compact -- no holes */
+
+#define DRM_IOCTL_XE_DEVICE_QUERY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_DEVICE_QUERY, struct drm_xe_device_query)
+#define DRM_IOCTL_XE_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_CREATE, struct drm_xe_gem_create)
+#define DRM_IOCTL_XE_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_GEM_MMAP_OFFSET, struct drm_xe_gem_mmap_offset)
+#define DRM_IOCTL_XE_VM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_VM_CREATE, struct drm_xe_vm_create)
+#define DRM_IOCTL_XE_VM_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_DESTROY, struct drm_xe_vm_destroy)
+#define DRM_IOCTL_XE_VM_BIND DRM_IOW(DRM_COMMAND_BASE + DRM_XE_VM_BIND, struct drm_xe_vm_bind)
+#define DRM_IOCTL_XE_EXEC_QUEUE_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_CREATE, struct drm_xe_exec_queue_create)
+#define DRM_IOCTL_XE_EXEC_QUEUE_DESTROY DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_DESTROY, struct drm_xe_exec_queue_destroy)
+#define DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_EXEC_QUEUE_GET_PROPERTY, struct drm_xe_exec_queue_get_property)
+#define DRM_IOCTL_XE_EXEC DRM_IOW(DRM_COMMAND_BASE + DRM_XE_EXEC, struct drm_xe_exec)
+#define DRM_IOCTL_XE_WAIT_USER_FENCE DRM_IOWR(DRM_COMMAND_BASE + DRM_XE_WAIT_USER_FENCE, struct drm_xe_wait_user_fence)
+
+/**
+ * DOC: Xe IOCTL Extensions
+ *
+ * Before detailing the IOCTLs and their structs, it is important to highlight
+ * that every IOCTL in Xe is extensible.
+ *
+ * Many interfaces need to grow over time. In most cases we can simply
+ * extend the struct and have userspace pass in more data. Another option,
+ * as demonstrated by Vulkan's approach to providing extensions for forward
+ * and backward compatibility, is to use a list of optional structs to
+ * provide those extra details.
+ *
+ * The key advantage to using an extension chain is that it allows us to
+ * redefine the interface more easily than an ever growing struct of
+ * increasing complexity, and for large parts of that interface to be
+ * entirely optional. The downside is more pointer chasing; chasing across
+ * the __user boundary with pointers encapsulated inside u64.
+ *
+ * Example chaining:
+ *
+ * .. code-block:: C
+ *
+ *	struct drm_xe_user_extension ext3 = {
+ *		.next_extension = 0, // end
+ *		.name = ...,
+ *	};
+ *	struct drm_xe_user_extension ext2 = {
+ *		.next_extension = (uintptr_t)&ext3,
+ *		.name = ...,
+ *	};
+ *	struct drm_xe_user_extension ext1 = {
+ *		.next_extension = (uintptr_t)&ext2,
+ *		.name = ...,
+ *	};
+ *
+ * Typically the struct drm_xe_user_extension would be embedded in some uAPI
+ * struct, and in this case we would feed it the head of the chain (i.e. ext1),
+ * which would then apply all of the above extensions.
+ */
+
+/**
+ * struct drm_xe_user_extension - Base class for defining a chain of extensions
+ */
+struct drm_xe_user_extension {
+ /**
+ * @next_extension:
+ *
+ * Pointer to the next struct drm_xe_user_extension, or zero if the end.
+ */
+ __u64 next_extension;
+
+ /**
+ * @name: Name of the extension.
+ *
+ * Note that the name here is just some integer.
+ *
+ * Also note that the name space for this is not global for the whole
+ * driver, but rather its scope/meaning is limited to the specific piece
+ * of uAPI which has embedded the struct drm_xe_user_extension.
+ */
+ __u32 name;
+
+ /**
+ * @pad: MBZ
+ *
+ * All undefined bits must be zero.
+ */
+ __u32 pad;
+};
+
+/**
+ * struct drm_xe_ext_set_property - Generic set property extension
+ *
+ * A generic struct that allows any of the Xe's IOCTL to be extended
+ * with a set_property operation.
+ */
+struct drm_xe_ext_set_property {
+ /** @base: base user extension */
+ struct drm_xe_user_extension base;
+
+ /** @property: property to set */
+ __u32 property;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
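+
+/*
+ * Illustrative sketch: chaining a set_property extension to raise an exec
+ * queue's priority at creation time. The property and extension names are
+ * defined with struct drm_xe_exec_queue_create further down in this file;
+ * the priority value 1 is an assumption of this example.
+ *
+ *    struct drm_xe_ext_set_property prio = {
+ *        .base = { .name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY },
+ *        .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
+ *        .value = 1,
+ *    };
+ *    exec_queue_create.extensions = (uintptr_t)&prio;
+ */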
+
+/**
+ * struct drm_xe_engine_class_instance - instance of an engine class
+ *
+ * It is returned as part of @drm_xe_engine, and is also used as input
+ * for engine selection in both @drm_xe_exec_queue_create and
+ * @drm_xe_query_engine_cycles.
+ *
+ * The @engine_class can be:
+ * - %DRM_XE_ENGINE_CLASS_RENDER
+ * - %DRM_XE_ENGINE_CLASS_COPY
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_DECODE
+ * - %DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE
+ * - %DRM_XE_ENGINE_CLASS_COMPUTE
+ * - %DRM_XE_ENGINE_CLASS_VM_BIND - Kernel-only class (not an actual
+ * hardware engine class). Used for creating ordered queues of VM
+ * bind operations.
+ */
+struct drm_xe_engine_class_instance {
+#define DRM_XE_ENGINE_CLASS_RENDER 0
+#define DRM_XE_ENGINE_CLASS_COPY 1
+#define DRM_XE_ENGINE_CLASS_VIDEO_DECODE 2
+#define DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE 3
+#define DRM_XE_ENGINE_CLASS_COMPUTE 4
+#define DRM_XE_ENGINE_CLASS_VM_BIND 5
+ /** @engine_class: engine class id */
+ __u16 engine_class;
+ /** @engine_instance: engine instance id */
+ __u16 engine_instance;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad;
+};
+
+/**
+ * struct drm_xe_engine - describe hardware engine
+ */
+struct drm_xe_engine {
+ /** @instance: The @drm_xe_engine_class_instance */
+ struct drm_xe_engine_class_instance instance;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_query_engines - describe engines
+ *
+ * If a query is made with a struct @drm_xe_device_query where .query
+ * is equal to %DRM_XE_DEVICE_QUERY_ENGINES, then the reply uses an array of
+ * struct @drm_xe_query_engines in .data.
+ */
+struct drm_xe_query_engines {
+ /** @num_engines: number of engines returned in @engines */
+ __u32 num_engines;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @engines: The returned engines for this device */
+ struct drm_xe_engine engines[];
+};
+
+/**
+ * enum drm_xe_memory_class - Supported memory classes.
+ */
+enum drm_xe_memory_class {
+ /** @DRM_XE_MEM_REGION_CLASS_SYSMEM: Represents system memory. */
+ DRM_XE_MEM_REGION_CLASS_SYSMEM = 0,
+ /**
+ * @DRM_XE_MEM_REGION_CLASS_VRAM: On discrete platforms, this
+ * represents the memory that is local to the device, which we
+ * call VRAM. Not valid on integrated platforms.
+ */
+ DRM_XE_MEM_REGION_CLASS_VRAM
+};
+
+/**
+ * struct drm_xe_mem_region - Describes some region as known to
+ * the driver.
+ */
+struct drm_xe_mem_region {
+ /**
+ * @mem_class: The memory class describing this region.
+ *
+ * See enum drm_xe_memory_class for supported values.
+ */
+ __u16 mem_class;
+ /**
+ * @instance: The unique ID for this region, which serves as the
+ * index in the placement bitmask used as argument for
+ * &DRM_IOCTL_XE_GEM_CREATE
+ */
+ __u16 instance;
+ /**
+ * @min_page_size: Min page-size in bytes for this region.
+ *
+ * When the kernel allocates memory for this region, the
+ * underlying pages will be at least @min_page_size in size.
+ * Buffer objects with an allowable placement in this region must be
+ * created with a size aligned to this value.
+ * GPU virtual address mappings of (parts of) buffer objects that
+ * may be placed in this region must also have their GPU virtual
+ * address and range aligned to this value.
+ * Affected IOCTLS will return %-EINVAL if alignment restrictions are
+ * not met.
+ */
+ __u32 min_page_size;
+ /**
+ * @total_size: The usable size in bytes for this region.
+ */
+ __u64 total_size;
+ /**
+ * @used: Estimate of the memory used in bytes for this region.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero.
+ */
+ __u64 used;
+ /**
+ * @cpu_visible_size: How much of this region can be CPU
+ * accessed, in bytes.
+ *
+ * This will always be <= @total_size, and the remainder (if
+ * any) will not be CPU accessible. If the CPU accessible part
+ * is smaller than @total_size then this is referred to as a
+ * small BAR system.
+ *
+	 * On systems without small BAR (full BAR), @cpu_visible_size will
+	 * always equal @total_size, since all of it will be CPU
+ * accessible.
+ *
+ * Note this is only tracked for DRM_XE_MEM_REGION_CLASS_VRAM
+ * regions (for other types the value here will always equal
+ * zero).
+ */
+ __u64 cpu_visible_size;
+ /**
+ * @cpu_visible_used: Estimate of CPU visible memory used, in
+ * bytes.
+ *
+ * Requires CAP_PERFMON or CAP_SYS_ADMIN to get reliable
+ * accounting. Without this the value here will always equal
+ * zero. Note this is only currently tracked for
+ * DRM_XE_MEM_REGION_CLASS_VRAM regions (for other types the value
+ * here will always be zero).
+ */
+ __u64 cpu_visible_used;
+ /** @reserved: Reserved */
+ __u64 reserved[6];
+};
+
+/**
+ * struct drm_xe_query_mem_regions - describe memory regions
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_MEM_REGIONS, then the reply uses
+ * struct drm_xe_query_mem_regions in .data.
+ */
+struct drm_xe_query_mem_regions {
+ /** @num_mem_regions: number of memory regions returned in @mem_regions */
+ __u32 num_mem_regions;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @mem_regions: The returned memory regions for this device */
+ struct drm_xe_mem_region mem_regions[];
+};
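+
+/*
+ * Illustrative sketch: listing memory regions with the two-call pattern
+ * documented for struct drm_xe_device_query below.
+ *
+ *    struct drm_xe_device_query query = {
+ *        .query = DRM_XE_DEVICE_QUERY_MEM_REGIONS,
+ *    };
+ *    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ *    struct drm_xe_query_mem_regions *regions = malloc(query.size);
+ *    query.data = (uintptr_t)regions;
+ *    ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ *    for (int i = 0; i < regions->num_mem_regions; i++)
+ *        printf("region %u: class %u, %llu bytes\n",
+ *               regions->mem_regions[i].instance,
+ *               regions->mem_regions[i].mem_class,
+ *               (unsigned long long)regions->mem_regions[i].total_size);
+ *    free(regions);
+ */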
+
+/**
+ * struct drm_xe_query_config - describe the device configuration
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_CONFIG, then the reply uses
+ * struct drm_xe_query_config in .data.
+ *
+ * The index in @info can be:
+ * - %DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID - Device ID (lower 16 bits)
+ * and the device revision (next 8 bits)
+ * - %DRM_XE_QUERY_CONFIG_FLAGS - Flags describing the device
+ * configuration, see list below
+ *
+ * - %DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM - Flag is set if the device
+ * has usable VRAM
+ * - %DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT - Minimal memory alignment
+ * required by this device, typically SZ_4K or SZ_64K
+ * - %DRM_XE_QUERY_CONFIG_VA_BITS - Maximum bits of a virtual address
+ * - %DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY - Value of the highest
+ * available exec queue priority
+ */
+struct drm_xe_query_config {
+ /** @num_params: number of parameters returned in info */
+ __u32 num_params;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+#define DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID 0
+#define DRM_XE_QUERY_CONFIG_FLAGS 1
+ #define DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM (1 << 0)
+#define DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT 2
+#define DRM_XE_QUERY_CONFIG_VA_BITS 3
+#define DRM_XE_QUERY_CONFIG_MAX_EXEC_QUEUE_PRIORITY 4
+ /** @info: array of elements containing the config info */
+ __u64 info[];
+};
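+
+/*
+ * Illustrative sketch: decoding a DRM_XE_DEVICE_QUERY_CONFIG reply. `config`
+ * is assumed to have been fetched with the two-call pattern documented for
+ * struct drm_xe_device_query below.
+ *
+ *    __u16 dev_id = config->info[DRM_XE_QUERY_CONFIG_REV_AND_DEVICE_ID] & 0xffff;
+ *    int has_vram = !!(config->info[DRM_XE_QUERY_CONFIG_FLAGS] &
+ *                      DRM_XE_QUERY_CONFIG_FLAG_HAS_VRAM);
+ *    __u64 alignment = config->info[DRM_XE_QUERY_CONFIG_MIN_ALIGNMENT];
+ */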
+
+/**
+ * struct drm_xe_gt - describe an individual GT.
+ *
+ * To be used with drm_xe_query_gt_list, which returns a list of all the
+ * existing GTs, each individually described.
+ * Graphics Technology (GT) is a subset of a GPU/tile that is responsible for
+ * implementing graphics and/or media operations.
+ *
+ * The index in @type can be:
+ * - %DRM_XE_QUERY_GT_TYPE_MAIN
+ * - %DRM_XE_QUERY_GT_TYPE_MEDIA
+ */
+struct drm_xe_gt {
+#define DRM_XE_QUERY_GT_TYPE_MAIN 0
+#define DRM_XE_QUERY_GT_TYPE_MEDIA 1
+ /** @type: GT type: Main or Media */
+ __u16 type;
+ /** @tile_id: Tile ID where this GT lives (Information only) */
+ __u16 tile_id;
+ /** @gt_id: Unique ID of this GT within the PCI Device */
+ __u16 gt_id;
+ /** @pad: MBZ */
+ __u16 pad[3];
+ /** @reference_clock: A clock frequency for timestamp */
+ __u32 reference_clock;
+ /**
+ * @near_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are nearest to the current engines
+ * of this GT.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 near_mem_regions;
+ /**
+ * @far_mem_regions: Bit mask of instances from
+ * drm_xe_query_mem_regions that are far from the engines of this GT.
+ * In general, they have extra indirections when compared to the
+ * @near_mem_regions. For a discrete device this could mean system
+ * memory and memory living in a different tile.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u64 far_mem_regions;
+ /** @reserved: Reserved */
+ __u64 reserved[8];
+};
+
+/**
+ * struct drm_xe_query_gt_list - A list with GT description items.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_LIST, then the reply uses struct
+ * drm_xe_query_gt_list in .data.
+ */
+struct drm_xe_query_gt_list {
+ /** @num_gt: number of GT items returned in gt_list */
+ __u32 num_gt;
+ /** @pad: MBZ */
+ __u32 pad;
+ /** @gt_list: The GT list returned for this device */
+ struct drm_xe_gt gt_list[];
+};
+
+/**
+ * struct drm_xe_query_topology_mask - describe the topology mask of a GT
+ *
+ * This is the hardware topology which reflects the internal physical
+ * structure of the GPU.
+ *
+ * If a query is made with a struct drm_xe_device_query where .query
+ * is equal to DRM_XE_DEVICE_QUERY_GT_TOPOLOGY, then the reply uses
+ * struct drm_xe_query_topology_mask in .data.
+ *
+ * The @type can be:
+ * - %DRM_XE_TOPO_DSS_GEOMETRY - To query the mask of Dual Sub Slices
+ * (DSS) available for geometry operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_GEOMETRY ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for geometry.
+ * - %DRM_XE_TOPO_DSS_COMPUTE - To query the mask of Dual Sub Slices
+ * (DSS) available for compute operations. For example a query response
+ * containing the following in mask:
+ * ``DSS_COMPUTE ff ff ff ff 00 00 00 00``
+ * means 32 DSS are available for compute.
+ * - %DRM_XE_TOPO_EU_PER_DSS - To query the mask of Execution Units (EU)
+ * available per Dual Sub Slices (DSS). For example a query response
+ * containing the following in mask:
+ * ``EU_PER_DSS ff ff 00 00 00 00 00 00``
+ * means each DSS has 16 EU.
+ */
+struct drm_xe_query_topology_mask {
+ /** @gt_id: GT ID the mask is associated with */
+ __u16 gt_id;
+
+#define DRM_XE_TOPO_DSS_GEOMETRY (1 << 0)
+#define DRM_XE_TOPO_DSS_COMPUTE (1 << 1)
+#define DRM_XE_TOPO_EU_PER_DSS (1 << 2)
+ /** @type: type of mask */
+ __u16 type;
+
+ /** @num_bytes: number of bytes in requested mask */
+ __u32 num_bytes;
+
+ /** @mask: little-endian mask of @num_bytes */
+ __u8 mask[];
+};
+
+/**
+ * struct drm_xe_query_engine_cycles - correlate CPU and GPU timestamps
+ *
+ * If a query is made with a struct drm_xe_device_query where .query is equal to
+ * DRM_XE_DEVICE_QUERY_ENGINE_CYCLES, then the reply uses struct drm_xe_query_engine_cycles
+ * in .data. struct drm_xe_query_engine_cycles is allocated by the user and
+ * .data points to this allocated structure.
+ *
+ * The query returns the engine cycles, which, along with the GT's
+ * @reference_clock, can be used to calculate the engine timestamp. In
+ * addition, the query returns a set of cpu timestamps that indicate when the
+ * command streamer cycle count was captured.
+ */
+struct drm_xe_query_engine_cycles {
+ /**
+	 * @eci: This is input by the user and is the engine for which the
+	 * command streamer cycle count is queried.
+ */
+ struct drm_xe_engine_class_instance eci;
+
+ /**
+ * @clockid: This is input by the user and is the reference clock id for
+ * CPU timestamp. For definition, see clock_gettime(2) and
+ * perf_event_open(2). Supported clock ids are CLOCK_MONOTONIC,
+ * CLOCK_MONOTONIC_RAW, CLOCK_REALTIME, CLOCK_BOOTTIME, CLOCK_TAI.
+ */
+ __s32 clockid;
+
+ /** @width: Width of the engine cycle counter in bits. */
+ __u32 width;
+
+ /**
+ * @engine_cycles: Engine cycles as read from its register
+ * at 0x358 offset.
+ */
+ __u64 engine_cycles;
+
+ /**
+ * @cpu_timestamp: CPU timestamp in ns. The timestamp is captured before
+ * reading the engine_cycles register using the reference clockid set by the
+ * user.
+ */
+ __u64 cpu_timestamp;
+
+ /**
+ * @cpu_delta: Time delta in ns captured around reading the lower dword
+ * of the engine_cycles register.
+ */
+ __u64 cpu_delta;
+};
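+
+/*
+ * Illustrative sketch: converting a sampled engine cycle count into
+ * nanoseconds. `cycles` is a filled struct drm_xe_query_engine_cycles and
+ * `gt` the matching entry from a DRM_XE_DEVICE_QUERY_GT_LIST reply, with
+ * @reference_clock assumed to be in Hz; a real implementation should also
+ * guard the multiplication against overflow.
+ *
+ *    __u64 mask = cycles.width < 64 ? (1ull << cycles.width) - 1 : ~0ull;
+ *    __u64 gpu_ns = (cycles.engine_cycles & mask) * 1000000000ull /
+ *                   gt->reference_clock;
+ */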
+
+/**
+ * struct drm_xe_device_query - Input of &DRM_IOCTL_XE_DEVICE_QUERY - main
+ * structure to query device information
+ *
+ * The user selects the type of data to query among DRM_XE_DEVICE_QUERY_*
+ * and sets the value in the query member. This determines the type of
+ * the structure provided by the driver in data, among struct drm_xe_query_*.
+ *
+ * The @query can be:
+ * - %DRM_XE_DEVICE_QUERY_ENGINES
+ * - %DRM_XE_DEVICE_QUERY_MEM_REGIONS
+ * - %DRM_XE_DEVICE_QUERY_CONFIG
+ * - %DRM_XE_DEVICE_QUERY_GT_LIST
+ * - %DRM_XE_DEVICE_QUERY_HWCONFIG - Query type to retrieve the hardware
+ * configuration of the device such as information on slices, memory,
+ * caches, and so on. It is provided as a table of key / value
+ * attributes.
+ * - %DRM_XE_DEVICE_QUERY_GT_TOPOLOGY
+ * - %DRM_XE_DEVICE_QUERY_ENGINE_CYCLES
+ *
+ * If size is set to 0, the driver fills it with the required size for
+ * the requested type of data to query. If size is equal to the required
+ * size, the queried information is copied into data. If size is set to
+ * a value different from 0 and different from the required size, the
+ * IOCTL call returns -EINVAL.
+ *
+ * For example the following code snippet allows retrieving and printing
+ * information about the device engines with DRM_XE_DEVICE_QUERY_ENGINES:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_query_engines *engines;
+ * struct drm_xe_device_query query = {
+ * .extensions = 0,
+ * .query = DRM_XE_DEVICE_QUERY_ENGINES,
+ * .size = 0,
+ * .data = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * engines = malloc(query.size);
+ * query.data = (uintptr_t)engines;
+ * ioctl(fd, DRM_IOCTL_XE_DEVICE_QUERY, &query);
+ * for (int i = 0; i < engines->num_engines; i++) {
+ * printf("Engine %d: %s\n", i,
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_RENDER ? "RENDER":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COPY ? "COPY":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_DECODE ? "VIDEO_DECODE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_VIDEO_ENHANCE ? "VIDEO_ENHANCE":
+ * engines->engines[i].instance.engine_class ==
+ * DRM_XE_ENGINE_CLASS_COMPUTE ? "COMPUTE":
+ * "UNKNOWN");
+ * }
+ * free(engines);
+ */
+struct drm_xe_device_query {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_DEVICE_QUERY_ENGINES 0
+#define DRM_XE_DEVICE_QUERY_MEM_REGIONS 1
+#define DRM_XE_DEVICE_QUERY_CONFIG 2
+#define DRM_XE_DEVICE_QUERY_GT_LIST 3
+#define DRM_XE_DEVICE_QUERY_HWCONFIG 4
+#define DRM_XE_DEVICE_QUERY_GT_TOPOLOGY 5
+#define DRM_XE_DEVICE_QUERY_ENGINE_CYCLES 6
+ /** @query: The type of data to query */
+ __u32 query;
+
+ /** @size: Size of the queried data */
+ __u32 size;
+
+ /** @data: Queried data is placed here */
+ __u64 data;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_gem_create - Input of &DRM_IOCTL_XE_GEM_CREATE - A structure for
+ * gem creation
+ *
+ * The @flags can be:
+ * - %DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING
+ * - %DRM_XE_GEM_CREATE_FLAG_SCANOUT
+ * - %DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM - When using VRAM as a
+ * possible placement, ensure that the corresponding VRAM allocation
+ * will always use the CPU accessible part of VRAM. This is important
+ * for small-bar systems (on full-bar systems this gets turned into a
+ * noop).
+ * Note 1: System memory can be added as an extra placement so that the
+ * kernel can spill the allocation to system memory if space can't be made
+ * available in the CPU accessible part of VRAM (giving the same
+ * behaviour as the i915 interface, see
+ * I915_GEM_CREATE_EXT_FLAG_NEEDS_CPU_ACCESS).
+ * Note 2: For clear-color CCS surfaces the kernel needs to read the
+ * clear-color value stored in the buffer, and on discrete platforms we
+ * need to use VRAM for display surfaces, therefore the kernel requires
+ * setting this flag for such objects, otherwise an error is returned on
+ * small-bar systems.
+ *
+ * @cpu_caching supports the following values:
+ * - %DRM_XE_GEM_CPU_CACHING_WB - Allocate the pages with write-back
+ * caching. On iGPU this can't be used for scanout surfaces. Currently
+ * not allowed for objects placed in VRAM.
+ * - %DRM_XE_GEM_CPU_CACHING_WC - Allocate the pages as write-combined. This
+ * is uncached. Scanout surfaces should likely use this. All objects
+ * that can be placed in VRAM must use this.
+ */
+struct drm_xe_gem_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @size: Size of the object to be created, must match region
+ * (system or vram) minimum alignment (&min_page_size).
+ */
+ __u64 size;
+
+ /**
+ * @placement: A mask of memory instances of where BO can be placed.
+ * Each index in this mask refers directly to the struct
+ * drm_xe_query_mem_regions' instance, no assumptions should
+ * be made about order. The type of each region is described
+ * by struct drm_xe_query_mem_regions' mem_class.
+ */
+ __u32 placement;
+
+#define DRM_XE_GEM_CREATE_FLAG_DEFER_BACKING (1 << 0)
+#define DRM_XE_GEM_CREATE_FLAG_SCANOUT (1 << 1)
+#define DRM_XE_GEM_CREATE_FLAG_NEEDS_VISIBLE_VRAM (1 << 2)
+ /**
+	 * @flags: Flags for object creation; a combination of the
+	 * DRM_XE_GEM_CREATE_FLAG_* values defined above
+ */
+ __u32 flags;
+
+ /**
+ * @vm_id: Attached VM, if any
+ *
+ * If a VM is specified, this BO must:
+ *
+ * 1. Only ever be bound to that VM.
+	 * 2. Never be exported as a PRIME fd.
+ */
+ __u32 vm_id;
+
+ /**
+ * @handle: Returned handle for the object.
+ *
+ * Object handles are nonzero.
+ */
+ __u32 handle;
+
+#define DRM_XE_GEM_CPU_CACHING_WB 1
+#define DRM_XE_GEM_CPU_CACHING_WC 2
+ /**
+ * @cpu_caching: The CPU caching mode to select for this object. If
+	 * mmapping the object, the mode selected here will also be used.
+ */
+ __u16 cpu_caching;
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
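+
+/*
+ * Illustrative sketch: creating a 64 KiB write-back object. Placement bit 0
+ * being the sysmem instance reported by DRM_XE_DEVICE_QUERY_MEM_REGIONS is
+ * an assumption of this example.
+ *
+ *    struct drm_xe_gem_create create = {
+ *        .size = 0x10000,
+ *        .placement = 1 << 0,
+ *        .cpu_caching = DRM_XE_GEM_CPU_CACHING_WB,
+ *    };
+ *    ioctl(fd, DRM_IOCTL_XE_GEM_CREATE, &create);
+ *    // create.handle now holds the object handle
+ */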
+
+/**
+ * struct drm_xe_gem_mmap_offset - Input of &DRM_IOCTL_XE_GEM_MMAP_OFFSET
+ */
+struct drm_xe_gem_mmap_offset {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @handle: Handle for the object being mapped. */
+ __u32 handle;
+
+ /** @flags: Must be zero */
+ __u32 flags;
+
+ /** @offset: The fake offset to use for subsequent mmap call */
+ __u64 offset;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
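+
+/*
+ * Illustrative sketch: mapping an object into the CPU address space through
+ * the fake offset. `handle` and `size` are assumed to come from a prior gem
+ * create.
+ *
+ *    struct drm_xe_gem_mmap_offset mmo = { .handle = handle };
+ *    ioctl(fd, DRM_IOCTL_XE_GEM_MMAP_OFFSET, &mmo);
+ *    void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
+ *                     fd, mmo.offset);
+ */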
+
+/**
+ * struct drm_xe_vm_create - Input of &DRM_IOCTL_XE_VM_CREATE
+ *
+ * The @flags can be:
+ * - %DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE
+ * - %DRM_XE_VM_CREATE_FLAG_LR_MODE - An LR, or Long Running, VM accepts
+ * exec submissions to its exec_queues that don't have an upper time
+ * limit on the job execution time. But exec submissions to these
+ * don't allow the sync types DRM_XE_SYNC_TYPE_SYNCOBJ and
+ * DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ to be
+ * used as out-syncobjs, that is, together with DRM_XE_SYNC_FLAG_SIGNAL.
+ * LR VMs can be created in recoverable page-fault mode using
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE, if the device supports it.
+ * If that flag is omitted, the UMD can not rely on the slightly
+ * different per-VM overcommit semantics that are enabled by
+ * DRM_XE_VM_CREATE_FLAG_FAULT_MODE (see below), but KMD may
+ * still enable recoverable pagefaults if supported by the device.
+ * - %DRM_XE_VM_CREATE_FLAG_FAULT_MODE - Requires also
+ * DRM_XE_VM_CREATE_FLAG_LR_MODE. It allows memory to be allocated on
+ * demand when accessed, and also allows per-VM overcommit of memory.
+ * The xe driver internally uses recoverable pagefaults to implement
+ * this.
+ */
+struct drm_xe_vm_create {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE (1 << 0)
+#define DRM_XE_VM_CREATE_FLAG_LR_MODE (1 << 1)
+#define DRM_XE_VM_CREATE_FLAG_FAULT_MODE (1 << 2)
+ /** @flags: Flags */
+ __u32 flags;
+
+ /** @vm_id: Returned VM ID */
+ __u32 vm_id;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_vm_destroy - Input of &DRM_IOCTL_XE_VM_DESTROY
+ */
+struct drm_xe_vm_destroy {
+ /** @vm_id: VM ID */
+ __u32 vm_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
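+
+/*
+ * Illustrative sketch: creating a VM with scratch pages enabled and later
+ * destroying it.
+ *
+ *    struct drm_xe_vm_create create = {
+ *        .flags = DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE,
+ *    };
+ *    ioctl(fd, DRM_IOCTL_XE_VM_CREATE, &create);
+ *    ...
+ *    struct drm_xe_vm_destroy destroy = { .vm_id = create.vm_id };
+ *    ioctl(fd, DRM_IOCTL_XE_VM_DESTROY, &destroy);
+ */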
+
+/**
+ * struct drm_xe_vm_bind_op - run bind operations
+ *
+ * The @op can be:
+ * - %DRM_XE_VM_BIND_OP_MAP
+ * - %DRM_XE_VM_BIND_OP_UNMAP
+ * - %DRM_XE_VM_BIND_OP_MAP_USERPTR
+ * - %DRM_XE_VM_BIND_OP_UNMAP_ALL
+ * - %DRM_XE_VM_BIND_OP_PREFETCH
+ *
+ * and the @flags can be:
+ * - %DRM_XE_VM_BIND_FLAG_READONLY
+ * - %DRM_XE_VM_BIND_FLAG_IMMEDIATE - Valid on a faulting VM only, do the
+ * MAP operation immediately rather than deferring the MAP to the page
+ * fault handler.
+ * - %DRM_XE_VM_BIND_FLAG_NULL - When the NULL flag is set, the page
+ * tables are setup with a special bit which indicates writes are
+ * dropped and all reads return zero. In the future, the NULL flag
+ * will only be valid for DRM_XE_VM_BIND_OP_MAP operations, the BO
+ * handle MBZ, and the BO offset MBZ. This flag is intended to
+ * implement VK sparse bindings.
+ */
+struct drm_xe_vm_bind_op {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+ * @obj: GEM object to operate on, MBZ for MAP_USERPTR, MBZ for UNMAP
+ */
+ __u32 obj;
+
+ /**
+ * @pat_index: The platform defined @pat_index to use for this mapping.
+ * The index basically maps to some predefined memory attributes,
+ * including things like caching, coherency, compression etc. The exact
+ * meaning of the pat_index is platform specific and defined in the
+ * Bspec and PRMs. When the KMD sets up the binding the index here is
+ * encoded into the ppGTT PTE.
+ *
+ * For coherency the @pat_index needs to be at least 1way coherent when
+ * drm_xe_gem_create.cpu_caching is DRM_XE_GEM_CPU_CACHING_WB. The KMD
+ * will extract the coherency mode from the @pat_index and reject if
+ * there is a mismatch (see note below for pre-MTL platforms).
+ *
+ * Note: On pre-MTL platforms there is only a caching mode and no
+ * explicit coherency mode, but on such hardware there is always a
+	 * shared-LLC (or it is a dgpu), so all GT memory accesses are coherent with
+ * CPU caches even with the caching mode set as uncached. It's only the
+ * display engine that is incoherent (on dgpu it must be in VRAM which
+ * is always mapped as WC on the CPU). However to keep the uapi somewhat
+ * consistent with newer platforms the KMD groups the different cache
+ * levels into the following coherency buckets on all pre-MTL platforms:
+ *
+ * ppGTT UC -> COH_NONE
+ * ppGTT WC -> COH_NONE
+ * ppGTT WT -> COH_NONE
+ * ppGTT WB -> COH_AT_LEAST_1WAY
+ *
+	 * In practice UC/WC/WT should only ever be used for scanout surfaces on
+ * such platforms (or perhaps in general for dma-buf if shared with
+ * another device) since it is only the display engine that is actually
+ * incoherent. Everything else should typically use WB given that we
+ * have a shared-LLC. On MTL+ this completely changes and the HW
+ * defines the coherency mode as part of the @pat_index, where
+ * incoherent GT access is possible.
+ *
+ * Note: For userptr and externally imported dma-buf the kernel expects
+ * either 1WAY or 2WAY for the @pat_index.
+ *
+ * For DRM_XE_VM_BIND_FLAG_NULL bindings there are no KMD restrictions
+ * on the @pat_index. For such mappings there is no actual memory being
+ * mapped (the address in the PTE is invalid), so the various PAT memory
+ * attributes likely do not apply. Simply leaving as zero is one
+ * option (still a valid pat_index).
+ */
+ __u16 pat_index;
+
+ /** @pad: MBZ */
+ __u16 pad;
+
+ union {
+ /**
+		 * @obj_offset: Offset into the object, ignored for unbind
+ */
+ __u64 obj_offset;
+
+ /** @userptr: user pointer to bind on */
+ __u64 userptr;
+ };
+
+ /**
+ * @range: Number of bytes from the object to bind to addr, MBZ for UNMAP_ALL
+ */
+ __u64 range;
+
+ /** @addr: Address to operate on, MBZ for UNMAP_ALL */
+ __u64 addr;
+
+#define DRM_XE_VM_BIND_OP_MAP 0x0
+#define DRM_XE_VM_BIND_OP_UNMAP 0x1
+#define DRM_XE_VM_BIND_OP_MAP_USERPTR 0x2
+#define DRM_XE_VM_BIND_OP_UNMAP_ALL 0x3
+#define DRM_XE_VM_BIND_OP_PREFETCH 0x4
+ /** @op: Bind operation to perform */
+ __u32 op;
+
+#define DRM_XE_VM_BIND_FLAG_READONLY (1 << 0)
+#define DRM_XE_VM_BIND_FLAG_IMMEDIATE (1 << 1)
+#define DRM_XE_VM_BIND_FLAG_NULL (1 << 2)
+ /** @flags: Bind flags */
+ __u32 flags;
+
+ /**
+ * @prefetch_mem_region_instance: Memory region to prefetch VMA to.
+ * It is a region instance, not a mask.
+ * To be used only with %DRM_XE_VM_BIND_OP_PREFETCH operation.
+ */
+ __u32 prefetch_mem_region_instance;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[3];
+};
+
+/**
+ * struct drm_xe_vm_bind - Input of &DRM_IOCTL_XE_VM_BIND
+ *
+ * Below is a minimal example of using @drm_xe_vm_bind to
+ * asynchronously bind the buffer `data` at address `BIND_ADDRESS`,
+ * illustrating `userptr`. It can be synchronized by using the example
+ * provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * data = aligned_alloc(ALIGNMENT, BO_SIZE);
+ * struct drm_xe_vm_bind bind = {
+ * .vm_id = vm,
+ * .num_binds = 1,
+ * .bind.obj = 0,
+ * .bind.obj_offset = to_user_pointer(data),
+ * .bind.range = BO_SIZE,
+ * .bind.addr = BIND_ADDRESS,
+ * .bind.op = DRM_XE_VM_BIND_OP_MAP_USERPTR,
+ * .bind.flags = 0,
+ * .num_syncs = 1,
+ * .syncs = &sync,
+ * .exec_queue_id = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_VM_BIND, &bind);
+ *
+ */
+struct drm_xe_vm_bind {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @vm_id: The ID of the VM to bind to */
+ __u32 vm_id;
+
+ /**
+	 * @exec_queue_id: exec_queue_id, must be of class DRM_XE_ENGINE_CLASS_VM_BIND,
+	 * and the exec queue must have the same vm_id. If zero, the default VM bind engine
+	 * is used.
+ */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @num_binds: number of binds in this IOCTL */
+ __u32 num_binds;
+
+ union {
+ /** @bind: used if num_binds == 1 */
+ struct drm_xe_vm_bind_op bind;
+
+ /**
+ * @vector_of_binds: userptr to array of struct
+ * drm_xe_vm_bind_op if num_binds > 1
+ */
+ __u64 vector_of_binds;
+ };
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+	/** @num_syncs: number of syncs to wait on */
+ __u32 num_syncs;
+
+ /** @syncs: pointer to struct drm_xe_sync array */
+ __u64 syncs;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_create - Input of &DRM_IOCTL_XE_EXEC_QUEUE_CREATE
+ *
+ * The example below shows how to use @drm_xe_exec_queue_create to create
+ * a simple exec_queue (no parallel submission) of class
+ * &DRM_XE_ENGINE_CLASS_RENDER.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_engine_class_instance instance = {
+ * .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
+ * };
+ * struct drm_xe_exec_queue_create exec_queue_create = {
+ * .extensions = 0,
+ * .vm_id = vm,
+ *         .width = 1,
+ *         .num_placements = 1,
+ * .instances = to_user_pointer(&instance),
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &exec_queue_create);
+ *
+ */
+struct drm_xe_exec_queue_create {
+#define DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY 0
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE 1
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PREEMPTION_TIMEOUT 2
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_PERSISTENCE 3
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_JOB_TIMEOUT 4
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_TRIGGER 5
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_NOTIFY 6
+#define DRM_XE_EXEC_QUEUE_SET_PROPERTY_ACC_GRANULARITY 7
+/* Monitor 128KB contiguous region with 4K sub-granularity */
+#define DRM_XE_ACC_GRANULARITY_128K 0
+/* Monitor 2MB contiguous region with 64KB sub-granularity */
+#define DRM_XE_ACC_GRANULARITY_2M 1
+/* Monitor 16MB contiguous region with 512KB sub-granularity */
+#define DRM_XE_ACC_GRANULARITY_16M 2
+/* Monitor 64MB contiguous region with 2M sub-granularity */
+#define DRM_XE_ACC_GRANULARITY_64M 3
+
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+	/** @width: submission width (number of BBs per exec) for this exec queue */
+ __u16 width;
+
+ /** @num_placements: number of valid placements for this exec queue */
+ __u16 num_placements;
+
+ /** @vm_id: VM to use for this exec queue */
+ __u32 vm_id;
+
+ /** @flags: MBZ */
+ __u32 flags;
+
+ /** @exec_queue_id: Returned exec queue ID */
+ __u32 exec_queue_id;
+
+ /**
+ * @instances: user pointer to a 2-d array of struct
+ * drm_xe_engine_class_instance
+ *
+ * length = width (i) * num_placements (j)
+ * index = j + i * width
+ */
+ __u64 instances;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_destroy - Input of &DRM_IOCTL_XE_EXEC_QUEUE_DESTROY
+ */
+struct drm_xe_exec_queue_destroy {
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec_queue_get_property - Input of &DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY
+ *
+ * The @property can be:
+ * - %DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN
+ */
+struct drm_xe_exec_queue_get_property {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID */
+ __u32 exec_queue_id;
+
+#define DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN 0
+ /** @property: property to get */
+ __u32 property;
+
+ /** @value: property value */
+ __u64 value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
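+
+/*
+ * Illustrative sketch: checking whether an exec queue has been banned.
+ * Interpreting a non-zero @value as "banned" is an assumption of this
+ * example.
+ *
+ *    struct drm_xe_exec_queue_get_property prop = {
+ *        .exec_queue_id = exec_queue,
+ *        .property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
+ *    };
+ *    ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop);
+ */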
+
+/**
+ * struct drm_xe_sync - sync object
+ *
+ * The @type can be:
+ * - %DRM_XE_SYNC_TYPE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ
+ * - %DRM_XE_SYNC_TYPE_USER_FENCE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_SYNC_FLAG_SIGNAL
+ *
+ * A minimal use of @drm_xe_sync looks like this:
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_sync sync = {
+ * .flags = DRM_XE_SYNC_FLAG_SIGNAL,
+ * .type = DRM_XE_SYNC_TYPE_SYNCOBJ,
+ * };
+ * struct drm_syncobj_create syncobj_create = { 0 };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_CREATE, &syncobj_create);
+ * sync.handle = syncobj_create.handle;
+ * ...
+ * use of &sync in drm_xe_exec or drm_xe_vm_bind
+ * ...
+ * struct drm_syncobj_wait wait = {
+ * .handles = &sync.handle,
+ * .timeout_nsec = INT64_MAX,
+ * .count_handles = 1,
+ * .flags = 0,
+ * .first_signaled = 0,
+ * .pad = 0,
+ * };
+ * ioctl(fd, DRM_IOCTL_SYNCOBJ_WAIT, &wait);
+ */
+struct drm_xe_sync {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+#define DRM_XE_SYNC_TYPE_SYNCOBJ 0x0
+#define DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ 0x1
+#define DRM_XE_SYNC_TYPE_USER_FENCE 0x2
+	/** @type: Type of this sync object */
+ __u32 type;
+
+#define DRM_XE_SYNC_FLAG_SIGNAL (1 << 0)
+ /** @flags: Sync Flags */
+ __u32 flags;
+
+ union {
+ /** @handle: Handle for the object */
+ __u32 handle;
+
+ /**
+		 * @addr: Address of user fence. When the sync is passed in via the exec
+		 * IOCTL this is a GPU address in the VM. When the sync is passed in via
+		 * the VM bind IOCTL this is a user pointer. In either case, it is
+		 * the user's responsibility that this address is present and
+		 * mapped when the user fence is signalled. Must be qword
+ * aligned.
+ */
+ __u64 addr;
+ };
+
+ /**
+	 * @timeline_value: Input for the timeline sync object. Must be
+	 * non-zero when used with %DRM_XE_SYNC_TYPE_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_exec - Input of &DRM_IOCTL_XE_EXEC
+ *
+ * This is an example to use @drm_xe_exec for execution of the object
+ * at BIND_ADDRESS (see example in @drm_xe_vm_bind) by an exec_queue
+ * (see example in @drm_xe_exec_queue_create). It can be synchronized
+ * by using the example provided for @drm_xe_sync.
+ *
+ * .. code-block:: C
+ *
+ * struct drm_xe_exec exec = {
+ * .exec_queue_id = exec_queue,
+ * .syncs = &sync,
+ * .num_syncs = 1,
+ * .address = BIND_ADDRESS,
+ * .num_batch_buffer = 1,
+ * };
+ * ioctl(fd, DRM_IOCTL_XE_EXEC, &exec);
+ *
+ */
+struct drm_xe_exec {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /** @exec_queue_id: Exec queue ID for the batch buffer */
+ __u32 exec_queue_id;
+
+	/** @num_syncs: Number of struct drm_xe_sync entries in the array. */
+ __u32 num_syncs;
+
+ /** @syncs: Pointer to struct drm_xe_sync array. */
+ __u64 syncs;
+
+ /**
+ * @address: address of batch buffer if num_batch_buffer == 1 or an
+ * array of batch buffer addresses
+ */
+ __u64 address;
+
+ /**
+	 * @num_batch_buffer: number of batch buffers in this exec, must match
+ * the width of the engine
+ */
+ __u16 num_batch_buffer;
+
+ /** @pad: MBZ */
+ __u16 pad[3];
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
+
+/**
+ * struct drm_xe_wait_user_fence - Input of &DRM_IOCTL_XE_WAIT_USER_FENCE
+ *
+ * Wait on a user fence. XE will wake up on every HW engine interrupt in the
+ * instances list and check whether the user fence is complete::
+ *
+ * (*addr & MASK) OP (VALUE & MASK)
+ *
+ * Returns to user on user fence completion or timeout.
+ *
+ * The @op can be:
+ * - %DRM_XE_UFENCE_WAIT_OP_EQ
+ * - %DRM_XE_UFENCE_WAIT_OP_NEQ
+ * - %DRM_XE_UFENCE_WAIT_OP_GT
+ * - %DRM_XE_UFENCE_WAIT_OP_GTE
+ * - %DRM_XE_UFENCE_WAIT_OP_LT
+ * - %DRM_XE_UFENCE_WAIT_OP_LTE
+ *
+ * and the @flags can be:
+ * - %DRM_XE_UFENCE_WAIT_FLAG_ABSTIME
+ *
+ * The @mask values can be for example:
+ * - 0xffu for u8
+ * - 0xffffu for u16
+ * - 0xffffffffu for u32
+ * - 0xffffffffffffffffu for u64
+ */
+struct drm_xe_wait_user_fence {
+ /** @extensions: Pointer to the first extension struct, if any */
+ __u64 extensions;
+
+ /**
+	 * @addr: user pointer address to wait on, must be qword aligned
+ */
+ __u64 addr;
+
+#define DRM_XE_UFENCE_WAIT_OP_EQ 0x0
+#define DRM_XE_UFENCE_WAIT_OP_NEQ 0x1
+#define DRM_XE_UFENCE_WAIT_OP_GT 0x2
+#define DRM_XE_UFENCE_WAIT_OP_GTE 0x3
+#define DRM_XE_UFENCE_WAIT_OP_LT 0x4
+#define DRM_XE_UFENCE_WAIT_OP_LTE 0x5
+ /** @op: wait operation (type of comparison) */
+ __u16 op;
+
+#define DRM_XE_UFENCE_WAIT_FLAG_ABSTIME (1 << 0)
+ /** @flags: wait flags */
+ __u16 flags;
+
+ /** @pad: MBZ */
+ __u32 pad;
+
+ /** @value: compare value */
+ __u64 value;
+
+ /** @mask: comparison mask */
+ __u64 mask;
+
+ /**
+ * @timeout: how long to wait before bailing, value in nanoseconds.
+	 * Without the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag set (relative timeout)
+	 * it contains the timeout, expressed in nanoseconds, to wait (the fence
+	 * will expire at now() + timeout).
+	 * When the DRM_XE_UFENCE_WAIT_FLAG_ABSTIME flag is set (absolute timeout)
+	 * the wait will end at timeout (uses the system MONOTONIC_CLOCK).
+	 * Passing a negative timeout leads to a never-ending wait.
+	 *
+	 * On relative timeout this value is updated with the timeout left
+	 * (for restarting the call in case of signal delivery).
+	 * On absolute timeout this value stays intact (a restarted call will
+	 * still expire at the same point in time).
+ */
+ __s64 timeout;
+
+ /** @exec_queue_id: exec_queue_id returned from xe_exec_queue_create_ioctl */
+ __u32 exec_queue_id;
+
+ /** @pad2: MBZ */
+ __u32 pad2;
+
+ /** @reserved: Reserved */
+ __u64 reserved[2];
+};
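+
+/*
+ * Illustrative sketch: waiting up to one second for a user fence, written
+ * through a DRM_XE_SYNC_TYPE_USER_FENCE sync, to become equal to `expected`.
+ * `fence_addr` must be qword aligned; `fence_addr`, `expected` and
+ * `exec_queue` are assumptions of this example.
+ *
+ *    struct drm_xe_wait_user_fence wait = {
+ *        .addr = fence_addr,
+ *        .op = DRM_XE_UFENCE_WAIT_OP_EQ,
+ *        .value = expected,
+ *        .mask = ~0ull,
+ *        .timeout = 1000000000,
+ *        .exec_queue_id = exec_queue,
+ *    };
+ *    ioctl(fd, DRM_IOCTL_XE_WAIT_USER_FENCE, &wait);
+ */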
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* _UAPI_XE_DRM_H_ */
diff --git a/include/uapi/linux/sync_file.h b/include/uapi/linux/sync_file.h
index ff0a931833e2..ff1f38889dcf 100644
--- a/include/uapi/linux/sync_file.h
+++ b/include/uapi/linux/sync_file.h
@@ -76,6 +76,27 @@ struct sync_file_info {
__u64 sync_fence_info;
};
+/**
+ * struct sync_set_deadline - SYNC_IOC_SET_DEADLINE - set a deadline hint on a fence
+ * @deadline_ns: absolute time of the deadline
+ * @pad: must be zero
+ *
+ * Allows userspace to set a deadline on a fence, see &dma_fence_set_deadline
+ *
+ * The timebase for the deadline is CLOCK_MONOTONIC (same as vblank). For
+ * example::
+ *
+ * clock_gettime(CLOCK_MONOTONIC, &t);
+ * deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + ns_until_deadline
+ */
+struct sync_set_deadline {
+ __u64 deadline_ns;
+ /* Not strictly needed for alignment but gives some possibility
+ * for future extension:
+ */
+ __u64 pad;
+};
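+
+/*
+ * Illustrative sketch: asking for a sync_file fence to complete within the
+ * next 16ms (e.g. before an upcoming vblank), using the SYNC_IOC_SET_DEADLINE
+ * ioctl defined at the end of this file. `sync_fd` is an assumption of this
+ * example.
+ *
+ *   struct timespec t;
+ *   clock_gettime(CLOCK_MONOTONIC, &t);
+ *   struct sync_set_deadline dl = {
+ *       .deadline_ns = (t.tv_sec * 1000000000L) + t.tv_nsec + 16000000L,
+ *   };
+ *   ioctl(sync_fd, SYNC_IOC_SET_DEADLINE, &dl);
+ */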
+
#define SYNC_IOC_MAGIC '>'
/*
@@ -87,5 +108,6 @@ struct sync_file_info {
#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
+#define SYNC_IOC_SET_DEADLINE _IOW(SYNC_IOC_MAGIC, 5, struct sync_set_deadline)
#endif /* _UAPI_LINUX_SYNC_H */