author     Linus Torvalds <torvalds@linux-foundation.org>  2023-08-30 13:34:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-08-30 13:34:34 -0700
commit     461f35f014466c4e26dca6be0f431f57297df3f2
tree       0bd2fded69ba0752ca16c304d3e1880d5f1eb30b  /include
parent     53ea7f624fb91074c2f9458832ed74975ee5d64c
parent     3698a75f5a98d0a6599e2878ab25d30a82dd836a
Merge tag 'drm-next-2023-08-30' of git://anongit.freedesktop.org/drm/drm

Pull drm updates from Dave Airlie:
 "The drm core grew a new generic gpu virtual address manager, and new
  execution locking helpers. These are used by nouveau now to provide
  uAPI support for the userspace Vulkan driver. AMD had a bunch of new
  IP core support, loads of refactoring around fbdev, but mostly just
  the usual amount of stuff across the board.

  core:
   - fix gfp flags in drmm_kmalloc

  gpuva:
   - add new generic GPU VA manager (for nouveau initially)

  syncobj:
   - add new DRM_IOCTL_SYNCOBJ_EVENTFD ioctl

  dma-buf:
   - acquire resv lock for mmap() in exporters
   - support dma-buf self import automatically
   - docs fixes

  backlight:
   - fix fbdev interactions

  atomic:
   - improve logging

  prime:
   - remove struct gem_prime_mmap plus driver updates

  gem:
   - drm_exec: add locking over multiple GEM objects
   - fix lockdep checking

  fbdev:
   - make fbdev userspace interfaces optional
   - use linux device instead of fbdev device
   - use deferred i/o helper macros in various drivers
   - Make FB core selectable without drivers
   - Remove obsolete flags FBINFO_DEFAULT and FBINFO_FLAG_DEFAULT
   - Add helper macros and Kconfig tokens for DMA-allocated framebuffer

  ttm:
   - support init_on_free
   - swapout fixes

  panel:
   - panel-edp: Support AUO B116XAB01.4
   - Support Visionox R66451 plus DT bindings
   - ld9040:
      - Backlight support
      - magic improved
      - Kconfig fix
   - Convert to of_device_get_match_data()
   - Fix Kconfig dependencies
   - simple:
      - Set bpc value to fix warning
      - Set connector type for AUO T215HVN01
      - Support Innolux G156HCE-L01 plus DT bindings
   - ili9881: Support TDO TL050HDV35 LCD panel plus DT bindings
   - startek: Support KD070FHFID015 MIPI-DSI panel plus DT bindings
   - sitronix-st7789v:
      - Support Inanbo T28CP45TN89 plus DT bindings
      - Support EDT ET028013DMA plus DT bindings
      - Various cleanups
   - edp: Add timings for N140HCA-EAC
   - Allow panels and touchscreens to power sequence together
   - Fix Innolux G156HCE-L01 LVDS clock

  bridge:
   - debugfs for chains support
   - dw-hdmi:
      - Improve support for YUV420 bus format
      - CEC suspend/resume
      - update EDID on HDMI detect
   - dw-mipi-dsi: Fix enable/disable of DSI controller
   - lt9611uxc: Use MODULE_FIRMWARE()
   - ps8640: Remove broken EDID code
   - samsung-dsim: Fix command transfer
   - tc358764:
      - Handle HS/VS polarity
      - Use BIT() macro
      - Various cleanups
   - adv7511: Fix low refresh rate
   - anx7625:
      - Switch to macros instead of hardcoded values
      - locking fixes
   - tc358767: fix hardware delays
   - sitronix-st7789v:
      - Support panel orientation
      - Support rotation property
      - Add support for Jasonic JT240MHQS-HWT-EK-E3 plus DT bindings

  amdgpu:
   - SDMA 6.1.0 support
   - HDP 6.1 support
   - SMUIO 14.0 support
   - PSP 14.0 support
   - IH 6.1 support
   - Lots of checkpatch cleanups
   - GFX 9.4.3 updates
   - Add USB PD and IFWI flashing documentation
   - GPUVM updates
   - RAS fixes
   - DRR fixes
   - FAMS fixes
   - Virtual display fixes
   - Soft IH fixes
   - SMU13 fixes
   - Rework PSP firmware loading for other IPs
   - Kernel doc fixes
   - DCN 3.0.1 fixes
   - LTTPR fixes
   - DP MST fixes
   - DCN 3.1.6 fixes
   - SMU 13.x fixes
   - PSP 13.x fixes
   - SubVP fixes
   - GC 9.4.3 fixes
   - Display bandwidth calculation fixes
   - VCN4 secure submission fixes
   - Allow building DC on RISC-V
   - Add visible FB info to bo_print_info
   - HBR3 fixes
   - GFX9 MCBP fix
   - GMC10 vmhub index fix
   - GMC11 vmhub index fix
   - Create a new doorbell manager
   - SR-IOV fixes
   - initial freesync panel replay support
   - revert zpos properly until igt regression is fixed
   - use TTM to manage doorbell BAR
   - Expose both current and average power via hwmon if supported

  amdkfd:
   - Cleanup CRIU dma-buf handling
   - Use KIQ to unmap HIQ
   - GFX 9.4.3 debugger updates
   - GFX 9.4.2 debugger fixes
   - Enable cooperative groups for gfx11
   - SVM fixes
   - Convert older APUs to use dGPU path like newer APUs
   - Drop IOMMUv2 path as it is no longer used
   - TBA fix for aldebaran

  i915:
   - ICL+ DSI modeset sequence
   - HDCP improvements
   - MTL display fixes and cleanups
   - HSW/BDW PSR1 restored
   - Init DDI ports in VBT order
   - General display refactors
   - Start using plane scale factor for relative data rate
   - Use shmem for dpt objects
   - Expose RPS thresholds in sysfs
   - Apply GuC SLPC min frequency softlimit correctly
   - Extend Wa_14015795083 to TGL, RKL, DG1 and ADL
   - Fix a VMA UAF for multi-gt platform
   - Do not use stolen on MTL due to HW bug
   - Check HuC and GuC version compatibility on MTL
   - avoid infinite GPU waits due to premature release of request memory
   - Fixes and updates for GSC memory allocation
   - Display SDVO fixes
   - Take stolen handling out of FBC code
   - Make i915_coherent_map_type GT-centric
   - Simplify shmem_create_from_object map_type

  msm:
   - SM6125 MDSS support
   - DPU: SM6125 DPU support
   - DSI: runtime PM support, burst mode support
   - DSI PHY: SM6125 support in 14nm DSI PHY driver
   - GPU: prepare for a7xx
   - fix a690 firmware
   - disable relocs on a6xx and newer

  radeon:
   - Lots of checkpatch cleanups

  ast:
   - improve device-model detection
   - Represent BMC as virtual connector
   - Report DP connection status

  nouveau:
   - add new exec/bind interface to support Vulkan
   - document some getparam ioctls
   - improve VRAM detection
   - various fixes/cleanups
   - workaround DPCD issues

  ivpu:
   - MMU updates
   - debugfs support
   - Support vpu4

  virtio:
   - add sync object support

  atmel-hlcdc:
   - Support inverted pixclock polarity

  etnaviv:
   - runtime PM cleanups
   - hang handling fixes

  exynos:
   - use fbdev DMA helpers
   - fix possible NULL ptr dereference

  komeda:
   - always attach encoder

  omapdrm:
   - use fbdev DMA helpers

  ingenic:
   - kconfig regmap fixes

  loongson:
   - support display controller

  mediatek:
   - Small mtk-dpi cleanups
   - DisplayPort: support eDP and aux-bus
   - Fix coverity issues
   - Fix potential memory leak if vmap() fail

  mgag200:
   - minor fixes

  mxsfb:
   - support disabling overlay planes

  panfrost:
   - fix sync in IRQ handling

  ssd130x:
   - Support per-controller default resolution plus DT bindings
   - Reduce memory-allocation overhead
   - Improve intermediate buffer size computation
   - Fix allocation of temporary buffers
   - Fix pitch computation
   - Fix shadow plane allocation

  tegra:
   - use fbdev DMA helpers
   - Convert to devm_platform_ioremap_resource()
   - support bridge/connector
   - enable PM

  tidss:
   - Support TI AM625 plus DT bindings
   - Implement new connector model plus driver updates

  vkms:
   - improve write back support
   - docs fixes
   - support gamma LUT

  zynqmp-dpsub:
   - misc fixes"

* tag 'drm-next-2023-08-30' of git://anongit.freedesktop.org/drm/drm: (1327 commits)
  drm/gpuva_mgr: remove unused prev pointer in __drm_gpuva_sm_map()
  drm/tests/drm_kunit_helpers: Place correct function name in the comment header
  drm/nouveau: uapi: don't pass NO_PREFETCH flag implicitly
  drm/nouveau: uvmm: fix unset region pointer on remap
  drm/nouveau: sched: avoid job races between entities
  drm/i915: Fix HPD polling, reenabling the output poll work as needed
  drm: Add an HPD poll helper to reschedule the poll work
  drm/i915: Fix TLB-Invalidation seqno store
  drm/ttm/tests: Fix type conversion in ttm_pool_test
  drm/msm/a6xx: Bail out early if setting GPU OOB fails
  drm/msm/a6xx: Move LLC accessors to the common header
  drm/msm/a6xx: Introduce a6xx_llc_read
  drm/ttm/tests: Require MMU when testing
  drm/panel: simple: Fix Innolux G156HCE-L01 LVDS clock
  Revert "Revert "drm/amdgpu/display: change pipe policy for DCN 2.0""
  drm/amdgpu: Add memory vendor information
  drm/amd: flush any delayed gfxoff on suspend entry
  drm/amdgpu: skip fence GFX interrupts disable/enable for S0ix
  drm/amdgpu: Remove gfxoff check in GFX v9.4.3
  drm/amd/pm: Update pci link speed for smu v13.0.6
  ...
Diffstat (limited to 'include')
-rw-r--r--  include/drm/bridge/dw_hdmi.h | 2
-rw-r--r--  include/drm/drm_bridge.h | 3
-rw-r--r--  include/drm/drm_crtc.h | 5
-rw-r--r--  include/drm/drm_debugfs.h | 25
-rw-r--r--  include/drm/drm_drv.h | 32
-rw-r--r--  include/drm/drm_exec.h | 123
-rw-r--r--  include/drm/drm_file.h | 8
-rw-r--r--  include/drm/drm_gem.h | 82
-rw-r--r--  include/drm/drm_gem_dma_helper.h | 14
-rw-r--r--  include/drm/drm_gem_shmem_helper.h | 19
-rw-r--r--  include/drm/drm_gem_vram_helper.h | 9
-rw-r--r--  include/drm/drm_gpuva_mgr.h | 706
-rw-r--r--  include/drm/drm_kunit_helpers.h | 7
-rw-r--r--  include/drm/drm_modeset_helper_vtables.h | 48
-rw-r--r--  include/drm/drm_panel.h | 96
-rw-r--r--  include/drm/drm_plane.h | 2
-rw-r--r--  include/drm/drm_prime.h | 7
-rw-r--r--  include/drm/drm_syncobj.h | 6
-rw-r--r--  include/drm/drm_sysfs.h | 4
-rw-r--r--  include/drm/task_barrier.h | 4
-rw-r--r--  include/drm/ttm/ttm_bo.h | 2
-rw-r--r--  include/linux/efi.h | 3
-rw-r--r--  include/linux/fb.h | 58
-rw-r--r--  include/linux/platform_data/bd6107.h | 2
-rw-r--r--  include/linux/platform_data/gpio_backlight.h | 2
-rw-r--r--  include/linux/platform_data/lv5207lp.h | 2
-rw-r--r--  include/linux/sysfb.h | 3
-rw-r--r--  include/uapi/drm/amdgpu_drm.h | 7
-rw-r--r--  include/uapi/drm/drm.h | 84
-rw-r--r--  include/uapi/drm/drm_mode.h | 7
-rw-r--r--  include/uapi/drm/ivpu_accel.h | 9
-rw-r--r--  include/uapi/drm/nouveau_drm.h | 271
-rw-r--r--  include/uapi/drm/virtgpu_drm.h | 16
33 files changed, 1499 insertions, 169 deletions
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index f668e75fbabe..6a46baa0737c 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -206,4 +206,6 @@ void dw_hdmi_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
bool force, bool disabled, bool rxsense);
void dw_hdmi_phy_setup_hpd(struct dw_hdmi *hdmi, void *data);
+bool dw_hdmi_bus_fmt_is_420(struct dw_hdmi *hdmi);
+
#endif /* __IMX_HDMI_H__ */
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index bf964cdfb330..c339fc85fd07 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -36,6 +36,7 @@ struct drm_bridge;
struct drm_bridge_timings;
struct drm_connector;
struct drm_display_info;
+struct drm_minor;
struct drm_panel;
struct edid;
struct i2c_adapter;
@@ -949,4 +950,6 @@ static inline struct drm_bridge *drmm_of_get_bridge(struct drm_device *drm,
}
#endif
+void drm_bridge_debugfs_init(struct drm_minor *minor);
+
#endif
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 8e1cbc75143e..8b48a1974da3 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -77,11 +77,6 @@ struct drm_plane_helper_funcs;
* intended to indicate whether a full modeset is needed, rather than strictly
* describing what has changed in a commit. See also:
* drm_atomic_crtc_needs_modeset()
- *
- * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
- * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
- * state like @plane_mask so drivers not converted over to atomic helpers should
- * not rely on these being accurate!
*/
struct drm_crtc_state {
/** @crtc: backpointer to the CRTC */
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
index 7616f457ce70..cb2c1956a214 100644
--- a/include/drm/drm_debugfs.h
+++ b/include/drm/drm_debugfs.h
@@ -34,6 +34,22 @@
#include <linux/types.h>
#include <linux/seq_file.h>
+
+#include <drm/drm_gpuva_mgr.h>
+
+/**
+ * DRM_DEBUGFS_GPUVA_INFO - &drm_info_list entry to dump a GPU VA space
+ * @show: the &drm_info_list's show callback
+ * @data: driver private data
+ *
+ * Drivers should use this macro to define a &drm_info_list entry to provide a
+ * debugfs file for dumping the GPU VA space regions and mappings.
+ *
+ * For each DRM GPU VA space drivers should call drm_debugfs_gpuva_info() from
+ * their @show callback.
+ */
+#define DRM_DEBUGFS_GPUVA_INFO(show, data) {"gpuvas", show, DRIVER_GEM_GPUVA, data}
+
/**
* struct drm_info_list - debugfs info list entry
*
@@ -134,6 +150,9 @@ void drm_debugfs_add_file(struct drm_device *dev, const char *name,
void drm_debugfs_add_files(struct drm_device *dev,
const struct drm_debugfs_info *files, int count);
+
+int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuva_manager *mgr);
#else
static inline void drm_debugfs_create_files(const struct drm_info_list *files,
int count, struct dentry *root,
@@ -155,6 +174,12 @@ static inline void drm_debugfs_add_files(struct drm_device *dev,
const struct drm_debugfs_info *files,
int count)
{}
+
+static inline int drm_debugfs_gpuva_info(struct seq_file *m,
+ struct drm_gpuva_manager *mgr)
+{
+ return 0;
+}
#endif
#endif /* _DRM_DEBUGFS_H_ */
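
For illustration only (not part of this patch), a rough sketch of how a driver could wire the new helper up, assuming made-up names (my_gpuva_mgr, my_gpuvas_show, my_debugfs_init) and the usual drm_info_list plumbing, with the manager passed through the entry's data pointer:

#include <linux/kernel.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_gpuva_mgr.h>

static struct drm_gpuva_manager my_gpuva_mgr;

static int my_gpuvas_show(struct seq_file *m, void *data)
{
        struct drm_info_node *node = m->private;
        struct drm_gpuva_manager *mgr = node->info_ent->data;

        /* Dump all regions and mappings of the VA space into the seq_file. */
        return drm_debugfs_gpuva_info(m, mgr);
}

static const struct drm_info_list my_debugfs_list[] = {
        DRM_DEBUGFS_GPUVA_INFO(my_gpuvas_show, &my_gpuva_mgr),
};

/* Hooked up via &drm_driver.debugfs_init. */
static void my_debugfs_init(struct drm_minor *minor)
{
        drm_debugfs_create_files(my_debugfs_list, ARRAY_SIZE(my_debugfs_list),
                                 minor->debugfs_root, minor);
}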
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 89e2706cac56..9813fa759b75 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -104,6 +104,12 @@ enum drm_driver_feature {
* acceleration should be handled by two drivers that are connected using auxiliary bus.
*/
DRIVER_COMPUTE_ACCEL = BIT(7),
+ /**
+ * @DRIVER_GEM_GPUVA:
+ *
+ * Driver supports user defined GPU VA bindings for GEM objects.
+ */
+ DRIVER_GEM_GPUVA = BIT(8),
/* IMPORTANT: Below are all the legacy flags, add new ones above. */
@@ -304,22 +310,14 @@ struct drm_driver {
/**
* @prime_handle_to_fd:
*
- * Main PRIME export function. Should be implemented with
- * drm_gem_prime_handle_to_fd() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME export function. Only used by vmwgfx.
*/
int (*prime_handle_to_fd)(struct drm_device *dev, struct drm_file *file_priv,
uint32_t handle, uint32_t flags, int *prime_fd);
/**
* @prime_fd_to_handle:
*
- * Main PRIME import function. Should be implemented with
- * drm_gem_prime_fd_to_handle() for GEM based drivers.
- *
- * For an in-depth discussion see :ref:`PRIME buffer sharing
- * documentation <prime_buffer_sharing>`.
+ * PRIME import function. Only used by vmwgfx.
*/
int (*prime_fd_to_handle)(struct drm_device *dev, struct drm_file *file_priv,
int prime_fd, uint32_t *handle);
@@ -343,20 +341,6 @@ struct drm_driver {
struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
- /**
- * @gem_prime_mmap:
- *
- * mmap hook for GEM drivers, used to implement dma-buf mmap in the
- * PRIME helpers.
- *
- * This hook only exists for historical reasons. Drivers must use
- * drm_gem_prime_mmap() to implement it.
- *
- * FIXME: Convert all drivers to implement mmap in struct
- * &drm_gem_object_funcs and inline drm_gem_prime_mmap() into
- * its callers. This hook should be removed afterwards.
- */
- int (*gem_prime_mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
/**
* @dumb_create:
diff --git a/include/drm/drm_exec.h b/include/drm/drm_exec.h
new file mode 100644
index 000000000000..e0462361adf9
--- /dev/null
+++ b/include/drm/drm_exec.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef __DRM_EXEC_H__
+#define __DRM_EXEC_H__
+
+#include <linux/compiler.h>
+#include <linux/ww_mutex.h>
+
+#define DRM_EXEC_INTERRUPTIBLE_WAIT BIT(0)
+#define DRM_EXEC_IGNORE_DUPLICATES BIT(1)
+
+struct drm_gem_object;
+
+/**
+ * struct drm_exec - Execution context
+ */
+struct drm_exec {
+ /**
+ * @flags: Flags to control locking behavior
+ */
+ uint32_t flags;
+
+ /**
+ * @ticket: WW ticket used for acquiring locks
+ */
+ struct ww_acquire_ctx ticket;
+
+ /**
+ * @num_objects: number of objects locked
+ */
+ unsigned int num_objects;
+
+ /**
+ * @max_objects: maximum objects in array
+ */
+ unsigned int max_objects;
+
+ /**
+ * @objects: array of the locked objects
+ */
+ struct drm_gem_object **objects;
+
+ /**
+ * @contended: contended GEM object we backed off for
+ */
+ struct drm_gem_object *contended;
+
+ /**
+ * @prelocked: already locked GEM object due to contention
+ */
+ struct drm_gem_object *prelocked;
+};
+
+/**
+ * drm_exec_for_each_locked_object - iterate over all the locked objects
+ * @exec: drm_exec object
+ * @index: unsigned long index for the iteration
+ * @obj: the current GEM object
+ *
+ * Iterate over all the locked GEM objects inside the drm_exec object.
+ */
+#define drm_exec_for_each_locked_object(exec, index, obj) \
+ for (index = 0, obj = (exec)->objects[0]; \
+ index < (exec)->num_objects; \
+ ++index, obj = (exec)->objects[index])
+
+/**
+ * drm_exec_until_all_locked - loop until all GEM objects are locked
+ * @exec: drm_exec object
+ *
+ * Core functionality of the drm_exec object. Loops until all GEM objects are
+ * locked and no more contention exists. At the beginning of the loop it is
+ * guaranteed that no GEM object is locked.
+ *
+ * Since labels can't be defined local to the loop's body we use a jump pointer
+ * to make sure that the retry is only used from within the loop's body.
+ */
+#define drm_exec_until_all_locked(exec) \
+__PASTE(__drm_exec_, __LINE__): \
+ for (void *__drm_exec_retry_ptr; ({ \
+ __drm_exec_retry_ptr = &&__PASTE(__drm_exec_, __LINE__);\
+ (void)__drm_exec_retry_ptr; \
+ drm_exec_cleanup(exec); \
+ });)
+
+/**
+ * drm_exec_retry_on_contention - restart the loop to grab all locks
+ * @exec: drm_exec object
+ *
+ * Control flow helper to continue when a contention was detected and we need to
+ * clean up and re-start the loop to prepare all GEM objects.
+ */
+#define drm_exec_retry_on_contention(exec) \
+ do { \
+ if (unlikely(drm_exec_is_contended(exec))) \
+ goto *__drm_exec_retry_ptr; \
+ } while (0)
+
+/**
+ * drm_exec_is_contended - check for contention
+ * @exec: drm_exec object
+ *
+ * Returns true if the drm_exec object has run into some contention while
+ * locking a GEM object and needs to clean up.
+ */
+static inline bool drm_exec_is_contended(struct drm_exec *exec)
+{
+ return !!exec->contended;
+}
+
+void drm_exec_init(struct drm_exec *exec, uint32_t flags);
+void drm_exec_fini(struct drm_exec *exec);
+bool drm_exec_cleanup(struct drm_exec *exec);
+int drm_exec_lock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+void drm_exec_unlock_obj(struct drm_exec *exec, struct drm_gem_object *obj);
+int drm_exec_prepare_obj(struct drm_exec *exec, struct drm_gem_object *obj,
+ unsigned int num_fences);
+int drm_exec_prepare_array(struct drm_exec *exec,
+ struct drm_gem_object **objects,
+ unsigned int num_objects,
+ unsigned int num_fences);
+
+#endif
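
For context, the intended locking pattern of this new interface looks roughly like the sketch below; it is hypothetical (my_lock_bos and its parameters are made-up names), not code from this patch:

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>

static int my_lock_bos(struct drm_gem_object **bos, unsigned int count)
{
        struct drm_exec exec;
        unsigned int i;
        int ret;

        drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT);
        drm_exec_until_all_locked(&exec) {
                for (i = 0; i < count; i++) {
                        /* Lock the object and reserve one fence slot. */
                        ret = drm_exec_prepare_obj(&exec, bos[i], 1);
                        /* On contention: back off and restart the loop. */
                        drm_exec_retry_on_contention(&exec);
                        if (ret)
                                goto err;
                }
        }

        /* All objects are locked at this point; submit work, add fences, etc. */

        drm_exec_fini(&exec);
        return 0;

err:
        drm_exec_fini(&exec);
        return ret;
}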
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 966912053cb0..010239392adf 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -50,16 +50,16 @@ struct file;
* header include loops we need it here for now.
*/
-/* Note that the order of this enum is ABI (it determines
+/* Note that the values of this enum are ABI (it determines
* /dev/dri/renderD* numbers).
*
* Setting DRM_MINOR_ACCEL to 32 gives enough space for more drm minors to
* be implemented before we hit any future
*/
enum drm_minor_type {
- DRM_MINOR_PRIMARY,
- DRM_MINOR_CONTROL,
- DRM_MINOR_RENDER,
+ DRM_MINOR_PRIMARY = 0,
+ DRM_MINOR_CONTROL = 1,
+ DRM_MINOR_RENDER = 2,
DRM_MINOR_ACCEL = 32,
};
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index bbc721870c13..bc9f6aa2f3fe 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -36,6 +36,8 @@
#include <linux/kref.h>
#include <linux/dma-resv.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
#include <drm/drm_vma_manager.h>
@@ -380,6 +382,22 @@ struct drm_gem_object {
struct dma_resv _resv;
/**
+ * @gpuva:
+ *
+ * Provides the list of GPU VAs attached to this GEM object.
+ *
+ * Drivers should lock list accesses with the GEMs &dma_resv lock
+ * (&drm_gem_object.resv) or a custom lock if one is provided.
+ */
+ struct {
+ struct list_head list;
+
+#ifdef CONFIG_LOCKDEP
+ struct lockdep_map *lock_dep_map;
+#endif
+ } gpuva;
+
+ /**
* @funcs:
*
* Optional GEM object functions. If this is set, it will be used instead of the
@@ -526,4 +544,68 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru,
int drm_gem_evict(struct drm_gem_object *obj);
+#ifdef CONFIG_LOCKDEP
+/**
+ * drm_gem_gpuva_set_lock() - Set the lock protecting accesses to the gpuva list.
+ * @obj: the &drm_gem_object
+ * @lock: the lock used to protect the gpuva list. The locking primitive
+ * must contain a dep_map field.
+ *
+ * Call this if you're not protecting access to the gpuva list with the
+ * dma-resv lock, but with a custom lock.
+ */
+#define drm_gem_gpuva_set_lock(obj, lock) \
+ if (!WARN((obj)->gpuva.lock_dep_map, \
+ "GEM GPUVA lock should be set only once.")) \
+ (obj)->gpuva.lock_dep_map = &(lock)->dep_map
+#define drm_gem_gpuva_assert_lock_held(obj) \
+ lockdep_assert((obj)->gpuva.lock_dep_map ? \
+ lock_is_held((obj)->gpuva.lock_dep_map) : \
+ dma_resv_held((obj)->resv))
+#else
+#define drm_gem_gpuva_set_lock(obj, lock) do {} while (0)
+#define drm_gem_gpuva_assert_lock_held(obj) do {} while (0)
+#endif
+
+/**
+ * drm_gem_gpuva_init() - initialize the gpuva list of a GEM object
+ * @obj: the &drm_gem_object
+ *
+ * This initializes the &drm_gem_object's &drm_gpuva list.
+ *
+ * Calling this function is only necessary for drivers intending to support the
+ * &drm_driver_feature DRIVER_GEM_GPUVA.
+ *
+ * See also drm_gem_gpuva_set_lock().
+ */
+static inline void drm_gem_gpuva_init(struct drm_gem_object *obj)
+{
+ INIT_LIST_HEAD(&obj->gpuva.list);
+}
+
+/**
+ * drm_gem_for_each_gpuva() - iterator to walk over a list of gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object.
+ */
+#define drm_gem_for_each_gpuva(entry__, obj__) \
+ list_for_each_entry(entry__, &(obj__)->gpuva.list, gem.entry)
+
+/**
+ * drm_gem_for_each_gpuva_safe() - iterator to safely walk over a list of
+ * gpuvas
+ * @entry__: &drm_gpuva structure to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @obj__: the &drm_gem_object the &drm_gpuvas to walk are associated with
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the
+ * &drm_gem_object. It is implemented with list_for_each_entry_safe(), hence
+ * it is safe against removal of elements.
+ */
+#define drm_gem_for_each_gpuva_safe(entry__, next__, obj__) \
+ list_for_each_entry_safe(entry__, next__, &(obj__)->gpuva.list, gem.entry)
+
#endif /* __DRM_GEM_H__ */
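
As a rough illustration of the new gpuva list plumbing (hypothetical driver code with made-up names; the invalidate helper comes from the new drm_gpuva_mgr.h further below):

#include <drm/drm_gem.h>
#include <drm/drm_gpuva_mgr.h>

/* Required once per GEM object for drivers setting DRIVER_GEM_GPUVA. */
static void my_gem_object_init(struct drm_gem_object *obj)
{
        drm_gem_gpuva_init(obj);
}

/* Mark every VA mapping backed by this object as invalidated. */
static void my_gem_invalidate_mappings(struct drm_gem_object *obj)
{
        struct drm_gpuva *va;

        /* List accesses must hold the dma-resv (or the custom) lock. */
        drm_gem_gpuva_assert_lock_held(obj);

        drm_gem_for_each_gpuva(va, obj)
                drm_gpuva_invalidate(va, true);
}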
diff --git a/include/drm/drm_gem_dma_helper.h b/include/drm/drm_gem_dma_helper.h
index 8a043235dad8..a827bde494f6 100644
--- a/include/drm/drm_gem_dma_helper.h
+++ b/include/drm/drm_gem_dma_helper.h
@@ -166,11 +166,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
* DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE() instead.
*/
#define DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = (dumb_create_func), \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table
/**
* DRM_GEM_DMA_DRIVER_OPS - DMA GEM driver operations
@@ -204,11 +201,8 @@ drm_gem_dma_prime_import_sg_table(struct drm_device *dev,
* DRM_GEM_DMA_DRIVER_OPS_WITH_DUMB_CREATE() instead.
*/
#define DRM_GEM_DMA_DRIVER_OPS_VMAP_WITH_DUMB_CREATE(dumb_create_func) \
- .dumb_create = dumb_create_func, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
- .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap, \
- .gem_prime_mmap = drm_gem_prime_mmap
+ .dumb_create = (dumb_create_func), \
+ .gem_prime_import_sg_table = drm_gem_dma_prime_import_sg_table_vmap
/**
* DRM_GEM_DMA_DRIVER_OPS_VMAP - DMA GEM driver operations ensuring a virtual
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 5994fed5e327..bf0c31aa8fbe 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -27,11 +27,6 @@ struct drm_gem_shmem_object {
struct drm_gem_object base;
/**
- * @pages_lock: Protects the page table and use count
- */
- struct mutex pages_lock;
-
- /**
* @pages: Page table
*/
struct page **pages;
@@ -66,11 +61,6 @@ struct drm_gem_shmem_object {
struct sg_table *sgt;
/**
- * @vmap_lock: Protects the vmap address and use count
- */
- struct mutex vmap_lock;
-
- /**
* @vaddr: Kernel virtual address of the backing memory
*/
void *vaddr;
@@ -109,7 +99,6 @@ struct drm_gem_shmem_object {
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem);
-int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem);
int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem);
void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem);
@@ -128,8 +117,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem
!shmem->base.dma_buf && !shmem->base.import_attach;
}
-void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem);
-bool drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
+void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem);
struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem);
@@ -290,10 +278,7 @@ int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
* the &drm_driver structure.
*/
#define DRM_GEM_SHMEM_DRIVER_OPS \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle, \
.gem_prime_import_sg_table = drm_gem_shmem_prime_import_sg_table, \
- .gem_prime_mmap = drm_gem_prime_mmap, \
- .dumb_create = drm_gem_shmem_dumb_create
+ .dumb_create = drm_gem_shmem_dumb_create
#endif /* __DRM_GEM_SHMEM_HELPER_H__ */
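
With the PRIME hooks gone from these macros, a shmem-based driver's &drm_driver setup reduces to something like the following sketch (hypothetical driver, made-up names, not from this patch):

#include <drm/drm_drv.h>
#include <drm/drm_gem_shmem_helper.h>

DEFINE_DRM_GEM_FOPS(my_driver_fops);

static const struct drm_driver my_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .fops = &my_driver_fops,
        .name = "my-shmem-drm",
        .desc = "Example shmem-backed driver",
        .date = "20230830",
        .major = 1,
        .minor = 0,
        /* PRIME import/export is now provided by the DRM core by default. */
        DRM_GEM_SHMEM_DRIVER_OPS,
};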
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index f4aab64411d8..e18429f09e53 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -157,12 +157,9 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb(
* &struct drm_driver with default functions.
*/
#define DRM_GEM_VRAM_DRIVER \
- .debugfs_init = drm_vram_mm_debugfs_init, \
- .dumb_create = drm_gem_vram_driver_dumb_create, \
- .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \
- .gem_prime_mmap = drm_gem_prime_mmap, \
- .prime_handle_to_fd = drm_gem_prime_handle_to_fd, \
- .prime_fd_to_handle = drm_gem_prime_fd_to_handle
+ .debugfs_init = drm_vram_mm_debugfs_init, \
+ .dumb_create = drm_gem_vram_driver_dumb_create, \
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset
/*
* VRAM memory manager
diff --git a/include/drm/drm_gpuva_mgr.h b/include/drm/drm_gpuva_mgr.h
new file mode 100644
index 000000000000..ed8d50200cc3
--- /dev/null
+++ b/include/drm/drm_gpuva_mgr.h
@@ -0,0 +1,706 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __DRM_GPUVA_MGR_H__
+#define __DRM_GPUVA_MGR_H__
+
+/*
+ * Copyright (c) 2022 Red Hat.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <linux/list.h>
+#include <linux/rbtree.h>
+#include <linux/types.h>
+
+#include <drm/drm_gem.h>
+
+struct drm_gpuva_manager;
+struct drm_gpuva_fn_ops;
+
+/**
+ * enum drm_gpuva_flags - flags for struct drm_gpuva
+ */
+enum drm_gpuva_flags {
+ /**
+ * @DRM_GPUVA_INVALIDATED:
+ *
+ * Flag indicating that the &drm_gpuva's backing GEM is invalidated.
+ */
+ DRM_GPUVA_INVALIDATED = (1 << 0),
+
+ /**
+ * @DRM_GPUVA_SPARSE:
+ *
+ * Flag indicating that the &drm_gpuva is a sparse mapping.
+ */
+ DRM_GPUVA_SPARSE = (1 << 1),
+
+ /**
+ * @DRM_GPUVA_USERBITS: user defined bits
+ */
+ DRM_GPUVA_USERBITS = (1 << 2),
+};
+
+/**
+ * struct drm_gpuva - structure to track a GPU VA mapping
+ *
+ * This structure represents a GPU VA mapping and is associated with a
+ * &drm_gpuva_manager.
+ *
+ * Typically, this structure is embedded in bigger driver structures.
+ */
+struct drm_gpuva {
+ /**
+ * @mgr: the &drm_gpuva_manager this object is associated with
+ */
+ struct drm_gpuva_manager *mgr;
+
+ /**
+ * @flags: the &drm_gpuva_flags for this mapping
+ */
+ enum drm_gpuva_flags flags;
+
+ /**
+ * @va: structure containing the address and range of the &drm_gpuva
+ */
+ struct {
+ /**
+ * @addr: the start address
+ */
+ u64 addr;
+
+ /**
+ * @range: the range
+ */
+ u64 range;
+ } va;
+
+ /**
+ * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @obj: the mapped &drm_gem_object
+ */
+ struct drm_gem_object *obj;
+
+ /**
+ * @entry: the &list_head to attach this object to a &drm_gem_object
+ */
+ struct list_head entry;
+ } gem;
+
+ /**
+ * @rb: structure containing data to store &drm_gpuvas in a rb-tree
+ */
+ struct {
+ /**
+ * @rb: the rb-tree node
+ */
+ struct rb_node node;
+
+ /**
+ * @entry: The &list_head to additionally connect &drm_gpuvas
+ * in the same order they appear in the interval tree. This is
+ * useful to keep iterating &drm_gpuvas from a start node found
+ * through the rb-tree while doing modifications on the rb-tree
+ * itself.
+ */
+ struct list_head entry;
+
+ /**
+ * @__subtree_last: needed by the interval tree, holding last-in-subtree
+ */
+ u64 __subtree_last;
+ } rb;
+};
+
+int drm_gpuva_insert(struct drm_gpuva_manager *mgr, struct drm_gpuva *va);
+void drm_gpuva_remove(struct drm_gpuva *va);
+
+void drm_gpuva_link(struct drm_gpuva *va);
+void drm_gpuva_unlink(struct drm_gpuva *va);
+
+struct drm_gpuva *drm_gpuva_find(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_first(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range);
+struct drm_gpuva *drm_gpuva_find_prev(struct drm_gpuva_manager *mgr, u64 start);
+struct drm_gpuva *drm_gpuva_find_next(struct drm_gpuva_manager *mgr, u64 end);
+
+bool drm_gpuva_interval_empty(struct drm_gpuva_manager *mgr, u64 addr, u64 range);
+
+static inline void drm_gpuva_init(struct drm_gpuva *va, u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset)
+{
+ va->va.addr = addr;
+ va->va.range = range;
+ va->gem.obj = obj;
+ va->gem.offset = offset;
+}
+
+/**
+ * drm_gpuva_invalidate() - sets whether the backing GEM of this &drm_gpuva is
+ * invalidated
+ * @va: the &drm_gpuva to set the invalidate flag for
+ * @invalidate: indicates whether the &drm_gpuva is invalidated
+ */
+static inline void drm_gpuva_invalidate(struct drm_gpuva *va, bool invalidate)
+{
+ if (invalidate)
+ va->flags |= DRM_GPUVA_INVALIDATED;
+ else
+ va->flags &= ~DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * drm_gpuva_invalidated() - indicates whether the backing BO of this &drm_gpuva
+ * is invalidated
+ * @va: the &drm_gpuva to check
+ */
+static inline bool drm_gpuva_invalidated(struct drm_gpuva *va)
+{
+ return va->flags & DRM_GPUVA_INVALIDATED;
+}
+
+/**
+ * struct drm_gpuva_manager - DRM GPU VA Manager
+ *
+ * The DRM GPU VA Manager keeps track of a GPU's virtual address space by using
+ * &maple_tree structures. Typically, this structure is embedded in bigger
+ * driver structures.
+ *
+ * Drivers can pass addresses and ranges in an arbitrary unit, e.g. bytes or
+ * pages.
+ *
+ * There should be one manager instance per GPU virtual address space.
+ */
+struct drm_gpuva_manager {
+ /**
+ * @name: the name of the DRM GPU VA space
+ */
+ const char *name;
+
+ /**
+ * @mm_start: start of the VA space
+ */
+ u64 mm_start;
+
+ /**
+ * @mm_range: length of the VA space
+ */
+ u64 mm_range;
+
+ /**
+ * @rb: structures to track &drm_gpuva entries
+ */
+ struct {
+ /**
+ * @tree: the rb-tree to track GPU VA mappings
+ */
+ struct rb_root_cached tree;
+
+ /**
+ * @list: the &list_head to track GPU VA mappings
+ */
+ struct list_head list;
+ } rb;
+
+ /**
+ * @kernel_alloc_node:
+ *
+ * &drm_gpuva representing the address space cutout reserved for
+ * the kernel
+ */
+ struct drm_gpuva kernel_alloc_node;
+
+ /**
+ * @ops: &drm_gpuva_fn_ops providing the split/merge steps to drivers
+ */
+ const struct drm_gpuva_fn_ops *ops;
+};
+
+void drm_gpuva_manager_init(struct drm_gpuva_manager *mgr,
+ const char *name,
+ u64 start_offset, u64 range,
+ u64 reserve_offset, u64 reserve_range,
+ const struct drm_gpuva_fn_ops *ops);
+void drm_gpuva_manager_destroy(struct drm_gpuva_manager *mgr);
+
+static inline struct drm_gpuva *
+__drm_gpuva_next(struct drm_gpuva *va)
+{
+ if (va && !list_is_last(&va->rb.entry, &va->mgr->rb.list))
+ return list_next_entry(va, rb.entry);
+
+ return NULL;
+}
+
+/**
+ * drm_gpuva_for_each_va_range() - iterate over a range of &drm_gpuvas
+ * @va__: &drm_gpuva structure to assign to in each iteration step
+ * @mgr__: &drm_gpuva_manager to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * between @start__ and @end__. It is implemented similarly to list_for_each(),
+ * but is using the &drm_gpuva_manager's internal interval tree to accelerate
+ * the search for the starting &drm_gpuva, and hence isn't safe against removal
+ * of elements. It assumes that @end__ is within (or is the upper limit of) the
+ * &drm_gpuva_manager. This iterator does not skip over the &drm_gpuva_manager's
+ * @kernel_alloc_node.
+ */
+#define drm_gpuva_for_each_va_range(va__, mgr__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuva_for_each_va_range_safe() - safely iterate over a range of
+ * &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @mgr__: &drm_gpuva_manager to walk over
+ * @start__: starting offset, the first gpuva will overlap this
+ * @end__: ending offset, the last gpuva will start before this (but may
+ * overlap)
+ *
+ * This iterator walks over all &drm_gpuvas in the &drm_gpuva_manager that lie
+ * between @start__ and @end__. It is implemented similarly to
+ * list_for_each_safe(), but is using the &drm_gpuva_manager's internal interval
+ * tree to accelerate the search for the starting &drm_gpuva, and hence is safe
+ * against removal of elements. It assumes that @end__ is within (or is the
+ * upper limit of) the &drm_gpuva_manager. This iterator does not skip over the
+ * &drm_gpuva_manager's @kernel_alloc_node.
+ */
+#define drm_gpuva_for_each_va_range_safe(va__, next__, mgr__, start__, end__) \
+ for (va__ = drm_gpuva_find_first((mgr__), (start__), (end__) - (start__)), \
+ next__ = __drm_gpuva_next(va__); \
+ va__ && (va__->va.addr < (end__)); \
+ va__ = next__, next__ = __drm_gpuva_next(va__))
+
+/**
+ * drm_gpuva_for_each_va() - iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @mgr__: &drm_gpuva_manager to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuva_manager.
+ */
+#define drm_gpuva_for_each_va(va__, mgr__) \
+ list_for_each_entry(va__, &(mgr__)->rb.list, rb.entry)
+
+/**
+ * drm_gpuva_for_each_va_safe() - safely iterate over all &drm_gpuvas
+ * @va__: &drm_gpuva to assign to in each iteration step
+ * @next__: another &drm_gpuva to use as temporary storage
+ * @mgr__: &drm_gpuva_manager to walk over
+ *
+ * This iterator walks over all &drm_gpuva structures associated with the given
+ * &drm_gpuva_manager. It is implemented with list_for_each_entry_safe(), and
+ * hence safe against the removal of elements.
+ */
+#define drm_gpuva_for_each_va_safe(va__, next__, mgr__) \
+ list_for_each_entry_safe(va__, next__, &(mgr__)->rb.list, rb.entry)
+
+/**
+ * enum drm_gpuva_op_type - GPU VA operation type
+ *
+ * Operations to alter the GPU VA mappings tracked by the &drm_gpuva_manager.
+ */
+enum drm_gpuva_op_type {
+ /**
+ * @DRM_GPUVA_OP_MAP: the map op type
+ */
+ DRM_GPUVA_OP_MAP,
+
+ /**
+ * @DRM_GPUVA_OP_REMAP: the remap op type
+ */
+ DRM_GPUVA_OP_REMAP,
+
+ /**
+ * @DRM_GPUVA_OP_UNMAP: the unmap op type
+ */
+ DRM_GPUVA_OP_UNMAP,
+
+ /**
+ * @DRM_GPUVA_OP_PREFETCH: the prefetch op type
+ */
+ DRM_GPUVA_OP_PREFETCH,
+};
+
+/**
+ * struct drm_gpuva_op_map - GPU VA map operation
+ *
+ * This structure represents a single map operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_map {
+ /**
+ * @va: structure containing address and range of a map
+ * operation
+ */
+ struct {
+ /**
+ * @addr: the base address of the new mapping
+ */
+ u64 addr;
+
+ /**
+ * @range: the range of the new mapping
+ */
+ u64 range;
+ } va;
+
+ /**
+ * @gem: structure containing the &drm_gem_object and its offset
+ */
+ struct {
+ /**
+ * @offset: the offset within the &drm_gem_object
+ */
+ u64 offset;
+
+ /**
+ * @obj: the &drm_gem_object to map
+ */
+ struct drm_gem_object *obj;
+ } gem;
+};
+
+/**
+ * struct drm_gpuva_op_unmap - GPU VA unmap operation
+ *
+ * This structure represents a single unmap operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_unmap {
+ /**
+ * @va: the &drm_gpuva to unmap
+ */
+ struct drm_gpuva *va;
+
+ /**
+ * @keep:
+ *
+ * Indicates whether this &drm_gpuva is physically contiguous with the
+ * original mapping request.
+ *
+ * Optionally, if &keep is set, drivers may keep the actual page table
+ * mappings for this &drm_gpuva, adding only the missing page table
+ * entries and updating the &drm_gpuva_manager accordingly.
+ */
+ bool keep;
+};
+
+/**
+ * struct drm_gpuva_op_remap - GPU VA remap operation
+ *
+ * This represents a single remap operation generated by the DRM GPU VA manager.
+ *
+ * A remap operation is generated when an existing GPU VA mapping is split up
+ * by inserting a new GPU VA mapping or by partially unmapping existent
+ * mapping(s), hence it consists of a maximum of two map and one unmap
+ * operation.
+ *
+ * The @unmap operation takes care of removing the original existing mapping.
+ * @prev is used to remap the preceding part, @next the subsequent part.
+ *
+ * If either a new mapping's start address is aligned with the start address
+ * of the old mapping or the new mapping's end address is aligned with the
+ * end address of the old mapping, either @prev or @next is NULL.
+ *
+ * Note, the reason for a dedicated remap operation, rather than arbitrary
+ * unmap and map operations, is to give drivers the chance of extracting driver
+ * specific data for creating the new mappings from the unmap operation's
+ * &drm_gpuva structure which typically is embedded in larger driver specific
+ * structures.
+ */
+struct drm_gpuva_op_remap {
+ /**
+ * @prev: the preceding part of a split mapping
+ */
+ struct drm_gpuva_op_map *prev;
+
+ /**
+ * @next: the subsequent part of a split mapping
+ */
+ struct drm_gpuva_op_map *next;
+
+ /**
+ * @unmap: the unmap operation for the original existing mapping
+ */
+ struct drm_gpuva_op_unmap *unmap;
+};
+
+/**
+ * struct drm_gpuva_op_prefetch - GPU VA prefetch operation
+ *
+ * This structure represents a single prefetch operation generated by the
+ * DRM GPU VA manager.
+ */
+struct drm_gpuva_op_prefetch {
+ /**
+ * @va: the &drm_gpuva to prefetch
+ */
+ struct drm_gpuva *va;
+};
+
+/**
+ * struct drm_gpuva_op - GPU VA operation
+ *
+ * This structure represents a single generic operation.
+ *
+ * The particular type of the operation is defined by @op.
+ */
+struct drm_gpuva_op {
+ /**
+ * @entry:
+ *
+ * The &list_head used to distribute instances of this struct within
+ * &drm_gpuva_ops.
+ */
+ struct list_head entry;
+
+ /**
+ * @op: the type of the operation
+ */
+ enum drm_gpuva_op_type op;
+
+ union {
+ /**
+ * @map: the map operation
+ */
+ struct drm_gpuva_op_map map;
+
+ /**
+ * @remap: the remap operation
+ */
+ struct drm_gpuva_op_remap remap;
+
+ /**
+ * @unmap: the unmap operation
+ */
+ struct drm_gpuva_op_unmap unmap;
+
+ /**
+ * @prefetch: the prefetch operation
+ */
+ struct drm_gpuva_op_prefetch prefetch;
+ };
+};
+
+/**
+ * struct drm_gpuva_ops - wraps a list of &drm_gpuva_op
+ */
+struct drm_gpuva_ops {
+ /**
+ * @list: the &list_head
+ */
+ struct list_head list;
+};
+
+/**
+ * drm_gpuva_for_each_op() - iterator to walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations.
+ */
+#define drm_gpuva_for_each_op(op, ops) list_for_each_entry(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_safe() - iterator to safely walk over &drm_gpuva_ops
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @next: another &drm_gpuva_op to use as temporary storage
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations. It is
+ * implemented with list_for_each_safe(), so it is safe against removal of elements.
+ */
+#define drm_gpuva_for_each_op_safe(op, next, ops) \
+ list_for_each_entry_safe(op, next, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_for_each_op_from_reverse() - iterate backwards from the given point
+ * @op: &drm_gpuva_op to assign in each iteration step
+ * @ops: &drm_gpuva_ops to walk
+ *
+ * This iterator walks over all ops within a given list of operations beginning
+ * from the given operation in reverse order.
+ */
+#define drm_gpuva_for_each_op_from_reverse(op, ops) \
+ list_for_each_entry_from_reverse(op, &(ops)->list, entry)
+
+/**
+ * drm_gpuva_first_op() - returns the first &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the first &drm_gpuva_op from
+ */
+#define drm_gpuva_first_op(ops) \
+ list_first_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_last_op() - returns the last &drm_gpuva_op from &drm_gpuva_ops
+ * @ops: the &drm_gpuva_ops to get the last &drm_gpuva_op from
+ */
+#define drm_gpuva_last_op(ops) \
+ list_last_entry(&(ops)->list, struct drm_gpuva_op, entry)
+
+/**
+ * drm_gpuva_prev_op() - previous &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_prev_op(op) list_prev_entry(op, entry)
+
+/**
+ * drm_gpuva_next_op() - next &drm_gpuva_op in the list
+ * @op: the current &drm_gpuva_op
+ */
+#define drm_gpuva_next_op(op) list_next_entry(op, entry)
+
+struct drm_gpuva_ops *
+drm_gpuva_sm_map_ops_create(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset);
+struct drm_gpuva_ops *
+drm_gpuva_sm_unmap_ops_create(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuva_prefetch_ops_create(struct drm_gpuva_manager *mgr,
+ u64 addr, u64 range);
+
+struct drm_gpuva_ops *
+drm_gpuva_gem_unmap_ops_create(struct drm_gpuva_manager *mgr,
+ struct drm_gem_object *obj);
+
+void drm_gpuva_ops_free(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva_ops *ops);
+
+static inline void drm_gpuva_init_from_op(struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op)
+{
+ drm_gpuva_init(va, op->va.addr, op->va.range,
+ op->gem.obj, op->gem.offset);
+}
+
+/**
+ * struct drm_gpuva_fn_ops - callbacks for split/merge steps
+ *
+ * This structure defines the callbacks used by &drm_gpuva_sm_map and
+ * &drm_gpuva_sm_unmap to provide the split/merge steps for map and unmap
+ * operations to drivers.
+ */
+struct drm_gpuva_fn_ops {
+ /**
+ * @op_alloc: called when the &drm_gpuva_manager allocates
+ * a struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * allocate memory accordingly.
+ *
+ * This callback is optional.
+ */
+ struct drm_gpuva_op *(*op_alloc)(void);
+
+ /**
+ * @op_free: called when the &drm_gpuva_manager frees a
+ * struct drm_gpuva_op
+ *
+ * Some drivers may want to embed struct drm_gpuva_op into driver
+ * specific structures. By implementing this callback drivers can
+ * free the previously allocated memory accordingly.
+ *
+ * This callback is optional.
+ */
+ void (*op_free)(struct drm_gpuva_op *op);
+
+ /**
+ * @sm_step_map: called from &drm_gpuva_sm_map to finally insert the
+ * mapping once all previous steps were completed
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ *
+ * Can be NULL if &drm_gpuva_sm_map is not used.
+ */
+ int (*sm_step_map)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_remap: called from &drm_gpuva_sm_map and
+ * &drm_gpuva_sm_unmap to split up an existent mapping
+ *
+ * This callback is called when an existent mapping needs to be split up.
+ * This is the case when either a newly requested mapping overlaps or
+ * is enclosed by an existent mapping or a partial unmap of an existent
+ * mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+ * used.
+ */
+ int (*sm_step_remap)(struct drm_gpuva_op *op, void *priv);
+
+ /**
+ * @sm_step_unmap: called from &drm_gpuva_sm_map and
+ * &drm_gpuva_sm_unmap to unmap an existent mapping
+ *
+ * This callback is called when an existent mapping needs to be unmapped.
+ * This is the case when either a newly requested mapping encloses an
+ * existent mapping or an unmap of an existent mapping is requested.
+ *
+ * The &priv pointer matches the one the driver passed to
+ * &drm_gpuva_sm_map or &drm_gpuva_sm_unmap, respectively.
+ *
+ * Can be NULL if neither &drm_gpuva_sm_map nor &drm_gpuva_sm_unmap is
+ * used.
+ */
+ int (*sm_step_unmap)(struct drm_gpuva_op *op, void *priv);
+};
+
+int drm_gpuva_sm_map(struct drm_gpuva_manager *mgr, void *priv,
+ u64 addr, u64 range,
+ struct drm_gem_object *obj, u64 offset);
+
+int drm_gpuva_sm_unmap(struct drm_gpuva_manager *mgr, void *priv,
+ u64 addr, u64 range);
+
+void drm_gpuva_map(struct drm_gpuva_manager *mgr,
+ struct drm_gpuva *va,
+ struct drm_gpuva_op_map *op);
+
+void drm_gpuva_remap(struct drm_gpuva *prev,
+ struct drm_gpuva *next,
+ struct drm_gpuva_op_remap *op);
+
+void drm_gpuva_unmap(struct drm_gpuva_op_unmap *op);
+
+#endif /* __DRM_GPUVA_MGR_H__ */
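
To give a feel for how the pieces above fit together, here is a rough, hypothetical sketch of a driver wiring up a VA manager and driving a map request through the split/merge machinery (all names and the VA layout are made up, not taken from this patch):

#include <drm/drm_gpuva_mgr.h>

static int my_sm_step_map(struct drm_gpuva_op *op, void *priv)
{
        /* Program page tables for op->map.va.addr / op->map.va.range here. */
        return 0;
}

static int my_sm_step_remap(struct drm_gpuva_op *op, void *priv)
{
        /* Split the existing mapping described by op->remap here. */
        return 0;
}

static int my_sm_step_unmap(struct drm_gpuva_op *op, void *priv)
{
        /* Tear down the mapping behind op->unmap.va here. */
        return 0;
}

static const struct drm_gpuva_fn_ops my_gpuva_ops = {
        .sm_step_map = my_sm_step_map,
        .sm_step_remap = my_sm_step_remap,
        .sm_step_unmap = my_sm_step_unmap,
};

static void my_vm_init(struct drm_gpuva_manager *mgr)
{
        /* VA space: [0, 1 << 48), with the first 4 KiB reserved for the kernel. */
        drm_gpuva_manager_init(mgr, "my-vm",
                               0, 1ULL << 48,   /* start, range */
                               0, 0x1000,       /* reserved start, range */
                               &my_gpuva_ops);
}

static int my_vm_bind(struct drm_gpuva_manager *mgr, void *priv,
                      u64 addr, u64 range,
                      struct drm_gem_object *obj, u64 offset)
{
        /* Let the manager compute and drive the map/remap/unmap steps. */
        return drm_gpuva_sm_map(mgr, priv, addr, range, obj, offset);
}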
diff --git a/include/drm/drm_kunit_helpers.h b/include/drm/drm_kunit_helpers.h
index ed013fdcc1ff..514c8a7a32f0 100644
--- a/include/drm/drm_kunit_helpers.h
+++ b/include/drm/drm_kunit_helpers.h
@@ -87,5 +87,12 @@ __drm_kunit_helper_alloc_drm_device(struct kunit *test,
sizeof(_type), \
offsetof(_type, _member), \
_feat))
+struct drm_modeset_acquire_ctx *
+drm_kunit_helper_acquire_ctx_alloc(struct kunit *test);
+
+struct drm_atomic_state *
+drm_kunit_helper_atomic_state_alloc(struct kunit *test,
+ struct drm_device *drm,
+ struct drm_modeset_acquire_ctx *ctx);
#endif // DRM_KUNIT_HELPERS_H_
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 965faf082a6d..e3c3ac615909 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -59,8 +59,8 @@ enum mode_set_atomic {
/**
* struct drm_crtc_helper_funcs - helper operations for CRTCs
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_crtc_helper_funcs {
/**
@@ -216,9 +216,7 @@ struct drm_crtc_helper_funcs {
*
* This callback is used to update the display mode of a CRTC without
* changing anything of the primary plane configuration. This fits the
- * requirement of atomic and hence is used by the atomic helpers. It is
- * also used by the transitional plane helpers to implement a
- * @mode_set hook in drm_helper_crtc_mode_set().
+ * requirement of atomic and hence is used by the atomic helpers.
*
* Note that the display pipe is completely off when this function is
* called. Atomic drivers which need hardware to be running before they
@@ -333,8 +331,8 @@ struct drm_crtc_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
@@ -373,8 +371,8 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_begin)(struct drm_crtc *crtc,
struct drm_atomic_state *state);
@@ -397,8 +395,8 @@ struct drm_crtc_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*atomic_flush)(struct drm_crtc *crtc,
struct drm_atomic_state *state);
@@ -507,8 +505,8 @@ static inline void drm_crtc_helper_add(struct drm_crtc *crtc,
/**
* struct drm_encoder_helper_funcs - helper operations for encoders
*
- * These hooks are used by the legacy CRTC helpers, the transitional plane
- * helpers and the new atomic modesetting helpers.
+ * These hooks are used by the legacy CRTC helpers and the new atomic
+ * modesetting helpers.
*/
struct drm_encoder_helper_funcs {
/**
@@ -1185,8 +1183,7 @@ static inline void drm_connector_helper_add(struct drm_connector *connector,
/**
* struct drm_plane_helper_funcs - helper operations for planes
*
- * These functions are used by the atomic helpers and by the transitional plane
- * helpers.
+ * These functions are used by the atomic helpers.
*/
struct drm_plane_helper_funcs {
/**
@@ -1221,9 +1218,8 @@ struct drm_plane_helper_funcs {
* The helpers will call @cleanup_fb with matching arguments for every
* successful call to this hook.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional. See @begin_fb_access
- * for preparing per-commit resources.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. See @begin_fb_access for preparing per-commit resources.
*
* RETURNS:
*
@@ -1240,8 +1236,8 @@ struct drm_plane_helper_funcs {
* This hook is called to clean up any resources allocated for the given
* framebuffer and plane configuration in @prepare_fb.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*/
void (*cleanup_fb)(struct drm_plane *plane,
struct drm_plane_state *old_state);
@@ -1295,8 +1291,8 @@ struct drm_plane_helper_funcs {
* all updated. Again the recommendation is to just call check helpers
* until a maximal configuration is reached.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional.
*
* NOTE:
*
@@ -1326,8 +1322,7 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional.
+ * This callback is used by the atomic modeset helpers, but it is optional.
*/
void (*atomic_update)(struct drm_plane *plane,
struct drm_atomic_state *state);
@@ -1376,9 +1371,8 @@ struct drm_plane_helper_funcs {
* has picked. See drm_atomic_helper_commit_planes() for a discussion of
* the tradeoffs and variants of plane commit helpers.
*
- * This callback is used by the atomic modeset helpers and by the
- * transitional plane helpers, but it is optional. It's intended to
- * reverse the effects of @atomic_enable.
+ * This callback is used by the atomic modeset helpers, but it is
+ * optional. It's intended to reverse the effects of @atomic_enable.
*/
void (*atomic_disable)(struct drm_plane *plane,
struct drm_atomic_state *state);
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 432fab2347eb..10015891b056 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -27,12 +27,14 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
+#include <linux/mutex.h>
struct backlight_device;
struct dentry;
struct device_node;
struct drm_connector;
struct drm_device;
+struct drm_panel_follower;
struct drm_panel;
struct display_timing;
@@ -144,6 +146,45 @@ struct drm_panel_funcs {
void (*debugfs_init)(struct drm_panel *panel, struct dentry *root);
};
+struct drm_panel_follower_funcs {
+ /**
+ * @panel_prepared:
+ *
+ * Called after the panel has been powered on.
+ */
+ int (*panel_prepared)(struct drm_panel_follower *follower);
+
+ /**
+ * @panel_unpreparing:
+ *
+ * Called before the panel is powered off.
+ */
+ int (*panel_unpreparing)(struct drm_panel_follower *follower);
+};
+
+struct drm_panel_follower {
+ /**
+ * @funcs:
+ *
+ * Dependent device callbacks; should be initted by the caller.
+ */
+ const struct drm_panel_follower_funcs *funcs;
+
+ /**
+ * @list:
+ *
+ * Used for linking into panel's list; set by drm_panel_add_follower().
+ */
+ struct list_head list;
+
+ /**
+ * @panel:
+ *
+ * The panel we're dependent on; set by drm_panel_add_follower().
+ */
+ struct drm_panel *panel;
+};
+
/**
* struct drm_panel - DRM panel object
*/
@@ -190,6 +231,20 @@ struct drm_panel {
struct list_head list;
/**
+ * @followers:
+ *
+ * A list of struct drm_panel_follower dependent on this panel.
+ */
+ struct list_head followers;
+
+ /**
+ * @follower_lock:
+ *
+ * Lock for followers list.
+ */
+ struct mutex follower_lock;
+
+ /**
* @prepare_prev_first:
*
* The previous controller should be prepared first, before the prepare
@@ -198,6 +253,20 @@ struct drm_panel {
* the panel is powered up.
*/
bool prepare_prev_first;
+
+ /**
+ * @prepared:
+ *
+ * If true then the panel has been prepared.
+ */
+ bool prepared;
+
+ /**
+ * @enabled:
+ *
+ * If true then the panel has been enabled.
+ */
+ bool enabled;
};
void drm_panel_init(struct drm_panel *panel, struct device *dev,
@@ -232,6 +301,33 @@ static inline int of_drm_get_panel_orientation(const struct device_node *np,
}
#endif
+#if defined(CONFIG_DRM_PANEL)
+bool drm_is_panel_follower(struct device *dev);
+int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+void drm_panel_remove_follower(struct drm_panel_follower *follower);
+int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower);
+#else
+static inline bool drm_is_panel_follower(struct device *dev)
+{
+ return false;
+}
+
+static inline int drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
+
+static inline void drm_panel_remove_follower(struct drm_panel_follower *follower) { }
+static inline int devm_drm_panel_add_follower(struct device *follower_dev,
+ struct drm_panel_follower *follower)
+{
+ return -ENODEV;
+}
+#endif
+
#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
(IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
int drm_panel_of_backlight(struct drm_panel *panel);
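
The follower interface added above is aimed at devices such as touchscreens that must power-sequence together with the panel. A hypothetical follower registration could look like the sketch below (all names are made up; this is not code from this patch):

#include <linux/device.h>
#include <drm/drm_panel.h>

struct my_ts {
        struct drm_panel_follower follower;
};

static int my_ts_panel_prepared(struct drm_panel_follower *follower)
{
        /* container_of(follower, struct my_ts, follower) recovers driver data. */
        /* The panel has been powered on; bring up the touchscreen here. */
        return 0;
}

static int my_ts_panel_unpreparing(struct drm_panel_follower *follower)
{
        /* The panel is about to power off; quiesce the touchscreen first. */
        return 0;
}

static const struct drm_panel_follower_funcs my_ts_follower_funcs = {
        .panel_prepared = my_ts_panel_prepared,
        .panel_unpreparing = my_ts_panel_unpreparing,
};

static int my_ts_probe(struct device *dev, struct my_ts *ts)
{
        /* Only register if this device is actually described as a follower. */
        if (!drm_is_panel_follower(dev))
                return 0;

        ts->follower.funcs = &my_ts_follower_funcs;
        /* Managed variant: the follower is removed automatically on unbind. */
        return devm_drm_panel_add_follower(dev, &ts->follower);
}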
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 51291983ea44..79d62856defb 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -56,7 +56,7 @@ struct drm_plane_state {
/**
* @crtc:
*
- * Currently bound CRTC, NULL if disabled. Do not this write directly,
+ * Currently bound CRTC, NULL if disabled. Do not write this directly,
* use drm_atomic_set_crtc_for_plane()
*/
struct drm_crtc *crtc;
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 2a1d01e5b56b..a7abf9f3e697 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -60,19 +60,12 @@ enum dma_data_direction;
struct drm_device;
struct drm_gem_object;
-struct drm_file;
/* core prime functions */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd, uint32_t *handle);
-int drm_gem_prime_handle_to_fd(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle, uint32_t flags,
- int *prime_fd);
-
/* helper functions for exporting */
int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
diff --git a/include/drm/drm_syncobj.h b/include/drm/drm_syncobj.h
index 6cf7243a1dc5..b40052132e52 100644
--- a/include/drm/drm_syncobj.h
+++ b/include/drm/drm_syncobj.h
@@ -54,7 +54,11 @@ struct drm_syncobj {
*/
struct list_head cb_list;
/**
- * @lock: Protects &cb_list and write-locks &fence.
+ * @ev_fd_list: List of registered eventfds.
+ */
+ struct list_head ev_fd_list;
+ /**
+ * @lock: Protects &cb_list and &ev_fd_list, and write-locks &fence.
*/
spinlock_t lock;
/**
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index 6273cac44e47..96a5d858404b 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -12,6 +12,6 @@ void drm_class_device_unregister(struct device *dev);
void drm_sysfs_hotplug_event(struct drm_device *dev);
void drm_sysfs_connector_hotplug_event(struct drm_connector *connector);
-void drm_sysfs_connector_status_event(struct drm_connector *connector,
- struct drm_property *property);
+void drm_sysfs_connector_property_event(struct drm_connector *connector,
+ struct drm_property *property);
#endif
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
index 087e3f649c52..f6e6ed529681 100644
--- a/include/drm/task_barrier.h
+++ b/include/drm/task_barrier.h
@@ -24,8 +24,8 @@
#include <linux/atomic.h>
/*
- * Reusable 2 PHASE task barrier (randevouz point) implementation for N tasks.
- * Based on the Little book of sempahores - https://greenteapress.com/wp/semaphores/
+ * Reusable 2 PHASE task barrier (rendez-vous point) implementation for N tasks.
+ * Based on the Little book of semaphores - https://greenteapress.com/wp/semaphores/
*/
diff --git a/include/drm/ttm/ttm_bo.h b/include/drm/ttm/ttm_bo.h
index 8b113c384236..0223a41a64b2 100644
--- a/include/drm/ttm/ttm_bo.h
+++ b/include/drm/ttm/ttm_bo.h
@@ -355,8 +355,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
void ttm_bo_put(struct ttm_buffer_object *bo);
void ttm_bo_set_bulk_move(struct ttm_buffer_object *bo,
struct ttm_lru_bulk_move *bulk);
-int ttm_bo_lock_delayed_workqueue(struct ttm_device *bdev);
-void ttm_bo_unlock_delayed_workqueue(struct ttm_device *bdev, int resched);
bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
int ttm_bo_init_reserved(struct ttm_device *bdev, struct ttm_buffer_object *bo,
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 5a1e39df8b26..80b21d1c6eaf 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -24,10 +24,11 @@
#include <linux/range.h>
#include <linux/reboot.h>
#include <linux/uuid.h>
-#include <linux/screen_info.h>
#include <asm/page.h>
+struct screen_info;
+
#define EFI_SUCCESS 0
#define EFI_LOAD_ERROR ( 1 | (1UL << (BITS_PER_LONG-1)))
#define EFI_INVALID_PARAMETER ( 2 | (1UL << (BITS_PER_LONG-1)))
diff --git a/include/linux/fb.h b/include/linux/fb.h
index ce7d588edc3e..16c3e6d6c55d 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -383,7 +383,6 @@ struct fb_tile_ops {
#endif /* CONFIG_FB_TILEBLITTING */
/* FBINFO_* = fb_info.flags bit flags */
-#define FBINFO_DEFAULT 0
#define FBINFO_HWACCEL_DISABLED 0x0002
/* When FBINFO_HWACCEL_DISABLED is set:
* Hardware acceleration is turned off. Software implementations
@@ -481,7 +480,9 @@ struct fb_info {
const struct fb_ops *fbops;
struct device *device; /* This is the parent */
+#if defined(CONFIG_FB_DEVICE)
struct device *dev; /* This is this fb device */
+#endif
int class_flag; /* private sysfs flags */
#ifdef CONFIG_FB_TILEBLITTING
struct fb_tile_ops *tileops; /* Tile Blitting */
@@ -502,8 +503,6 @@ struct fb_info {
bool skip_vt_switch; /* no VT switch on suspend/resume required */
};
-#define FBINFO_FLAG_DEFAULT FBINFO_DEFAULT
-
/* This will go away
* fbset currently hacks in FB_ACCELF_TEXT into var.accel_flags
* when it wants to turn the acceleration engine on. This is
@@ -527,7 +526,7 @@ extern int fb_pan_display(struct fb_info *info, struct fb_var_screeninfo *var);
extern int fb_blank(struct fb_info *info, int blank);
/*
- * Drawing operations where framebuffer is in I/O memory
+ * Helpers for framebuffers in I/O memory
*/
extern void cfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -538,29 +537,25 @@ extern ssize_t fb_io_read(struct fb_info *info, char __user *buf,
extern ssize_t fb_io_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
-/*
- * Initializes struct fb_ops for framebuffers in I/O memory.
- */
-
-#define __FB_DEFAULT_IO_OPS_RDWR \
+#define __FB_DEFAULT_IOMEM_OPS_RDWR \
.fb_read = fb_io_read, \
.fb_write = fb_io_write
-#define __FB_DEFAULT_IO_OPS_DRAW \
+#define __FB_DEFAULT_IOMEM_OPS_DRAW \
.fb_fillrect = cfb_fillrect, \
.fb_copyarea = cfb_copyarea, \
.fb_imageblit = cfb_imageblit
-#define __FB_DEFAULT_IO_OPS_MMAP \
+#define __FB_DEFAULT_IOMEM_OPS_MMAP \
.fb_mmap = NULL /* default implementation */
-#define FB_DEFAULT_IO_OPS \
- __FB_DEFAULT_IO_OPS_RDWR, \
- __FB_DEFAULT_IO_OPS_DRAW, \
- __FB_DEFAULT_IO_OPS_MMAP
+#define FB_DEFAULT_IOMEM_OPS \
+ __FB_DEFAULT_IOMEM_OPS_RDWR, \
+ __FB_DEFAULT_IOMEM_OPS_DRAW, \
+ __FB_DEFAULT_IOMEM_OPS_MMAP
/*
- * Drawing operations where framebuffer is in system RAM
+ * Helpers for framebuffers in system memory
*/
extern void sys_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
@@ -571,27 +566,28 @@ extern ssize_t fb_sys_read(struct fb_info *info, char __user *buf,
extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
size_t count, loff_t *ppos);
+#define __FB_DEFAULT_SYSMEM_OPS_RDWR \
+ .fb_read = fb_sys_read, \
+ .fb_write = fb_sys_write
+
+#define __FB_DEFAULT_SYSMEM_OPS_DRAW \
+ .fb_fillrect = sys_fillrect, \
+ .fb_copyarea = sys_copyarea, \
+ .fb_imageblit = sys_imageblit
+
/*
- * Initializes struct fb_ops for framebuffers in system memory.
+ * Helpers for framebuffers in DMA-able memory
*/
-#define __FB_DEFAULT_SYS_OPS_RDWR \
+#define __FB_DEFAULT_DMAMEM_OPS_RDWR \
.fb_read = fb_sys_read, \
.fb_write = fb_sys_write
-#define __FB_DEFAULT_SYS_OPS_DRAW \
+#define __FB_DEFAULT_DMAMEM_OPS_DRAW \
.fb_fillrect = sys_fillrect, \
.fb_copyarea = sys_copyarea, \
.fb_imageblit = sys_imageblit
-#define __FB_DEFAULT_SYS_OPS_MMAP \
- .fb_mmap = NULL /* default implementation */
-
-#define FB_DEFAULT_SYS_OPS \
- __FB_DEFAULT_SYS_OPS_RDWR, \
- __FB_DEFAULT_SYS_OPS_DRAW, \
- __FB_DEFAULT_SYS_OPS_MMAP
-
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
@@ -609,7 +605,6 @@ extern int fb_new_modelist(struct fb_info *info);
extern bool fb_center_logo;
extern int fb_logo_count;
-extern struct class *fb_class;
static inline void lock_fb_info(struct fb_info *info)
{
@@ -687,11 +682,11 @@ extern int fb_deferred_io_fsync(struct file *file, loff_t start,
__damage_area(info, image->dx, image->dy, image->width, image->height); \
}
-#define FB_GEN_DEFAULT_DEFERRED_IO_OPS(__prefix, __damage_range, __damage_area) \
+#define FB_GEN_DEFAULT_DEFERRED_IOMEM_OPS(__prefix, __damage_range, __damage_area) \
__FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, io) \
__FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, cfb)
-#define FB_GEN_DEFAULT_DEFERRED_SYS_OPS(__prefix, __damage_range, __damage_area) \
+#define FB_GEN_DEFAULT_DEFERRED_SYSMEM_OPS(__prefix, __damage_range, __damage_area) \
__FB_GEN_DEFAULT_DEFERRED_OPS_RDWR(__prefix, __damage_range, sys) \
__FB_GEN_DEFAULT_DEFERRED_OPS_DRAW(__prefix, __damage_area, sys)
@@ -735,11 +730,8 @@ static inline bool fb_be_math(struct fb_info *info)
#endif /* CONFIG_FB_FOREIGN_ENDIAN */
}
-/* drivers/video/fbsysfs.c */
extern struct fb_info *framebuffer_alloc(size_t size, struct device *dev);
extern void framebuffer_release(struct fb_info *info);
-extern int fb_init_device(struct fb_info *fb_info);
-extern void fb_cleanup_device(struct fb_info *head);
extern void fb_bl_default_curve(struct fb_info *fb_info, u8 off, u8 min, u8 max);
/* drivers/video/fbmon.c */
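A short sketch of how a driver whose framebuffer lives in I/O memory would pick up the renamed FB_DEFAULT_IOMEM_OPS helper. The "myfb_" names are made up for illustration; the macro expands to fb_io_read/fb_io_write, the cfb_* drawing helpers and the default mmap, as defined above.

	/* Hypothetical sketch of an fb_ops using FB_DEFAULT_IOMEM_OPS. */
	#include <linux/fb.h>
	#include <linux/module.h>

	static int myfb_set_par(struct fb_info *info)
	{
		/* Program the hardware from info->var; nothing to do here. */
		return 0;
	}

	static const struct fb_ops myfb_ops = {
		.owner		= THIS_MODULE,
		FB_DEFAULT_IOMEM_OPS,	/* read/write, cfb_* drawing, default mmap */
		.fb_set_par	= myfb_set_par,
	};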
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
index 54a06a4d2618..596ca4f95cfa 100644
--- a/include/linux/platform_data/bd6107.h
+++ b/include/linux/platform_data/bd6107.h
@@ -8,7 +8,7 @@
struct device;
struct bd6107_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int def_value;
};
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
index 1a8b5b1946fe..323fbf5f7613 100644
--- a/include/linux/platform_data/gpio_backlight.h
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -8,7 +8,7 @@
struct device;
struct gpio_backlight_platform_data {
- struct device *fbdev;
+ struct device *dev;
};
#endif
diff --git a/include/linux/platform_data/lv5207lp.h b/include/linux/platform_data/lv5207lp.h
index c9da8d402750..95d85c1394bc 100644
--- a/include/linux/platform_data/lv5207lp.h
+++ b/include/linux/platform_data/lv5207lp.h
@@ -8,7 +8,7 @@
struct device;
struct lv5207lp_platform_data {
- struct device *fbdev;
+ struct device *dev;
unsigned int max_value;
unsigned int def_value;
};
diff --git a/include/linux/sysfb.h b/include/linux/sysfb.h
index c1ef5fc60a3c..19cb803dd5ec 100644
--- a/include/linux/sysfb.h
+++ b/include/linux/sysfb.h
@@ -9,7 +9,8 @@
#include <linux/kernel.h>
#include <linux/platform_data/simplefb.h>
-#include <linux/screen_info.h>
+
+struct screen_info;
enum {
M_I17, /* 17-Inch iMac */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 79b14828d542..f477eda6a2b8 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -94,6 +94,9 @@ extern "C" {
*
* %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
* for appending data.
+ *
+ * %AMDGPU_GEM_DOMAIN_DOORBELL Doorbell. It is an MMIO region for
+ * signalling user mode queues.
*/
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
@@ -101,12 +104,14 @@ extern "C" {
#define AMDGPU_GEM_DOMAIN_GDS 0x8
#define AMDGPU_GEM_DOMAIN_GWS 0x10
#define AMDGPU_GEM_DOMAIN_OA 0x20
+#define AMDGPU_GEM_DOMAIN_DOORBELL 0x40
#define AMDGPU_GEM_DOMAIN_MASK (AMDGPU_GEM_DOMAIN_CPU | \
AMDGPU_GEM_DOMAIN_GTT | \
AMDGPU_GEM_DOMAIN_VRAM | \
AMDGPU_GEM_DOMAIN_GDS | \
AMDGPU_GEM_DOMAIN_GWS | \
- AMDGPU_GEM_DOMAIN_OA)
+ AMDGPU_GEM_DOMAIN_OA | \
+ AMDGPU_GEM_DOMAIN_DOORBELL)
/* Flag that CPU access will be required for the case of VRAM domain */
#define AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED (1 << 0)
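A hedged userspace sketch of allocating a BO in the new doorbell domain through DRM_IOCTL_AMDGPU_GEM_CREATE. The render-node path and the page-sized allocation are assumptions for illustration only.

	/* Hypothetical sketch: create a doorbell BO with
	 * AMDGPU_GEM_DOMAIN_DOORBELL. Error handling is minimal. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/amdgpu_drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/renderD128", O_RDWR);
		union drm_amdgpu_gem_create args;

		if (fd < 0)
			return 1;

		memset(&args, 0, sizeof(args));
		args.in.bo_size = 4096;
		args.in.alignment = 4096;
		args.in.domains = AMDGPU_GEM_DOMAIN_DOORBELL;

		if (ioctl(fd, DRM_IOCTL_AMDGPU_GEM_CREATE, &args))
			perror("AMDGPU_GEM_CREATE");
		else
			printf("doorbell BO handle %u\n", args.out.handle);
		return 0;
	}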
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index a87bbbbca2d4..794c1d857677 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -673,8 +673,11 @@ struct drm_gem_open {
* Bitfield of supported PRIME sharing capabilities. See &DRM_PRIME_CAP_IMPORT
* and &DRM_PRIME_CAP_EXPORT.
*
- * PRIME buffers are exposed as dma-buf file descriptors. See
- * Documentation/gpu/drm-mm.rst, section "PRIME Buffer Sharing".
+ * Starting from kernel version 6.6, both &DRM_PRIME_CAP_IMPORT and
+ * &DRM_PRIME_CAP_EXPORT are always advertised.
+ *
+ * PRIME buffers are exposed as dma-buf file descriptors.
+ * See :ref:`prime_buffer_sharing`.
*/
#define DRM_CAP_PRIME 0x5
/**
@@ -682,6 +685,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports importing PRIME
* buffers via the &DRM_IOCTL_PRIME_FD_TO_HANDLE ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_IMPORT 0x1
/**
@@ -689,6 +694,8 @@ struct drm_gem_open {
*
* If this bit is set in &DRM_CAP_PRIME, the driver supports exporting PRIME
* buffers via the &DRM_IOCTL_PRIME_HANDLE_TO_FD ioctl.
+ *
+ * Starting from kernel version 6.6, this bit is always set in &DRM_CAP_PRIME.
*/
#define DRM_PRIME_CAP_EXPORT 0x2
/**
@@ -756,15 +763,14 @@ struct drm_gem_open {
/**
* DRM_CAP_SYNCOBJ
*
- * If set to 1, the driver supports sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * If set to 1, the driver supports sync objects. See :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ 0x13
/**
* DRM_CAP_SYNCOBJ_TIMELINE
*
* If set to 1, the driver supports timeline operations on sync objects. See
- * Documentation/gpu/drm-mm.rst, section "DRM Sync Objects".
+ * :ref:`drm_sync_objects`.
*/
#define DRM_CAP_SYNCOBJ_TIMELINE 0x14
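A small userspace sketch of querying the capabilities documented above with DRM_IOCTL_GET_CAP; on 6.6 and later both PRIME bits are expected to be set. The device path is illustrative.

	/* Hypothetical sketch: query PRIME and syncobj capabilities. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);
		struct drm_get_cap cap = { .capability = DRM_CAP_PRIME };

		if (fd < 0)
			return 1;

		if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
			printf("PRIME import: %d, export: %d\n",
			       !!(cap.value & DRM_PRIME_CAP_IMPORT),
			       !!(cap.value & DRM_PRIME_CAP_EXPORT));

		cap.capability = DRM_CAP_SYNCOBJ;
		cap.value = 0;
		if (ioctl(fd, DRM_IOCTL_GET_CAP, &cap) == 0)
			printf("syncobj supported: %llu\n",
			       (unsigned long long)cap.value);
		return 0;
	}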
@@ -909,6 +915,27 @@ struct drm_syncobj_timeline_wait {
__u32 pad;
};
+/**
+ * struct drm_syncobj_eventfd
+ * @handle: syncobj handle.
+ * @flags: Zero to wait for the point to be signalled, or
+ * &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE to wait for a fence to be
+ * available for the point.
+ * @point: syncobj timeline point (set to zero for binary syncobjs).
+ * @fd: Existing eventfd to send events to.
+ * @pad: Must be zero.
+ *
+ * Register an eventfd to be signalled by a syncobj. When the syncobj (or the
+ * given timeline point) is signalled, the eventfd counter is incremented by one.
+ */
+struct drm_syncobj_eventfd {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+ __s32 fd;
+ __u32 pad;
+};
+
struct drm_syncobj_array {
__u64 handles;
@@ -1169,6 +1196,8 @@ extern "C" {
*/
#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+#define DRM_IOCTL_SYNCOBJ_EVENTFD DRM_IOWR(0xCF, struct drm_syncobj_eventfd)
+
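A hedged sketch of the new eventfd registration from userspace: create an eventfd, fill struct drm_syncobj_eventfd as documented above, and issue the new ioctl so a poll loop wakes up when the syncobj is signalled. Requires a 6.6-level uAPI header; error handling is abbreviated.

	/* Hypothetical sketch: watch a binary syncobj via an eventfd. */
	#include <string.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <drm/drm.h>

	static int watch_syncobj(int drm_fd, __u32 syncobj_handle)
	{
		int efd = eventfd(0, EFD_CLOEXEC);
		struct drm_syncobj_eventfd args;

		if (efd < 0)
			return -1;

		memset(&args, 0, sizeof(args));
		args.handle = syncobj_handle;
		args.flags = 0;		/* wait for the point to be signalled */
		args.point = 0;		/* binary syncobj */
		args.fd = efd;

		if (ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_EVENTFD, &args) < 0)
			return -1;

		/* efd can now be handed to poll()/epoll(); its counter is
		 * incremented by one when the syncobj is signalled. */
		return efd;
	}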
/*
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
@@ -1180,25 +1209,50 @@ extern "C" {
#define DRM_COMMAND_BASE 0x40
#define DRM_COMMAND_END 0xA0
-/*
- * Header for events written back to userspace on the drm fd. The
- * type defines the type of event, the length specifies the total
- * length of the event (including the header), and user_data is
- * typically a 64 bit value passed with the ioctl that triggered the
- * event. A read on the drm fd will always only return complete
- * events, that is, if for example the read buffer is 100 bytes, and
- * there are two 64 byte events pending, only one will be returned.
+/**
+ * struct drm_event - Header for DRM events
+ * @type: event type.
+ * @length: total length of the event in bytes, including this header.
*
- * Event types 0 - 0x7fffffff are generic drm events, 0x80000000 and
- * up are chipset specific.
+ * This struct is a header for events written back to user-space on the DRM FD.
+ * A read on the DRM FD will always only return complete events: e.g. if the
+ * read buffer is 100 bytes large and there are two 64 byte events pending,
+ * only one will be returned.
+ *
+ * Event types 0 - 0x7fffffff are generic DRM events, 0x80000000 and
+ * up are chipset specific. Generic DRM events include &DRM_EVENT_VBLANK,
+ * &DRM_EVENT_FLIP_COMPLETE and &DRM_EVENT_CRTC_SEQUENCE.
*/
struct drm_event {
__u32 type;
__u32 length;
};
+/**
+ * DRM_EVENT_VBLANK - vertical blanking event
+ *
+ * This event is sent in response to &DRM_IOCTL_WAIT_VBLANK with the
+ * &_DRM_VBLANK_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_VBLANK 0x01
+/**
+ * DRM_EVENT_FLIP_COMPLETE - page-flip completion event
+ *
+ * This event is sent in response to an atomic commit or legacy page-flip with
+ * the &DRM_MODE_PAGE_FLIP_EVENT flag set.
+ *
+ * The event payload is a struct drm_event_vblank.
+ */
#define DRM_EVENT_FLIP_COMPLETE 0x02
+/**
+ * DRM_EVENT_CRTC_SEQUENCE - CRTC sequence event
+ *
+ * This event is sent in response to &DRM_IOCTL_CRTC_QUEUE_SEQUENCE.
+ *
+ * The event payload is a struct drm_event_crtc_sequence.
+ */
#define DRM_EVENT_CRTC_SEQUENCE 0x03
struct drm_event_vblank {
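A userspace sketch of consuming events as described in the new struct drm_event documentation: read whole events from the DRM FD, then walk the buffer using the length field. The buffer size and the (empty) per-type handling are illustrative only.

	/* Hypothetical sketch: drain complete events from the DRM FD. */
	#include <string.h>
	#include <unistd.h>
	#include <drm/drm.h>

	static void drain_drm_events(int drm_fd)
	{
		char buf[1024];
		ssize_t len = read(drm_fd, buf, sizeof(buf)); /* whole events only */
		ssize_t i = 0;

		while (len > 0 && i + (ssize_t)sizeof(struct drm_event) <= len) {
			struct drm_event ev;

			memcpy(&ev, buf + i, sizeof(ev));
			switch (ev.type) {
			case DRM_EVENT_VBLANK:
			case DRM_EVENT_FLIP_COMPLETE:
				/* payload is a struct drm_event_vblank */
				break;
			case DRM_EVENT_CRTC_SEQUENCE:
				/* payload is a struct drm_event_crtc_sequence */
				break;
			default:
				break;	/* chipset-specific event */
			}
			i += ev.length;	/* length includes the header */
		}
	}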
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 43691058d28f..ea1b639bcb28 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -488,6 +488,9 @@ struct drm_mode_get_connector {
* This is not an object ID. This is a per-type connector number. Each
* (type, type_id) combination is unique across all connectors of a DRM
* device.
+ *
+ * The (type, type_id) combination is not a stable identifier: the
+ * type_id can change depending on the driver probe order.
*/
__u32 connector_type_id;
@@ -883,7 +886,7 @@ struct hdr_metadata_infoframe {
*/
struct {
__u16 x, y;
- } display_primaries[3];
+ } display_primaries[3];
/**
* @white_point: White Point of Colorspace Data.
* These are coded as unsigned 16-bit values in units of
@@ -894,7 +897,7 @@ struct hdr_metadata_infoframe {
*/
struct {
__u16 x, y;
- } white_point;
+ } white_point;
/**
* @max_display_mastering_luminance: Max Mastering Display Luminance.
* This value is coded as an unsigned 16-bit value in units of 1 cd/m2,
diff --git a/include/uapi/drm/ivpu_accel.h b/include/uapi/drm/ivpu_accel.h
index 839820aed87e..a58a14c9f222 100644
--- a/include/uapi/drm/ivpu_accel.h
+++ b/include/uapi/drm/ivpu_accel.h
@@ -60,6 +60,7 @@ extern "C" {
#define DRM_IVPU_PARAM_UNIQUE_INFERENCE_ID 10
#define DRM_IVPU_PARAM_TILE_CONFIG 11
#define DRM_IVPU_PARAM_SKU 12
+#define DRM_IVPU_PARAM_CAPABILITIES 13
#define DRM_IVPU_PLATFORM_TYPE_SILICON 0
@@ -68,6 +69,9 @@ extern "C" {
#define DRM_IVPU_CONTEXT_PRIORITY_FOCUS 2
#define DRM_IVPU_CONTEXT_PRIORITY_REALTIME 3
+#define DRM_IVPU_CAP_METRIC_STREAMER 1
+#define DRM_IVPU_CAP_DMA_MEMORY_RANGE 2
+
/**
* struct drm_ivpu_param - Get/Set VPU parameters
*/
@@ -129,8 +133,10 @@ struct drm_ivpu_param {
__u64 value;
};
-#define DRM_IVPU_BO_HIGH_MEM 0x00000001
+#define DRM_IVPU_BO_SHAVE_MEM 0x00000001
+#define DRM_IVPU_BO_HIGH_MEM DRM_IVPU_BO_SHAVE_MEM
#define DRM_IVPU_BO_MAPPABLE 0x00000002
+#define DRM_IVPU_BO_DMA_MEM 0x00000004
#define DRM_IVPU_BO_CACHED 0x00000000
#define DRM_IVPU_BO_UNCACHED 0x00010000
@@ -140,6 +146,7 @@ struct drm_ivpu_param {
#define DRM_IVPU_BO_FLAGS \
(DRM_IVPU_BO_HIGH_MEM | \
DRM_IVPU_BO_MAPPABLE | \
+ DRM_IVPU_BO_DMA_MEM | \
DRM_IVPU_BO_CACHE_MASK)
/**
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 853a327433d3..8d7402c13e56 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -33,11 +33,51 @@
extern "C" {
#endif
+#define NOUVEAU_GETPARAM_PCI_VENDOR 3
+#define NOUVEAU_GETPARAM_PCI_DEVICE 4
+#define NOUVEAU_GETPARAM_BUS_TYPE 5
+#define NOUVEAU_GETPARAM_FB_SIZE 8
+#define NOUVEAU_GETPARAM_AGP_SIZE 9
+#define NOUVEAU_GETPARAM_CHIPSET_ID 11
+#define NOUVEAU_GETPARAM_VM_VRAM_BASE 12
+#define NOUVEAU_GETPARAM_GRAPH_UNITS 13
+#define NOUVEAU_GETPARAM_PTIMER_TIME 14
+#define NOUVEAU_GETPARAM_HAS_BO_USAGE 15
+#define NOUVEAU_GETPARAM_HAS_PAGEFLIP 16
+struct drm_nouveau_getparam {
+ __u64 param;
+ __u64 value;
+};
+
+struct drm_nouveau_channel_alloc {
+ __u32 fb_ctxdma_handle;
+ __u32 tt_ctxdma_handle;
+
+ __s32 channel;
+ __u32 pushbuf_domains;
+
+ /* Notifier memory */
+ __u32 notifier_handle;
+
+ /* DRM-enforced subchannel assignments */
+ struct {
+ __u32 handle;
+ __u32 grclass;
+ } subchan[8];
+ __u32 nr_subchan;
+};
+
+struct drm_nouveau_channel_free {
+ __s32 channel;
+};
+
#define NOUVEAU_GEM_DOMAIN_CPU (1 << 0)
#define NOUVEAU_GEM_DOMAIN_VRAM (1 << 1)
#define NOUVEAU_GEM_DOMAIN_GART (1 << 2)
#define NOUVEAU_GEM_DOMAIN_MAPPABLE (1 << 3)
#define NOUVEAU_GEM_DOMAIN_COHERENT (1 << 4)
+/* The BO will never be shared via import or export. */
+#define NOUVEAU_GEM_DOMAIN_NO_SHARE (1 << 5)
#define NOUVEAU_GEM_TILE_COMP 0x00030000 /* nv50-only */
#define NOUVEAU_GEM_TILE_LAYOUT_MASK 0x0000ff00
@@ -98,6 +138,7 @@ struct drm_nouveau_gem_pushbuf_push {
__u32 pad;
__u64 offset;
__u64 length;
+#define NOUVEAU_GEM_PUSHBUF_NO_PREFETCH (1 << 23)
};
struct drm_nouveau_gem_pushbuf {
@@ -126,16 +167,233 @@ struct drm_nouveau_gem_cpu_fini {
__u32 handle;
};
-#define DRM_NOUVEAU_GETPARAM 0x00 /* deprecated */
+/**
+ * struct drm_nouveau_sync - sync object
+ *
+ * This structure serves as synchronization mechanism for (potentially)
+ * asynchronous operations such as EXEC or VM_BIND.
+ */
+struct drm_nouveau_sync {
+ /**
+ * @flags: the flags for a sync object
+ *
+ * The first 8 bits are used to determine the type of the sync object.
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_SYNC_SYNCOBJ 0x0
+#define DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ 0x1
+#define DRM_NOUVEAU_SYNC_TYPE_MASK 0xf
+ /**
+ * @handle: the handle of the sync object
+ */
+ __u32 handle;
+ /**
+ * @timeline_value:
+ *
+ * The timeline point of the sync object in case the syncobj is of
+ * type DRM_NOUVEAU_SYNC_TIMELINE_SYNCOBJ.
+ */
+ __u64 timeline_value;
+};
+
+/**
+ * struct drm_nouveau_vm_init - GPU VA space init structure
+ *
+ * Used to initialize the GPU's VA space for a user client, telling the kernel
+ * which portion of the VA space is managed by the UMD and kernel respectively.
+ *
+ * For the UMD to use the VM_BIND uAPI, this must be called before any BOs or
+ * channels are created; if called afterwards DRM_IOCTL_NOUVEAU_VM_INIT fails
+ * with -ENOSYS.
+ */
+struct drm_nouveau_vm_init {
+ /**
+ * @kernel_managed_addr: start address of the kernel managed VA space
+ * region
+ */
+ __u64 kernel_managed_addr;
+ /**
+ * @kernel_managed_size: size of the kernel managed VA space region in
+ * bytes
+ */
+ __u64 kernel_managed_size;
+};
+
+/**
+ * struct drm_nouveau_vm_bind_op - VM_BIND operation
+ *
+ * This structure represents a single VM_BIND operation. UMDs should pass
+ * an array of this structure via struct drm_nouveau_vm_bind's &op_ptr field.
+ */
+struct drm_nouveau_vm_bind_op {
+ /**
+ * @op: the operation type
+ */
+ __u32 op;
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_MAP:
+ *
+ * Map a GEM object to the GPU's VA space. Optionally, the
+ * &DRM_NOUVEAU_VM_BIND_SPARSE flag can be passed to instruct the kernel to
+ * create sparse mappings for the given range.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_MAP 0x0
+/**
+ * @DRM_NOUVEAU_VM_BIND_OP_UNMAP:
+ *
+ * Unmap an existing mapping in the GPU's VA space. If the region the mapping
+ * is located in is a sparse region, new sparse mappings are created where the
+ * unmapped (memory backed) mapping was mapped previously. To remove a sparse
+ * region the &DRM_NOUVEAU_VM_BIND_SPARSE must be set.
+ */
+#define DRM_NOUVEAU_VM_BIND_OP_UNMAP 0x1
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind_op
+ */
+ __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_SPARSE:
+ *
+ * Indicates that an allocated VA space region should be sparse.
+ */
+#define DRM_NOUVEAU_VM_BIND_SPARSE (1 << 8)
+ /**
+ * @handle: the handle of the DRM GEM object to map
+ */
+ __u32 handle;
+ /**
+ * @pad: 32 bit padding, should be 0
+ */
+ __u32 pad;
+ /**
+ * @addr:
+ *
+ * the address the VA space region or (memory backed) mapping should be mapped to
+ */
+ __u64 addr;
+ /**
+ * @bo_offset: the offset within the BO backing the mapping
+ */
+ __u64 bo_offset;
+ /**
+ * @range: the size of the requested mapping in bytes
+ */
+ __u64 range;
+};
+
+/**
+ * struct drm_nouveau_vm_bind - structure for DRM_IOCTL_NOUVEAU_VM_BIND
+ */
+struct drm_nouveau_vm_bind {
+ /**
+ * @op_count: the number of &drm_nouveau_vm_bind_op
+ */
+ __u32 op_count;
+ /**
+ * @flags: the flags for a &drm_nouveau_vm_bind ioctl
+ */
+ __u32 flags;
+/**
+ * @DRM_NOUVEAU_VM_BIND_RUN_ASYNC:
+ *
+ * Indicates that the given VM_BIND operation should be executed asynchronously
+ * by the kernel.
+ *
+ * If this flag is not supplied the kernel executes the associated operations
+ * synchronously and doesn't accept any &drm_nouveau_sync objects.
+ */
+#define DRM_NOUVEAU_VM_BIND_RUN_ASYNC 0x1
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @op_ptr: pointer to the &drm_nouveau_vm_bind_ops to execute
+ */
+ __u64 op_ptr;
+};
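A hedged UMD-side sketch of the VM_BIND flow described above: after the VA space has been set up with DRM_IOCTL_NOUVEAU_VM_INIT, a single synchronous OP_MAP maps one BO at a chosen GPU VA. The handle, address and size are placeholders.

	/* Hypothetical sketch: one synchronous VM_BIND map operation. */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/nouveau_drm.h>

	static int map_bo(int drm_fd, __u32 bo_handle, __u64 gpu_va, __u64 size)
	{
		struct drm_nouveau_vm_bind_op op;
		struct drm_nouveau_vm_bind bind;

		memset(&op, 0, sizeof(op));
		op.op = DRM_NOUVEAU_VM_BIND_OP_MAP;
		op.handle = bo_handle;
		op.addr = gpu_va;
		op.bo_offset = 0;
		op.range = size;

		memset(&bind, 0, sizeof(bind));
		bind.op_count = 1;
		bind.flags = 0;	/* synchronous: no drm_nouveau_sync objects allowed */
		bind.op_ptr = (__u64)(uintptr_t)&op;

		return ioctl(drm_fd, DRM_IOCTL_NOUVEAU_VM_BIND, &bind);
	}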
+
+/**
+ * struct drm_nouveau_exec_push - EXEC push operation
+ *
+ * This structure represents a single EXEC push operation. UMDs should pass an
+ * array of this structure via struct drm_nouveau_exec's &push_ptr field.
+ */
+struct drm_nouveau_exec_push {
+ /**
+ * @va: the virtual address of the push buffer mapping
+ */
+ __u64 va;
+ /**
+ * @va_len: the length of the push buffer mapping
+ */
+ __u32 va_len;
+ /**
+ * @flags: the flags for this push buffer mapping
+ */
+ __u32 flags;
+#define DRM_NOUVEAU_EXEC_PUSH_NO_PREFETCH 0x1
+};
+
+/**
+ * struct drm_nouveau_exec - structure for DRM_IOCTL_NOUVEAU_EXEC
+ */
+struct drm_nouveau_exec {
+ /**
+ * @channel: the channel to execute the push buffer in
+ */
+ __u32 channel;
+ /**
+ * @push_count: the number of &drm_nouveau_exec_push ops
+ */
+ __u32 push_count;
+ /**
+ * @wait_count: the number of wait &drm_nouveau_syncs
+ */
+ __u32 wait_count;
+ /**
+ * @sig_count: the number of &drm_nouveau_syncs to signal when finished
+ */
+ __u32 sig_count;
+ /**
+ * @wait_ptr: pointer to &drm_nouveau_syncs to wait for
+ */
+ __u64 wait_ptr;
+ /**
+ * @sig_ptr: pointer to &drm_nouveau_syncs to signal when finished
+ */
+ __u64 sig_ptr;
+ /**
+ * @push_ptr: pointer to &drm_nouveau_exec_push ops
+ */
+ __u64 push_ptr;
+};
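And a matching sketch for the new EXEC interface: submit one push buffer mapping on a channel and signal a binary syncobj on completion. Channel, VA and handle values are placeholders for illustration.

	/* Hypothetical sketch: one DRM_IOCTL_NOUVEAU_EXEC submission. */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/nouveau_drm.h>

	static int submit(int drm_fd, __u32 channel, __u64 push_va, __u32 push_len,
			  __u32 done_syncobj)
	{
		struct drm_nouveau_exec_push push;
		struct drm_nouveau_sync sig;
		struct drm_nouveau_exec exec;

		memset(&push, 0, sizeof(push));
		push.va = push_va;
		push.va_len = push_len;

		memset(&sig, 0, sizeof(sig));
		sig.flags = DRM_NOUVEAU_SYNC_SYNCOBJ;	/* binary syncobj */
		sig.handle = done_syncobj;

		memset(&exec, 0, sizeof(exec));
		exec.channel = channel;
		exec.push_count = 1;
		exec.push_ptr = (__u64)(uintptr_t)&push;
		exec.sig_count = 1;
		exec.sig_ptr = (__u64)(uintptr_t)&sig;

		return ioctl(drm_fd, DRM_IOCTL_NOUVEAU_EXEC, &exec);
	}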
+
+#define DRM_NOUVEAU_GETPARAM 0x00
#define DRM_NOUVEAU_SETPARAM 0x01 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02 /* deprecated */
-#define DRM_NOUVEAU_CHANNEL_FREE 0x03 /* deprecated */
+#define DRM_NOUVEAU_CHANNEL_ALLOC 0x02
+#define DRM_NOUVEAU_CHANNEL_FREE 0x03
#define DRM_NOUVEAU_GROBJ_ALLOC 0x04 /* deprecated */
#define DRM_NOUVEAU_NOTIFIEROBJ_ALLOC 0x05 /* deprecated */
#define DRM_NOUVEAU_GPUOBJ_FREE 0x06 /* deprecated */
#define DRM_NOUVEAU_NVIF 0x07
#define DRM_NOUVEAU_SVM_INIT 0x08
#define DRM_NOUVEAU_SVM_BIND 0x09
+#define DRM_NOUVEAU_VM_INIT 0x10
+#define DRM_NOUVEAU_VM_BIND 0x11
+#define DRM_NOUVEAU_EXEC 0x12
#define DRM_NOUVEAU_GEM_NEW 0x40
#define DRM_NOUVEAU_GEM_PUSHBUF 0x41
#define DRM_NOUVEAU_GEM_CPU_PREP 0x42
@@ -188,6 +446,10 @@ struct drm_nouveau_svm_bind {
#define NOUVEAU_SVM_BIND_TARGET__GPU_VRAM (1UL << 31)
+#define DRM_IOCTL_NOUVEAU_GETPARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+
#define DRM_IOCTL_NOUVEAU_SVM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_INIT, struct drm_nouveau_svm_init)
#define DRM_IOCTL_NOUVEAU_SVM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SVM_BIND, struct drm_nouveau_svm_bind)
@@ -197,6 +459,9 @@ struct drm_nouveau_svm_bind {
#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
#define DRM_IOCTL_NOUVEAU_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+#define DRM_IOCTL_NOUVEAU_VM_INIT DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_INIT, struct drm_nouveau_vm_init)
+#define DRM_IOCTL_NOUVEAU_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_VM_BIND, struct drm_nouveau_vm_bind)
+#define DRM_IOCTL_NOUVEAU_EXEC DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_EXEC, struct drm_nouveau_exec)
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/virtgpu_drm.h b/include/uapi/drm/virtgpu_drm.h
index 7b158fcb02b4..b1d0e56565bc 100644
--- a/include/uapi/drm/virtgpu_drm.h
+++ b/include/uapi/drm/virtgpu_drm.h
@@ -64,6 +64,16 @@ struct drm_virtgpu_map {
__u32 pad;
};
+#define VIRTGPU_EXECBUF_SYNCOBJ_RESET 0x01
+#define VIRTGPU_EXECBUF_SYNCOBJ_FLAGS ( \
+ VIRTGPU_EXECBUF_SYNCOBJ_RESET | \
+ 0)
+struct drm_virtgpu_execbuffer_syncobj {
+ __u32 handle;
+ __u32 flags;
+ __u64 point;
+};
+
/* fence_fd is modified on success if VIRTGPU_EXECBUF_FENCE_FD_OUT flag is set. */
struct drm_virtgpu_execbuffer {
__u32 flags;
@@ -73,7 +83,11 @@ struct drm_virtgpu_execbuffer {
__u32 num_bo_handles;
__s32 fence_fd; /* in/out fence fd (see VIRTGPU_EXECBUF_FENCE_FD_IN/OUT) */
__u32 ring_idx; /* command ring index (see VIRTGPU_EXECBUF_RING_IDX) */
- __u32 pad;
+ __u32 syncobj_stride; /* size of @drm_virtgpu_execbuffer_syncobj */
+ __u32 num_in_syncobjs;
+ __u32 num_out_syncobjs;
+ __u64 in_syncobjs;
+ __u64 out_syncobjs;
};
#define VIRTGPU_PARAM_3D_FEATURES 1 /* do we have 3D features in the hw */
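Finally, a hedged sketch of the extended virtgpu execbuffer: one in-syncobj (reset after waiting) and one out-syncobj are attached to a submission via the new stride/count/pointer fields. The command buffer, its size and the syncobj handles are placeholders; the pre-existing command/size fields are assumed from the full struct definition.

	/* Hypothetical sketch: virtgpu execbuffer with in/out syncobjs. */
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm/virtgpu_drm.h>

	static int submit_with_syncobjs(int drm_fd, void *cmds, __u32 cmd_size,
					__u32 wait_handle, __u32 signal_handle)
	{
		struct drm_virtgpu_execbuffer_syncobj in = {
			.handle = wait_handle,
			.flags = VIRTGPU_EXECBUF_SYNCOBJ_RESET,	/* reset after the wait */
			.point = 0,				/* binary syncobj */
		};
		struct drm_virtgpu_execbuffer_syncobj out = {
			.handle = signal_handle,
		};
		struct drm_virtgpu_execbuffer eb;

		memset(&eb, 0, sizeof(eb));
		eb.command = (__u64)(uintptr_t)cmds;
		eb.size = cmd_size;
		eb.syncobj_stride = sizeof(struct drm_virtgpu_execbuffer_syncobj);
		eb.num_in_syncobjs = 1;
		eb.in_syncobjs = (__u64)(uintptr_t)&in;
		eb.num_out_syncobjs = 1;
		eb.out_syncobjs = (__u64)(uintptr_t)&out;

		return ioctl(drm_fd, DRM_IOCTL_VIRTGPU_EXECBUFFER, &eb);
	}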