-rw-r--r--  Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml | 15
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml | 124
-rw-r--r--  Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml | 74
-rw-r--r--  Documentation/gpu/drm-internals.rst | 12
-rw-r--r--  Documentation/gpu/todo.rst | 2
-rw-r--r--  MAINTAINERS | 18
-rw-r--r--  drivers/gpu/drm/Kconfig | 28
-rw-r--r--  drivers/gpu/drm/Makefile | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 61
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c | 104
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 63
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 2
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 1
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 1
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 9
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 5
-rw-r--r--  drivers/gpu/drm/ast/ast_dp501.c | 139
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.c | 23
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 12
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 14
-rw-r--r--  drivers/gpu/drm/bochs/bochs.h | 1
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 3
-rw-r--r--  drivers/gpu/drm/bochs/bochs_hw.c | 25
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511.h | 1
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_audio.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/analogix/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/cadence/Makefile | 2
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c | 140
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h | 22
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c | 570
-rw-r--r--  drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h | 92
-rw-r--r--  drivers/gpu/drm/bridge/ite-it66121.c | 1021
-rw-r--r--  drivers/gpu/drm/bridge/nwl-dsi.c | 86
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 22
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 1
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 719
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c | 101
-rw-r--r--  drivers/gpu/drm/drm_aperture.c | 344
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 7
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 40
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 6
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 3
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 4
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 3
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 25
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 127
-rw-r--r--  drivers/gpu/drm/drm_context.c | 3
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 8
-rw-r--r--  drivers/gpu/drm/drm_dp_aux_dev.c | 6
-rw-r--r--  drivers/gpu/drm/drm_dp_dual_mode_helper.c | 68
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 195
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 405
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 4
-rw-r--r--  drivers/gpu/drm/drm_file.c | 14
-rw-r--r--  drivers/gpu/drm/drm_format_helper.c | 96
-rw-r--r--  drivers/gpu/drm/drm_gem_ttm_helper.c | 33
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 48
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 5
-rw-r--r--  drivers/gpu/drm/drm_ioc32.c | 19
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 24
-rw-r--r--  drivers/gpu/drm/drm_legacy.h | 30
-rw-r--r--  drivers/gpu/drm/drm_legacy_misc.c | 1
-rw-r--r--  drivers/gpu/drm/drm_memory.c | 1
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 13
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 82
-rw-r--r--  drivers/gpu/drm/drm_plane.c | 18
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/backlight.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_dp.c | 50
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_lvds.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/intel_gmbus.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 2
-rw-r--r--  drivers/gpu/drm/gud/gud_internal.h | 4
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 6
-rw-r--r--  drivers/gpu/drm/i810/i810_dma.c | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lspcon.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_region_lmem.c | 2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_runtime_pm.h | 2
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 1
-rw-r--r--  drivers/gpu/drm/imx/dcss/dcss-kms.c | 1
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 1
-rw-r--r--  drivers/gpu/drm/kmb/kmb_dsi.c | 10
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 27
-rw-r--r--  drivers/gpu/drm/meson/meson_dw_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/mga/mga_dma.c | 29
-rw-r--r--  drivers/gpu/drm/mga/mga_drv.h | 1
-rw-r--r--  drivers/gpu/drm/mga/mga_state.c | 3
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 5
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_mode.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 5
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c | 8
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 6
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.h | 3
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_aux.c | 5
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c | 5
-rw-r--r--  drivers/gpu/drm/mxsfb/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_bo.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 19
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.h | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r--  drivers/gpu/drm/panel/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 140
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 5
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 8
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.h | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_dumb.c | 17
-rw-r--r--  drivers/gpu/drm/qxl/qxl_ioctl.c | 4
-rw-r--r--  drivers/gpu/drm/qxl/qxl_object.h | 5
-rw-r--r--  drivers/gpu/drm/r128/ati_pcigart.c | 33
-rw-r--r--  drivers/gpu/drm/r128/r128_cce.c | 2
-rw-r--r--  drivers/gpu/drm/r128/r128_drv.c | 4
-rw-r--r--  drivers/gpu/drm/r128/r128_state.c | 3
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 5
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h | 42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_agp.c | 118
-rw-r--r--  drivers/gpu/drm/radeon/radeon_dp_mst.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_kms.c | 18
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.h | 2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_ttm.c | 6
-rw-r--r--  drivers/gpu/drm/savage/savage_bci.c | 26
-rw-r--r--  drivers/gpu/drm/scheduler/sched_entity.c | 6
-rw-r--r--  drivers/gpu/drm/scheduler/sched_main.c | 9
-rw-r--r--  drivers/gpu/drm/sis/sis_drv.c | 3
-rw-r--r--  drivers/gpu/drm/stm/ltdc.c | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 5
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 10
-rw-r--r--  drivers/gpu/drm/tegra/dpaux.c | 1
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 6
-rw-r--r--  drivers/gpu/drm/tiny/Kconfig | 16
-rw-r--r--  drivers/gpu/drm/tiny/Makefile | 1
-rw-r--r--  drivers/gpu/drm/tiny/cirrus.c | 5
-rw-r--r--  drivers/gpu/drm/tiny/hx8357d.c | 6
-rw-r--r--  drivers/gpu/drm/tiny/ili9225.c | 12
-rw-r--r--  drivers/gpu/drm/tiny/ili9341.c | 12
-rw-r--r--  drivers/gpu/drm/tiny/ili9486.c | 12
-rw-r--r--  drivers/gpu/drm/tiny/mi0283qt.c | 12
-rw-r--r--  drivers/gpu/drm/tiny/simpledrm.c | 896
-rw-r--r--  drivers/gpu/drm/tiny/st7586.c | 12
-rw-r--r--  drivers/gpu/drm/tiny/st7735r.c | 12
-rw-r--r--  drivers/gpu/drm/ttm/Makefile | 2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c | 8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 58
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 4
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c | 25
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.h | 3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_range_manager.c | 40
-rw-r--r--  drivers/gpu/drm/ttm/ttm_resource.c | 20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_sys_manager.c | 41
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 20
-rw-r--r--  drivers/gpu/drm/vboxvideo/vbox_drv.c | 3
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 9
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.h | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.c | 61
-rw-r--r--  drivers/gpu/drm/vc4/vc4_hdmi.h | 3
-rw-r--r--  drivers/gpu/drm/vc4/vc4_kms.c | 1
-rw-r--r--  drivers/gpu/drm/via/via_dma.c | 1
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c | 15
-rw-r--r--  drivers/gpu/drm/via/via_map.c | 3
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 10
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_gem.c | 8
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_object.c | 3
-rw-r--r--  drivers/gpu/drm/vkms/vkms_composer.c | 104
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c | 5
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.h | 9
-rw-r--r--  drivers/gpu/drm/vkms/vkms_output.c | 28
-rw-r--r--  drivers/gpu/drm/vkms/vkms_plane.c | 51
-rw-r--r--  drivers/gpu/drm/vmwgfx/Kconfig | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/Makefile | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/device_include/svga_reg.h | 55
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_lock.c | 194
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_lock.h | 218
-rw-r--r--  drivers/gpu/drm/vmwgfx/ttm_object.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_binding.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 43
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c | 118
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_context.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 169
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 147
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 41
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.c | 18
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fence.h | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c | 60
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 77
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c | 36
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.c | 31
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg.h | 214
-rwxr-xr-x  drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h | 130
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h | 219
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_reg.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c | 8
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c | 2
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_shader.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c | 5
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_so.c | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 40
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 15
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 9
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_validation.c | 4
-rw-r--r--  drivers/gpu/drm/xlnx/zynqmp_dp.c | 5
-rw-r--r--  drivers/gpu/drm/zte/Kconfig | 1
-rw-r--r--  drivers/video/fbdev/Kconfig | 2
-rw-r--r--  drivers/video/fbdev/imxfb.c | 2
-rw-r--r--  include/drm/bridge/dw_hdmi.h | 2
-rw-r--r--  include/drm/drm_agpsupport.h | 117
-rw-r--r--  include/drm/drm_aperture.h | 35
-rw-r--r--  include/drm/drm_connector.h | 4
-rw-r--r--  include/drm/drm_device.h | 9
-rw-r--r--  include/drm/drm_dp_dual_mode_helper.h | 14
-rw-r--r--  include/drm/drm_dp_helper.h | 19
-rw-r--r--  include/drm/drm_dp_mst_helper.h | 15
-rw-r--r--  include/drm/drm_fb_helper.h | 51
-rw-r--r--  include/drm/drm_format_helper.h | 10
-rw-r--r--  include/drm/drm_gem_ttm_helper.h | 5
-rw-r--r--  include/drm/drm_gem_vram_helper.h | 7
-rw-r--r--  include/drm/drm_legacy.h | 86
-rw-r--r--  include/drm/drm_mode_config.h | 2
-rw-r--r--  include/drm/drm_print.h | 20
-rw-r--r--  include/drm/ttm/ttm_bo_api.h | 21
-rw-r--r--  include/drm/ttm/ttm_resource.h | 2
-rw-r--r--  include/uapi/drm/drm_mode.h | 7
269 files changed, 7169 insertions, 3006 deletions
diff --git a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
index 63427878715e..2333fdbe9296 100644
--- a/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/cdns,mhdp8546.yaml
@@ -18,7 +18,7 @@ properties:
reg:
minItems: 1
- maxItems: 2
+ maxItems: 3
items:
- description:
Register block of mhdptx apb registers up to PHY mapped area (AUX_CONFIG_P).
@@ -26,13 +26,16 @@ properties:
included in the associated PHY.
- description:
Register block for DSS_EDP0_INTG_CFG_VP registers in case of TI J7 SoCs.
+ - description:
+ Register block of mhdptx sapb registers.
reg-names:
minItems: 1
- maxItems: 2
+ maxItems: 3
items:
- const: mhdptx
- const: j721e-intg
+ - const: mhdptx-sapb
clocks:
maxItems: 1
@@ -99,14 +102,18 @@ allOf:
properties:
reg:
minItems: 2
+ maxItems: 3
reg-names:
minItems: 2
+ maxItems: 3
else:
properties:
reg:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
reg-names:
- maxItems: 1
+ minItems: 1
+ maxItems: 2
required:
- compatible
diff --git a/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
new file mode 100644
index 000000000000..6ec1d5fbb8bc
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
@@ -0,0 +1,124 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ite,it66121.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: ITE it66121 HDMI bridge Device Tree Bindings
+
+maintainers:
+ - Phong LE <ple@baylibre.com>
+ - Neil Armstrong <narmstrong@baylibre.com>
+
+description: |
+ The IT66121 is a high-performance and low-power single channel HDMI
+ transmitter, fully compliant with HDMI 1.3a, HDCP 1.2 and backward compatible
+ to DVI 1.0 specifications.
+
+properties:
+ compatible:
+ const: ite,it66121
+
+ reg:
+ maxItems: 1
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO connected to active low reset
+
+ vrf12-supply:
+ description: Regulator for 1.2V analog core power.
+
+ vcn33-supply:
+ description: Regulator for 3.3V digital core power.
+
+ vcn18-supply:
+ description: Regulator for 1.8V IO core power.
+
+ interrupts:
+ maxItems: 1
+
+ ports:
+ $ref: /schemas/graph.yaml#/properties/ports
+
+ properties:
+ port@0:
+ $ref: /schemas/graph.yaml#/$defs/port-base
+ unevaluatedProperties: false
+ description: DPI input port.
+
+ properties:
+ endpoint:
+ $ref: /schemas/graph.yaml#/$defs/endpoint-base
+ unevaluatedProperties: false
+
+ properties:
+ bus-width:
+ description:
+ Endpoint bus width.
+ enum:
+ - 12 # 12 data lines connected and dual-edge mode
+ - 24 # 24 data lines connected and single-edge mode
+ default: 24
+
+ port@1:
+ $ref: /schemas/graph.yaml#/properties/port
+ description: HDMI Connector port.
+
+ required:
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - reset-gpios
+ - vrf12-supply
+ - vcn33-supply
+ - vcn18-supply
+ - interrupts
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/gpio/gpio.h>
+ i2c {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ it66121hdmitx: hdmitx@4c {
+ compatible = "ite,it66121";
+ pinctrl-names = "default";
+ pinctrl-0 = <&ite_pins_default>;
+ vcn33-supply = <&mt6358_vcn33_wifi_reg>;
+ vcn18-supply = <&mt6358_vcn18_reg>;
+ vrf12-supply = <&mt6358_vrf12_reg>;
+ reset-gpios = <&pio 160 GPIO_ACTIVE_LOW>;
+ interrupt-parent = <&pio>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ reg = <0x4c>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ it66121_in: endpoint {
+ bus-width = <12>;
+ remote-endpoint = <&display_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ hdmi_conn_out: endpoint {
+ remote-endpoint = <&hdmi_conn_in>;
+ };
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
new file mode 100644
index 000000000000..4cb75a5f2e3a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,lms397kf04.yaml
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,lms397kf04.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung LMS397KF04 display panel
+
+description: The datasheet claims this is based around a display controller
+ named DB7430 with a separate backlight controller.
+
+maintainers:
+ - Linus Walleij <linus.walleij@linaro.org>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: samsung,lms397kf04
+
+ reg: true
+
+ reset-gpios: true
+
+ vci-supply:
+ description: regulator that supplies the VCI analog voltage
+ usually around 3.0 V
+
+ vccio-supply:
+ description: regulator that supplies the VCCIO voltage usually
+ around 1.8 V
+
+ backlight: true
+
+ spi-max-frequency:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: inherited as a SPI client node, the datasheet specifies
+ maximum 300 ns minimum cycle which gives around 3 MHz max frequency
+ maximum: 3000000
+
+ port: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "samsung,lms397kf04";
+ spi-max-frequency = <3000000>;
+ reg = <0>;
+ vci-supply = <&lcd_3v0_reg>;
+ vccio-supply = <&lcd_1v8_reg>;
+ reset-gpios = <&gpio 1 GPIO_ACTIVE_LOW>;
+ backlight = <&ktd259>;
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&display_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/gpu/drm-internals.rst b/Documentation/gpu/drm-internals.rst
index 12272b168580..06af044c882f 100644
--- a/Documentation/gpu/drm-internals.rst
+++ b/Documentation/gpu/drm-internals.rst
@@ -75,6 +75,18 @@ update it, its value is mostly useless. The DRM core prints it to the
kernel log at initialization time and passes it to userspace through the
DRM_IOCTL_VERSION ioctl.
+Managing Ownership of the Framebuffer Aperture
+----------------------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_aperture.c
+ :doc: overview
+
+.. kernel-doc:: include/drm/drm_aperture.h
+ :internal:
+
+.. kernel-doc:: drivers/gpu/drm/drm_aperture.c
+ :export:
+
Device Instance and Driver Handling
-----------------------------------
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index 7ff9fac10d8b..12e61869939e 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -546,6 +546,8 @@ There's a bunch of issues with it:
this (together with the drm_minor->drm_device move) would allow us to remove
debugfs_init.
+Previous RFC that hasn't landed yet: https://lore.kernel.org/dri-devel/20200513114130.28641-2-wambui.karugax@gmail.com/
+
Contact: Daniel Vetter
Level: Intermediate
diff --git a/MAINTAINERS b/MAINTAINERS
index 008fcad7ac00..beb0d3e1c8c8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5870,6 +5870,13 @@ S: Orphan / Obsolete
F: drivers/gpu/drm/savage/
F: include/uapi/drm/savage_drm.h
+DRM DRIVER FOR SIMPLE FRAMEBUFFERS
+M: Thomas Zimmermann <tzimmermann@suse.de>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: drivers/gpu/drm/tiny/simplekms.c
+
DRM DRIVER FOR SIS VIDEO CARDS
S: Orphan / Obsolete
F: drivers/gpu/drm/sis/
@@ -6239,7 +6246,7 @@ M: Christian Koenig <christian.koenig@amd.com>
M: Huang Rui <ray.huang@amd.com>
L: dri-devel@lists.freedesktop.org
S: Maintained
-T: git git://people.freedesktop.org/~agd5f/linux
+T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/ttm/
F: include/drm/ttm/
@@ -9719,6 +9726,14 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/
T: git git://linuxtv.org/anttip/media_tree.git
F: drivers/media/tuners/it913x*
+ITE IT66121 HDMI BRIDGE DRIVER
+M: Phong LE <ple@baylibre.com>
+M: Neil Armstrong <narmstrong@baylibre.com>
+S: Maintained
+T: git git://anongit.freedesktop.org/drm/drm-misc
+F: Documentation/devicetree/bindings/display/bridge/ite,it66121.yaml
+F: drivers/gpu/drm/bridge/ite-it66121.c
+
IVTV VIDEO4LINUX DRIVER
M: Andy Walls <awalls@md.metrocast.net>
L: linux-media@vger.kernel.org
@@ -15257,6 +15272,7 @@ F: drivers/net/wireless/quantenna
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
+M: Pan, Xinhui <Xinhui.Pan@amd.com>
L: amd-gfx@lists.freedesktop.org
S: Supported
T: git https://gitlab.freedesktop.org/agd5f/linux.git
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 3c16bd1afd87..d3a9ca4b1cec 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -80,23 +80,6 @@ config DRM_KMS_HELPER
help
CRTC helpers for KMS drivers.
-config DRM_KMS_FB_HELPER
- bool
- depends on DRM_KMS_HELPER
- select FB
- select FRAMEBUFFER_CONSOLE if !EXPERT
- select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
- select FB_SYS_FOPS
- select FB_SYS_FILLRECT
- select FB_SYS_COPYAREA
- select FB_SYS_IMAGEBLIT
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
- select FB_DEFERRED_IO
- help
- FBDEV helpers for KMS drivers.
-
config DRM_DEBUG_DP_MST_TOPOLOGY_REFS
bool "Enable refcount backtrace history in the DP MST helpers"
depends on STACKTRACE_SUPPORT
@@ -117,6 +100,17 @@ config DRM_FBDEV_EMULATION
depends on DRM
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
+ select FB
+ select FB_CFB_FILLRECT
+ select FB_CFB_COPYAREA
+ select FB_CFB_IMAGEBLIT
+ select FB_DEFERRED_IO
+ select FB_SYS_FOPS
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
+ select FRAMEBUFFER_CONSOLE if !EXPERT
+ select FRAMEBUFFER_CONSOLE_DETECT_PRIMARY if FRAMEBUFFER_CONSOLE
default y
help
Choose this option if you have a need for the legacy fbdev
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 5279db4392df..a91cc7684904 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -3,7 +3,7 @@
# Makefile for the drm device driver. This driver provides support for the
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
-drm-y := drm_auth.o drm_cache.o \
+drm-y := drm_aperture.o drm_auth.o drm_cache.o \
drm_file.o drm_gem.o drm_ioctl.o drm_irq.o \
drm_drv.o \
drm_sysfs.o drm_hashtab.o drm_mm.o \
@@ -20,15 +20,15 @@ drm-y := drm_auth.o drm_cache.o \
drm_client_modeset.o drm_atomic_uapi.o drm_hdcp.o \
drm_managed.o drm_vblank_work.o
-drm-$(CONFIG_DRM_LEGACY) += drm_bufs.o drm_context.o drm_dma.o drm_legacy_misc.o drm_lock.o \
- drm_memory.o drm_scatter.o drm_vm.o
+drm-$(CONFIG_DRM_LEGACY) += drm_agpsupport.o drm_bufs.o drm_context.o drm_dma.o \
+ drm_legacy_misc.o drm_lock.o drm_memory.o drm_scatter.o \
+ drm_vm.o
drm-$(CONFIG_DRM_LIB_RANDOM) += lib/drm_random.o
drm-$(CONFIG_COMPAT) += drm_ioc32.o
drm-$(CONFIG_DRM_GEM_CMA_HELPER) += drm_gem_cma_helper.o
drm-$(CONFIG_DRM_GEM_SHMEM_HELPER) += drm_gem_shmem_helper.o
drm-$(CONFIG_DRM_PANEL) += drm_panel.o
drm-$(CONFIG_OF) += drm_of.o
-drm-$(CONFIG_AGP) += drm_agpsupport.o
drm-$(CONFIG_PCI) += drm_pci.o
drm-$(CONFIG_DEBUG_FS) += drm_debugfs.o drm_debugfs_crc.o
drm-$(CONFIG_DRM_LOAD_EDID_FIRMWARE) += drm_edid_load.o
diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile
index ee85e8aba636..d216b7ecb5d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/Makefile
+++ b/drivers/gpu/drm/amd/amdgpu/Makefile
@@ -58,6 +58,8 @@ amdgpu-y += amdgpu_device.o amdgpu_kms.o \
amdgpu_umc.o smu_v11_0_i2c.o amdgpu_fru_eeprom.o amdgpu_rap.o \
amdgpu_fw_attestation.o amdgpu_securedisplay.o
+amdgpu-$(CONFIG_PROC_FS) += amdgpu_fdinfo.o
+
amdgpu-$(CONFIG_PERF_EVENTS) += amdgpu_pmu.o
# add asic specific block
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 264176a01e16..6cabecc74007 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -107,6 +107,7 @@
#include "amdgpu_gfxhub.h"
#include "amdgpu_df.h"
#include "amdgpu_smuio.h"
+#include "amdgpu_fdinfo.h"
#define MAX_GPU_INSTANCE 16
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 0350205c4897..01fe60fedcbe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -651,3 +651,64 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
idr_destroy(&mgr->ctx_handles);
mutex_destroy(&mgr->lock);
}
+
+void amdgpu_ctx_fence_time(struct amdgpu_ctx *ctx, struct amdgpu_ctx_entity *centity,
+ ktime_t *total, ktime_t *max)
+{
+ ktime_t now, t1;
+ uint32_t i;
+
+ now = ktime_get();
+ for (i = 0; i < amdgpu_sched_jobs; i++) {
+ struct dma_fence *fence;
+ struct drm_sched_fence *s_fence;
+
+ spin_lock(&ctx->ring_lock);
+ fence = dma_fence_get(centity->fences[i]);
+ spin_unlock(&ctx->ring_lock);
+ if (!fence)
+ continue;
+ s_fence = to_drm_sched_fence(fence);
+ if (!dma_fence_is_signaled(&s_fence->scheduled))
+ continue;
+ t1 = s_fence->scheduled.timestamp;
+ if (t1 >= now)
+ continue;
+ if (dma_fence_is_signaled(&s_fence->finished) &&
+ s_fence->finished.timestamp < now)
+ *total += ktime_sub(s_fence->finished.timestamp, t1);
+ else
+ *total += ktime_sub(now, t1);
+ t1 = ktime_sub(now, t1);
+ dma_fence_put(fence);
+ *max = max(t1, *max);
+ }
+}
+
+ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
+ uint32_t idx, uint64_t *elapsed)
+{
+ struct idr *idp;
+ struct amdgpu_ctx *ctx;
+ uint32_t id;
+ struct amdgpu_ctx_entity *centity;
+ ktime_t total = 0, max = 0;
+
+ if (idx >= AMDGPU_MAX_ENTITY_NUM)
+ return 0;
+ idp = &mgr->ctx_handles;
+ mutex_lock(&mgr->lock);
+ idr_for_each_entry(idp, ctx, id) {
+ if (!ctx->entities[hwip][idx])
+ continue;
+
+ centity = ctx->entities[hwip][idx];
+ amdgpu_ctx_fence_time(ctx, centity, &total, &max);
+ }
+
+ mutex_unlock(&mgr->lock);
+ if (elapsed)
+ *elapsed = max;
+
+ return total;
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
index f54e10314661..10dcf59a5c6b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h
@@ -87,5 +87,8 @@ void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr);
void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr);
long amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr, long timeout);
void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr);
-
+ktime_t amdgpu_ctx_mgr_fence_usage(struct amdgpu_ctx_mgr *mgr, uint32_t hwip,
+ uint32_t idx, uint64_t *elapsed);
+void amdgpu_ctx_fence_time(struct amdgpu_ctx *ctx, struct amdgpu_ctx_entity *centity,
+ ktime_t *total, ktime_t *max);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index f93883db2b46..6fd20ea2935b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -23,6 +23,7 @@
*/
#include <drm/amdgpu_drm.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem.h>
#include <drm/drm_vblank.h>
@@ -42,7 +43,7 @@
#include "amdgpu_irq.h"
#include "amdgpu_dma_buf.h"
#include "amdgpu_sched.h"
-
+#include "amdgpu_fdinfo.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_ras.h"
@@ -1258,7 +1259,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
#endif
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "amdgpudrmfb");
if (ret)
return ret;
@@ -1694,6 +1695,9 @@ static const struct file_operations amdgpu_driver_kms_fops = {
#ifdef CONFIG_COMPAT
.compat_ioctl = amdgpu_kms_compat_ioctl,
#endif
+#ifdef CONFIG_PROC_FS
+ .show_fdinfo = amdgpu_show_fdinfo
+#endif
};
int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
new file mode 100644
index 000000000000..dbebbe16e3b3
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: MIT
+/* Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: David Nieto
+ * Roy Sun
+ */
+
+#include <linux/debugfs.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+#include <linux/reboot.h>
+#include <linux/syscalls.h>
+
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_debugfs.h>
+
+#include "amdgpu.h"
+#include "amdgpu_vm.h"
+#include "amdgpu_gem.h"
+#include "amdgpu_ctx.h"
+#include "amdgpu_fdinfo.h"
+
+
+static const char *amdgpu_ip_name[AMDGPU_HW_IP_NUM] = {
+ [AMDGPU_HW_IP_GFX] = "gfx",
+ [AMDGPU_HW_IP_COMPUTE] = "compute",
+ [AMDGPU_HW_IP_DMA] = "dma",
+ [AMDGPU_HW_IP_UVD] = "dec",
+ [AMDGPU_HW_IP_VCE] = "enc",
+ [AMDGPU_HW_IP_UVD_ENC] = "enc_1",
+ [AMDGPU_HW_IP_VCN_DEC] = "dec",
+ [AMDGPU_HW_IP_VCN_ENC] = "enc",
+ [AMDGPU_HW_IP_VCN_JPEG] = "jpeg",
+};
+
+void amdgpu_show_fdinfo(struct seq_file *m, struct file *f)
+{
+ struct amdgpu_fpriv *fpriv;
+ uint32_t bus, dev, fn, i, domain;
+ uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;
+ struct drm_file *file = f->private_data;
+ struct amdgpu_device *adev = drm_to_adev(file->minor->dev);
+ int ret;
+
+ ret = amdgpu_file_to_fpriv(f, &fpriv);
+ if (ret)
+ return;
+ bus = adev->pdev->bus->number;
+ domain = pci_domain_nr(adev->pdev->bus);
+ dev = PCI_SLOT(adev->pdev->devfn);
+ fn = PCI_FUNC(adev->pdev->devfn);
+
+ ret = amdgpu_bo_reserve(fpriv->vm.root.base.bo, false);
+ if (ret) {
+ DRM_ERROR("Fail to reserve bo\n");
+ return;
+ }
+ amdgpu_vm_get_memory(&fpriv->vm, &vram_mem, &gtt_mem, &cpu_mem);
+ amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
+ seq_printf(m, "pdev:\t%04x:%02x:%02x.%d\npasid:\t%u\n", domain, bus,
+ dev, fn, fpriv->vm.pasid);
+ seq_printf(m, "vram mem:\t%llu kB\n", vram_mem/1024UL);
+ seq_printf(m, "gtt mem:\t%llu kB\n", gtt_mem/1024UL);
+ seq_printf(m, "cpu mem:\t%llu kB\n", cpu_mem/1024UL);
+ for (i = 0; i < AMDGPU_HW_IP_NUM; i++) {
+ uint32_t count = amdgpu_ctx_num_entities[i];
+ int idx = 0;
+ uint64_t total = 0, min = 0;
+ uint32_t perc, frac;
+
+ for (idx = 0; idx < count; idx++) {
+ total = amdgpu_ctx_mgr_fence_usage(&fpriv->ctx_mgr,
+ i, idx, &min);
+ if ((total == 0) || (min == 0))
+ continue;
+
+ perc = div64_u64(10000 * total, min);
+ frac = perc % 100;
+
+ seq_printf(m, "%s%d:\t%d.%d%%\n",
+ amdgpu_ip_name[i],
+ idx, perc/100, frac);
+ }
+ }
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
new file mode 100644
index 000000000000..41a4c7056729
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fdinfo.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: MIT
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: David Nieto
+ * Roy Sun
+ */
+#ifndef __AMDGPU_SMI_H__
+#define __AMDGPU_SMI_H__
+
+#include <linux/idr.h>
+#include <linux/kfifo.h>
+#include <linux/rbtree.h>
+#include <drm/gpu_scheduler.h>
+#include <drm/drm_file.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <linux/sched/mm.h>
+
+#include "amdgpu_sync.h"
+#include "amdgpu_ring.h"
+#include "amdgpu_ids.h"
+
+uint32_t amdgpu_get_ip_count(struct amdgpu_device *adev, int id);
+void amdgpu_show_fdinfo(struct seq_file *m, struct file *f);
+
+#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 311bcdc59eda..18974bd081f0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -766,7 +766,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
void __user *out = u64_to_user_ptr(args->value);
info.bo_size = robj->tbo.base.size;
- info.alignment = robj->tbo.mem.page_alignment << PAGE_SHIFT;
+ info.alignment = robj->tbo.page_alignment << PAGE_SHIFT;
info.domains = robj->preferred_domains;
info.domain_flags = robj->flags;
amdgpu_bo_unreserve(robj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
index 540c01052b21..72962de4c04c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c
@@ -205,7 +205,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man,
spin_lock(&mgr->lock);
r = drm_mm_insert_node_in_range(&mgr->mm, &node->node, mem->num_pages,
- mem->page_alignment, 0, place->fpfn,
+ tbo->page_alignment, 0, place->fpfn,
place->lpfn, DRM_MM_INSERT_BEST);
spin_unlock(&mgr->lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1345f7eba011..0adffcace326 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -52,36 +52,12 @@
*
*/
-/**
- * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
- *
- * @bo: &amdgpu_bo buffer object
- *
- * This function is called when a BO stops being pinned, and updates the
- * &amdgpu_device pin_size values accordingly.
- */
-static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
-{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
-
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
- atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
- &adev->visible_pin_size);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
- }
-}
-
static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
struct amdgpu_bo_user *ubo;
- if (bo->tbo.pin_count > 0)
- amdgpu_bo_subtract_pin_size(bo);
-
amdgpu_bo_kunmap(bo);
if (bo->tbo.base.import_attach)
@@ -1037,14 +1013,22 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
*/
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
ttm_bo_unpin(&bo->tbo);
if (bo->tbo.pin_count)
return;
- amdgpu_bo_subtract_pin_size(bo);
-
if (bo->tbo.base.import_attach)
dma_buf_unpin(bo->tbo.base.import_attach);
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
+ }
}
/**
@@ -1304,6 +1288,26 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
+ uint64_t *gtt_mem, uint64_t *cpu_mem)
+{
+ unsigned int domain;
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ switch (domain) {
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ *vram_mem += amdgpu_bo_size(bo);
+ break;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ *gtt_mem += amdgpu_bo_size(bo);
+ break;
+ case AMDGPU_GEM_DOMAIN_CPU:
+ default:
+ *cpu_mem += amdgpu_bo_size(bo);
+ break;
+ }
+}
+
/**
* amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
@@ -1362,7 +1366,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
- unsigned long offset, size;
+ unsigned long offset;
int r;
/* Remember that this BO was accessed by the CPU */
@@ -1371,9 +1375,8 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
if (bo->mem.mem_type != TTM_PL_VRAM)
return 0;
- size = bo->mem.num_pages << PAGE_SHIFT;
offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) <= adev->gmc.visible_vram_size)
+ if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1398,7 +1401,7 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
offset = bo->mem.start << PAGE_SHIFT;
/* this should never happen */
if (bo->mem.mem_type == TTM_PL_VRAM &&
- (offset + size) > adev->gmc.visible_vram_size)
+ (offset + bo->base.size) > adev->gmc.visible_vram_size)
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 2d1fefbe1e99..b37d36ac6b5a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -191,7 +191,7 @@ static inline unsigned amdgpu_bo_ngpu_pages(struct amdgpu_bo *bo)
static inline unsigned amdgpu_bo_gpu_page_alignment(struct amdgpu_bo *bo)
{
- return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
+ return (bo->tbo.page_alignment << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE;
}
/**
@@ -300,6 +300,8 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
+ uint64_t *gtt_mem, uint64_t *cpu_mem);
int amdgpu_bo_restore_shadow(struct amdgpu_bo *shadow,
struct dma_fence **fence);
uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 3bef0432cac2..8c7ec09eb1a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1018,8 +1018,6 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
} else {
/* allocate GART space */
- tmp = bo->mem;
- tmp.mm_node = NULL;
placement.num_placement = 1;
placement.placement = &placements;
placement.num_busy_placement = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 9acee4a5b2ba..4a3e3f72e127 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -25,6 +25,7 @@
* Alex Deucher
* Jerome Glisse
*/
+
#include <linux/dma-fence-array.h>
#include <linux/interval_tree_generic.h>
#include <linux/idr.h>
@@ -1717,6 +1718,50 @@ error_unlock:
return r;
}
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
+ uint64_t *gtt_mem, uint64_t *cpu_mem)
+{
+ struct amdgpu_bo_va *bo_va, *tmp;
+
+ list_for_each_entry_safe(bo_va, tmp, &vm->idle, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->evicted, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->relocated, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ spin_lock(&vm->invalidated_lock);
+ list_for_each_entry_safe(bo_va, tmp, &vm->invalidated, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ list_for_each_entry_safe(bo_va, tmp, &vm->done, base.vm_status) {
+ if (!bo_va->base.bo)
+ continue;
+ amdgpu_bo_get_memory(bo_va->base.bo, vram_mem,
+ gtt_mem, cpu_mem);
+ }
+ spin_unlock(&vm->invalidated_lock);
+}
/**
* amdgpu_vm_bo_update - update all BO mappings in the vm page table
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index 4e140288159c..6fd7dad0540a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -447,6 +447,8 @@ void amdgpu_vm_set_task_info(struct amdgpu_vm *vm);
void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
struct amdgpu_vm *vm);
void amdgpu_vm_del_from_lru_notify(struct ttm_buffer_object *bo);
+void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem,
+ uint64_t *gtt_mem, uint64_t *cpu_mem);
#if defined(CONFIG_DEBUG_FS)
void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index bce105e2973e..f7235438535f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -450,7 +450,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
/* default to 2MB */
pages_per_node = (2UL << (20UL - PAGE_SHIFT));
#endif
- pages_per_node = max((uint32_t)pages_per_node, mem->page_alignment);
+ pages_per_node = max((uint32_t)pages_per_node,
+ tbo->page_alignment);
num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
}
@@ -489,7 +490,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
for (; pages_left; ++i) {
unsigned long pages = min(pages_left, pages_per_node);
- uint32_t alignment = mem->page_alignment;
+ uint32_t alignment = tbo->page_alignment;
if (pages == pages_per_node)
alignment = pages_per_node;
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index a3ba9ca11e98..f327becb022f 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -188,6 +188,8 @@ void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{
amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
+ amdgpu_connector->ddc_bus->aux.drm_dev = amdgpu_connector->base.dev;
+
drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux);
amdgpu_connector->ddc_bus->has_aux = true;
}
@@ -610,7 +612,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i
dp_info->tries = 0;
voltage = 0xff;
while (1) {
- drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
+ drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
dp_info->link_status) <= 0) {
@@ -675,7 +677,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i
dp_info->tries = 0;
channel_eq = false;
while (1) {
- drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
dp_info->link_status) <= 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index 0cd98fcb1f9f..939bcfa2a4ec 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -363,6 +363,7 @@ static int uvd_v7_0_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
error:
dma_fence_put(fence);
+ amdgpu_bo_unpin(bo);
amdgpu_bo_unreserve(bo);
amdgpu_bo_unref(&bo);
return r;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 389eff96fcf6..9ca517b65854 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -6308,25 +6308,6 @@ static int fill_hdr_info_packet(const struct drm_connector_state *state,
return 0;
}
-static bool
-is_hdr_metadata_different(const struct drm_connector_state *old_state,
- const struct drm_connector_state *new_state)
-{
- struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
- struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
-
- if (old_blob != new_blob) {
- if (old_blob && new_blob &&
- old_blob->length == new_blob->length)
- return memcmp(old_blob->data, new_blob->data,
- old_blob->length);
-
- return true;
- }
-
- return false;
-}
-
static int
amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state)
@@ -6344,7 +6325,7 @@ amdgpu_dm_connector_atomic_check(struct drm_connector *conn,
if (!crtc)
return 0;
- if (is_hdr_metadata_different(old_con_state, new_con_state)) {
+ if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) {
struct dc_info_packet hdr_infopacket;
ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket);
@@ -7531,9 +7512,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
connector_type == DRM_MODE_CONNECTOR_eDP) {
- drm_object_attach_property(
- &aconnector->base.base,
- dm->ddev->mode_config.hdr_output_metadata_property, 0);
+ drm_connector_attach_hdr_output_metadata_property(&aconnector->base);
if (!aconnector->mst_port)
drm_connector_attach_vrr_capable_property(&aconnector->base);
@@ -8838,7 +8817,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
dm_old_crtc_state->abm_level;
hdr_changed =
- is_hdr_metadata_different(old_con_state, new_con_state);
+ !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
if (!scaling_changed && !abm_changed && !hdr_changed)
continue;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 9b221db526dc..4a0c24ce5f7d 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -434,10 +434,13 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector,
int link_index)
{
+ struct dc_link_settings max_link_enc_cap = {0};
+
aconnector->dm_dp_aux.aux.name =
kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d",
link_index);
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
+ aconnector->dm_dp_aux.aux.drm_dev = dm->ddev;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
@@ -447,6 +450,7 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
return;
+ dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap);
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
@@ -454,6 +458,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
&aconnector->dm_dp_aux.aux,
16,
4,
+ (u8)max_link_enc_cap.lane_count,
+ (u8)max_link_enc_cap.link_rate,
aconnector->connector_id);
drm_connector_attach_dp_subconnector_property(&aconnector->base);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 3ff3d9e90983..afa43181dec6 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1893,6 +1893,24 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down)
return true;
}
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap)
+{
+ if (!max_link_enc_cap) {
+ DC_LOG_ERROR("%s: Could not return max link encoder caps", __func__);
+ return false;
+ }
+
+ if (link->link_enc->funcs->get_max_link_cap) {
+ link->link_enc->funcs->get_max_link_cap(link->link_enc, max_link_enc_cap);
+ return true;
+ }
+
+ DC_LOG_ERROR("%s: Max link encoder caps unknown", __func__);
+ max_link_enc_cap->lane_count = 1;
+ max_link_enc_cap->link_rate = 6;
+ return false;
+}
+
static struct dc_link_settings get_max_link_cap(struct dc_link *link)
{
struct dc_link_settings max_link_cap = {0};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 054bab45ee17..fc5622ffec3d 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -345,6 +345,8 @@ bool dc_link_dp_set_test_pattern(
const unsigned char *p_custom_pattern,
unsigned int cust_pattern_size);
+bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap);
+
void dc_link_enable_hpd_filter(struct dc_link *link, bool enable);
bool dc_link_is_dp_sink_present(struct dc_link *link);
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index aeda4e5ec4f4..ff45f23f3d56 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -247,7 +247,6 @@ static void komeda_kms_mode_config_init(struct komeda_kms_dev *kms,
config->min_height = 0;
config->max_width = 4096;
config->max_height = 4096;
- config->allow_fb_modifiers = true;
config->funcs = &komeda_mode_config_funcs;
config->helper_private = &komeda_mode_config_helpers;
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index d83c7366b348..de59f3302516 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -403,7 +403,6 @@ static int malidp_init(struct drm_device *drm)
drm->mode_config.max_height = hwdev->max_line_size;
drm->mode_config.funcs = &malidp_mode_config_funcs;
drm->mode_config.helper_private = &malidp_mode_config_helpers;
- drm->mode_config.allow_fb_modifiers = true;
ret = malidp_crtc_init(drm);
if (ret)
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index ddbba67f0283..8c2ab3d653b7 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -927,6 +927,11 @@ static const struct drm_plane_helper_funcs malidp_de_plane_helper_funcs = {
.atomic_disable = malidp_de_plane_disable,
};
+static const uint64_t linear_only_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
int malidp_de_planes_init(struct drm_device *drm)
{
struct malidp_drm *malidp = drm->dev_private;
@@ -990,8 +995,8 @@ int malidp_de_planes_init(struct drm_device *drm)
*/
ret = drm_universal_plane_init(drm, &plane->base, crtcs,
&malidp_de_plane_funcs, formats, n,
- (id == DE_SMART) ? NULL : modifiers, plane_type,
- NULL);
+ (id == DE_SMART) ? linear_only_modifiers : modifiers,
+ plane_type, NULL);
if (ret < 0)
goto cleanup;
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 44fe9f994fc5..dab0a1f0983b 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -9,6 +9,7 @@
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
@@ -94,9 +95,7 @@ static int armada_drm_bind(struct device *dev)
}
/* Remove early framebuffers */
- ret = drm_fb_helper_remove_conflicting_framebuffers(NULL,
- "armada-drm-fb",
- false);
+ ret = drm_aperture_remove_framebuffers(false, "armada-drm-fb");
if (ret) {
dev_err(dev, "[" DRM_NAME ":%s] can't kick out simple-fb: %d\n",
__func__, ret);
diff --git a/drivers/gpu/drm/ast/ast_dp501.c b/drivers/gpu/drm/ast/ast_dp501.c
index 88121c0e0d05..cd93c44f2662 100644
--- a/drivers/gpu/drm/ast/ast_dp501.c
+++ b/drivers/gpu/drm/ast/ast_dp501.c
@@ -189,6 +189,9 @@ bool ast_backup_fw(struct drm_device *dev, u8 *addr, u32 size)
u32 i, data;
u32 boot_address;
+ if (ast->config_mode != ast_use_p2a)
+ return false;
+
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (data) {
boot_address = get_fw_base(ast);
@@ -207,6 +210,9 @@ static bool ast_launch_m68k(struct drm_device *dev)
u8 *fw_addr = NULL;
u8 jreg;
+ if (ast->config_mode != ast_use_p2a)
+ return false;
+
data = ast_mindwm(ast, 0x1e6e2100) & 0x01;
if (!data) {
@@ -271,25 +277,55 @@ u8 ast_get_dp501_max_clk(struct drm_device *dev)
struct ast_private *ast = to_ast_private(dev);
u32 boot_address, offset, data;
u8 linkcap[4], linkrate, linklanes, maxclk = 0xff;
+ u32 *plinkcap;
- boot_address = get_fw_base(ast);
-
- /* validate FW version */
- offset = 0xf000;
- data = ast_mindwm(ast, boot_address + offset);
- if ((data & 0xf0) != 0x10) /* version: 1x */
- return maxclk;
-
- /* Read Link Capability */
- offset = 0xf014;
- *(u32 *)linkcap = ast_mindwm(ast, boot_address + offset);
- if (linkcap[2] == 0) {
- linkrate = linkcap[0];
- linklanes = linkcap[1];
- data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
- if (data > 0xff)
- data = 0xff;
- maxclk = (u8)data;
+ if (ast->config_mode == ast_use_p2a) {
+ boot_address = get_fw_base(ast);
+
+ /* validate FW version */
+ offset = AST_DP501_GBL_VERSION;
+ data = ast_mindwm(ast, boot_address + offset);
+ if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
+ return maxclk;
+
+ /* Read Link Capability */
+ offset = AST_DP501_LINKRATE;
+ plinkcap = (u32 *)linkcap;
+ *plinkcap = ast_mindwm(ast, boot_address + offset);
+ if (linkcap[2] == 0) {
+ linkrate = linkcap[0];
+ linklanes = linkcap[1];
+ data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
+ if (data > 0xff)
+ data = 0xff;
+ maxclk = (u8)data;
+ }
+ } else {
+ if (!ast->dp501_fw_buf)
+ return AST_DP501_DEFAULT_DCLK; /* 1024x768 as default */
+
+ /* dummy read */
+ offset = 0x0000;
+ data = readl(ast->dp501_fw_buf + offset);
+
+ /* validate FW version */
+ offset = AST_DP501_GBL_VERSION;
+ data = readl(ast->dp501_fw_buf + offset);
+ if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1) /* version: 1x */
+ return maxclk;
+
+ /* Read Link Capability */
+ offset = AST_DP501_LINKRATE;
+ plinkcap = (u32 *)linkcap;
+ *plinkcap = readl(ast->dp501_fw_buf + offset);
+ if (linkcap[2] == 0) {
+ linkrate = linkcap[0];
+ linklanes = linkcap[1];
+ data = (linkrate == 0x0a) ? (90 * linklanes) : (54 * linklanes);
+ if (data > 0xff)
+ data = 0xff;
+ maxclk = (u8)data;
+ }
}
return maxclk;
}
@@ -298,26 +334,57 @@ bool ast_dp501_read_edid(struct drm_device *dev, u8 *ediddata)
{
struct ast_private *ast = to_ast_private(dev);
u32 i, boot_address, offset, data;
+ u32 *pEDIDidx;
- boot_address = get_fw_base(ast);
-
- /* validate FW version */
- offset = 0xf000;
- data = ast_mindwm(ast, boot_address + offset);
- if ((data & 0xf0) != 0x10)
- return false;
-
- /* validate PnP Monitor */
- offset = 0xf010;
- data = ast_mindwm(ast, boot_address + offset);
- if (!(data & 0x01))
- return false;
+ if (ast->config_mode == ast_use_p2a) {
+ boot_address = get_fw_base(ast);
- /* Read EDID */
- offset = 0xf020;
- for (i = 0; i < 128; i += 4) {
- data = ast_mindwm(ast, boot_address + offset + i);
- *(u32 *)(ediddata + i) = data;
+ /* validate FW version */
+ offset = AST_DP501_GBL_VERSION;
+ data = ast_mindwm(ast, boot_address + offset);
+ if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
+ return false;
+
+ /* validate PnP Monitor */
+ offset = AST_DP501_PNPMONITOR;
+ data = ast_mindwm(ast, boot_address + offset);
+ if (!(data & AST_DP501_PNP_CONNECTED))
+ return false;
+
+ /* Read EDID */
+ offset = AST_DP501_EDID_DATA;
+ for (i = 0; i < 128; i += 4) {
+ data = ast_mindwm(ast, boot_address + offset + i);
+ pEDIDidx = (u32 *)(ediddata + i);
+ *pEDIDidx = data;
+ }
+ } else {
+ if (!ast->dp501_fw_buf)
+ return false;
+
+ /* dummy read */
+ offset = 0x0000;
+ data = readl(ast->dp501_fw_buf + offset);
+
+ /* validate FW version */
+ offset = AST_DP501_GBL_VERSION;
+ data = readl(ast->dp501_fw_buf + offset);
+ if ((data & AST_DP501_FW_VERSION_MASK) != AST_DP501_FW_VERSION_1)
+ return false;
+
+ /* validate PnP Monitor */
+ offset = AST_DP501_PNPMONITOR;
+ data = readl(ast->dp501_fw_buf + offset);
+ if (!(data & AST_DP501_PNP_CONNECTED))
+ return false;
+
+ /* Read EDID */
+ offset = AST_DP501_EDID_DATA;
+ for (i = 0; i < 128; i += 4) {
+ data = readl(ast->dp501_fw_buf + offset + i);
+ pEDIDidx = (u32 *)(ediddata + i);
+ *pEDIDidx = data;
+ }
}
return true;
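
The two code paths above differ only in how a 32-bit word is fetched: through the P2A window with ast_mindwm() when config_mode is ast_use_p2a, or through the newly mapped dp501_fw_buf with readl() otherwise. A minimal sketch of a hypothetical accessor that would factor this out; the helper name and the -ENODEV return are assumptions for illustration, not part of the patch:

/*
 * Hypothetical helper, not part of this patch: hides the P2A vs. MMIO
 * access paths used by ast_get_dp501_max_clk() and ast_dp501_read_edid().
 */
static int ast_dp501_read32(struct ast_private *ast, u32 offset, u32 *val)
{
	if (ast->config_mode == ast_use_p2a) {
		*val = ast_mindwm(ast, get_fw_base(ast) + offset);
		return 0;
	}

	if (!ast->dp501_fw_buf)
		return -ENODEV;	/* no reserved buffer was mapped */

	*val = readl(ast->dp501_fw_buf + offset);
	return 0;
}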
diff --git a/drivers/gpu/drm/ast/ast_drv.c b/drivers/gpu/drm/ast/ast_drv.c
index 01837bea18c2..5aa452b4efe6 100644
--- a/drivers/gpu/drm/ast/ast_drv.c
+++ b/drivers/gpu/drm/ast/ast_drv.c
@@ -30,10 +30,10 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_probe_helper.h>
@@ -89,23 +89,18 @@ static const struct pci_device_id ast_pciidlist[] = {
MODULE_DEVICE_TABLE(pci, ast_pciidlist);
-static void ast_kick_out_firmware_fb(struct pci_dev *pdev)
+static int ast_remove_conflicting_framebuffers(struct pci_dev *pdev)
{
- struct apertures_struct *ap;
bool primary = false;
+ resource_size_t base, size;
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
-
+ base = pci_resource_start(pdev, 0);
+ size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "astdrmfb", primary);
- kfree(ap);
+
+ return drm_aperture_remove_conflicting_framebuffers(base, size, primary, "astdrmfb");
}
static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
@@ -114,7 +109,9 @@ static int ast_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct drm_device *dev;
int ret;
- ast_kick_out_firmware_fb(pdev);
+ ret = ast_remove_conflicting_framebuffers(pdev);
+ if (ret)
+ return ret;
ret = pcim_enable_device(pdev);
if (ret)
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index e82ab8628770..911f9f414774 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -150,6 +150,7 @@ struct ast_private {
void __iomem *regs;
void __iomem *ioregs;
+ void __iomem *dp501_fw_buf;
enum ast_chip chip;
bool vga2_clone;
@@ -325,6 +326,17 @@ int ast_mode_config_init(struct ast_private *ast);
#define AST_MM_ALIGN_SHIFT 4
#define AST_MM_ALIGN_MASK ((1 << AST_MM_ALIGN_SHIFT) - 1)
+#define AST_DP501_FW_VERSION_MASK GENMASK(7, 4)
+#define AST_DP501_FW_VERSION_1 BIT(4)
+#define AST_DP501_PNP_CONNECTED BIT(1)
+
+#define AST_DP501_DEFAULT_DCLK 65
+
+#define AST_DP501_GBL_VERSION 0xf000
+#define AST_DP501_PNPMONITOR 0xf010
+#define AST_DP501_LINKRATE 0xf014
+#define AST_DP501_EDID_DATA 0xf020
+
int ast_mm_init(struct ast_private *ast);
/* ast post */
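
The new macros replace the open-coded 0xf0/0x10 firmware-version test. Because AST_DP501_FW_VERSION_MASK is a GENMASK(), the version nibble can equivalently be pulled out with FIELD_GET(); a small sketch, assuming <linux/bitfield.h> is available to the caller (it is not added by this patch):

#include <linux/bitfield.h>

/* Illustrative only: same test as (data & AST_DP501_FW_VERSION_MASK) == AST_DP501_FW_VERSION_1 */
static inline bool ast_dp501_fw_is_v1x(u32 version_reg)
{
	return FIELD_GET(AST_DP501_FW_VERSION_MASK, version_reg) == 0x1;
}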
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index 0ac3c2039c4b..2aff2e6cf450 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -99,7 +99,7 @@ static void ast_detect_config_mode(struct drm_device *dev, u32 *scu_rev)
if (!(jregd0 & 0x80) || !(jregd1 & 0x10)) {
/* Double check it's actually working */
data = ast_read32(ast, 0xf004);
- if (data != 0xFFFFFFFF) {
+ if ((data != 0xFFFFFFFF) && (data != 0x00)) {
/* P2A works, grab silicon revision */
ast->config_mode = ast_use_p2a;
@@ -413,7 +413,7 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
pci_set_drvdata(pdev, dev);
- ast->regs = pci_iomap(pdev, 1, 0);
+ ast->regs = pcim_iomap(pdev, 1, 0);
if (!ast->regs)
return ERR_PTR(-EIO);
@@ -429,7 +429,7 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
/* "map" IO regs if the above hasn't done so already */
if (!ast->ioregs) {
- ast->ioregs = pci_iomap(pdev, 2, 0);
+ ast->ioregs = pcim_iomap(pdev, 2, 0);
if (!ast->ioregs)
return ERR_PTR(-EIO);
}
@@ -450,6 +450,14 @@ struct ast_private *ast_device_create(const struct drm_driver *drv,
if (ret)
return ERR_PTR(ret);
+ /* map reserved buffer */
+ ast->dp501_fw_buf = NULL;
+ if (dev->vram_mm->vram_size < pci_resource_len(pdev, 0)) {
+ ast->dp501_fw_buf = pci_iomap_range(pdev, 0, dev->vram_mm->vram_size, 0);
+ if (!ast->dp501_fw_buf)
+ drm_info(dev, "failed to map reserved buffer!\n");
+ }
+
ret = ast_mode_config_init(ast);
if (ret)
return ERR_PTR(ret);
diff --git a/drivers/gpu/drm/bochs/bochs.h b/drivers/gpu/drm/bochs/bochs.h
index e5bd1d517a18..e9645c612aff 100644
--- a/drivers/gpu/drm/bochs/bochs.h
+++ b/drivers/gpu/drm/bochs/bochs.h
@@ -78,6 +78,7 @@ struct bochs_device {
int bochs_hw_init(struct drm_device *dev);
void bochs_hw_fini(struct drm_device *dev);
+void bochs_hw_blank(struct bochs_device *bochs, bool blank);
void bochs_hw_setmode(struct bochs_device *bochs,
struct drm_display_mode *mode);
void bochs_hw_setformat(struct bochs_device *bochs,
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index b469624fe40d..c828cadbabff 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -6,6 +6,7 @@
#include <linux/pci.h>
#include <drm/drm_drv.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_managed.h>
@@ -109,7 +110,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
}
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "bochsdrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "bochsdrmfb");
if (ret)
return ret;
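
Together with the armada and ast hunks above, this completes the three entry points of the new aperture helper: drm_aperture_remove_framebuffers() when only a driver name is known, drm_aperture_remove_conflicting_framebuffers() with an explicit base/size range, and the PCI convenience wrapper used here. A minimal sketch of the PCI pattern in a hypothetical driver; the probe function and the "exampledrmfb" name are placeholders:

static int example_pci_probe(struct pci_dev *pdev,
			     const struct pci_device_id *ent)
{
	int ret;

	/* Kick out firmware framebuffers (vesafb, efifb, ...) bound to our BARs. */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev,
							       "exampledrmfb");
	if (ret)
		return ret;

	return pcim_enable_device(pdev);
}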
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index 2d7380a9890e..7d3426d8cc69 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -7,6 +7,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
+#include <video/vga.h>
#include "bochs.h"
/* ---------------------------------------------------------------------- */
@@ -24,6 +25,19 @@ static void bochs_vga_writeb(struct bochs_device *bochs, u16 ioport, u8 val)
}
}
+static u8 bochs_vga_readb(struct bochs_device *bochs, u16 ioport)
+{
+ if (WARN_ON(ioport < 0x3c0 || ioport > 0x3df))
+ return 0xff;
+
+ if (bochs->mmio) {
+ int offset = ioport - 0x3c0 + 0x400;
+ return readb(bochs->mmio + offset);
+ } else {
+ return inb(ioport);
+ }
+}
+
static u16 bochs_dispi_read(struct bochs_device *bochs, u16 reg)
{
u16 ret = 0;
@@ -205,6 +219,15 @@ void bochs_hw_fini(struct drm_device *dev)
kfree(bochs->edid);
}
+void bochs_hw_blank(struct bochs_device *bochs, bool blank)
+{
+ DRM_DEBUG_DRIVER("hw_blank %d\n", blank);
+ /* discard ar_flip_flop */
+ (void)bochs_vga_readb(bochs, VGA_IS1_RC);
+ /* blank or unblank; we need only update index and set 0x20 */
+ bochs_vga_writeb(bochs, VGA_ATT_W, blank ? 0 : 0x20);
+}
+
void bochs_hw_setmode(struct bochs_device *bochs,
struct drm_display_mode *mode)
{
@@ -223,7 +246,7 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs->xres, bochs->yres, bochs->bpp,
bochs->yres_virtual);
- bochs_vga_writeb(bochs, 0x3c0, 0x20); /* unblank */
+ bochs_hw_blank(bochs, false);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE, 0);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_BPP, bochs->bpp);
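
bochs_hw_blank() relies on standard VGA attribute-controller behaviour: a read of Input Status #1 (VGA_IS1_RC, 0x3da) resets the index/data flip-flop, after which writing the attribute index port (VGA_ATT_W, 0x3c0) with bit 5 clear blanks the screen, and with bit 5 (0x20) set re-enables video. A port-I/O-only sketch of the same sequence, assuming <video/vga.h> for the register names; the driver's helper above additionally handles the MMIO-mapped register window:

/* Generic VGA blanking sequence, mirroring bochs_hw_blank() above. */
static void vga_blank_screen(bool blank)
{
	(void)inb(VGA_IS1_RC);			/* reset the AR index/data flip-flop */
	outb(blank ? 0x00 : 0x20, VGA_ATT_W);	/* bit 5 = display enable */
}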
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 853081d186d5..99410e77d51a 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -57,6 +57,13 @@ static void bochs_pipe_enable(struct drm_simple_display_pipe *pipe,
bochs_plane_update(bochs, plane_state);
}
+static void bochs_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct bochs_device *bochs = pipe->crtc.dev->dev_private;
+
+ bochs_hw_blank(bochs, true);
+}
+
static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
@@ -67,6 +74,7 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
.enable = bochs_pipe_enable,
+ .disable = bochs_pipe_disable,
.update = bochs_pipe_update,
.prepare_fb = drm_gem_vram_simple_display_pipe_prepare_fb,
.cleanup_fb = drm_gem_vram_simple_display_pipe_cleanup_fb,
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 400193e38d29..d25e900f07ef 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -68,6 +68,7 @@ config DRM_LONTIUM_LT8912B
select DRM_KMS_HELPER
select DRM_MIPI_DSI
select REGMAP_I2C
+ select VIDEOMODE_HELPERS
help
Driver for Lontium LT8912B DSI to HDMI bridge
chip driver.
@@ -104,6 +105,14 @@ config DRM_LONTIUM_LT9611UXC
HDMI signals
Please say Y if you have such hardware.
+config DRM_ITE_IT66121
+ tristate "ITE IT66121 HDMI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ help
+ Support for ITE IT66121 HDMI bridge.
+
config DRM_LVDS_CODEC
tristate "Transparent LVDS encoders and decoders support"
depends on OF
@@ -172,7 +181,7 @@ config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF
select DRM_KMS_HELPER
- imply EXTCON
+ select EXTCON
depends on RC_CORE || !RC_CORE
help
Silicon Image SII8620 HDMI/MHL bridge chip driver.
@@ -270,6 +279,7 @@ config DRM_TI_SN65DSI86
select REGMAP_I2C
select DRM_PANEL
select DRM_MIPI_DSI
+ select AUXILIARY_BUS
help
Texas Instruments SN65DSI86 DSI to eDP Bridge driver
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 5c61b50c1663..965b54dccfe5 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
obj-$(CONFIG_DRM_NWL_MIPI_DSI) += nwl-dsi.o
+obj-$(CONFIG_DRM_ITE_IT66121) += ite-it66121.o
obj-y += analogix/
obj-y += cadence/
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index a9bb734366ae..05e3abb5a0c9 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -191,6 +191,7 @@
#define ADV7511_I2S_FORMAT_I2S 0
#define ADV7511_I2S_FORMAT_RIGHT_J 1
#define ADV7511_I2S_FORMAT_LEFT_J 2
+#define ADV7511_I2S_IEC958_DIRECT 3
#define ADV7511_PACKET(p, x) ((p) * 0x20 + (x))
#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x)
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
index 45838bd08d37..61f4a38e7d2b 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c
@@ -101,6 +101,10 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
case 20:
len = ADV7511_I2S_SAMPLE_LEN_20;
break;
+ case 32:
+ if (fmt->bit_fmt != SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
+ return -EINVAL;
+ fallthrough;
case 24:
len = ADV7511_I2S_SAMPLE_LEN_24;
break;
@@ -112,6 +116,8 @@ static int adv7511_hdmi_hw_params(struct device *dev, void *data,
case HDMI_I2S:
audio_source = ADV7511_AUDIO_SOURCE_I2S;
i2s_format = ADV7511_I2S_FORMAT_I2S;
+ if (fmt->bit_fmt == SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE)
+ i2s_format = ADV7511_I2S_IEC958_DIRECT;
break;
case HDMI_RIGHT_J:
audio_source = ADV7511_AUDIO_SOURCE_I2S;
diff --git a/drivers/gpu/drm/bridge/analogix/Kconfig b/drivers/gpu/drm/bridge/analogix/Kconfig
index 9160fd80dd70..2ef6eb2b786c 100644
--- a/drivers/gpu/drm/bridge/analogix/Kconfig
+++ b/drivers/gpu/drm/bridge/analogix/Kconfig
@@ -6,7 +6,7 @@ config DRM_ANALOGIX_ANX6345
select DRM_KMS_HELPER
select REGMAP_I2C
help
- ANX6345 is an ultra-low Full-HD DisplayPort/eDP
+ ANX6345 is an ultra-low power Full-HD DisplayPort/eDP
transmitter designed for portable devices. The
ANX6345 transforms the LVTTL RGB output of an
application processor to eDP or DisplayPort.
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index aa6cda458eb9..e33cd077595a 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -537,6 +537,7 @@ static int anx6345_bridge_attach(struct drm_bridge *bridge,
/* Register aux channel */
anx6345->aux.name = "DP-AUX";
anx6345->aux.dev = &anx6345->client->dev;
+ anx6345->aux.drm_dev = bridge->dev;
anx6345->aux.transfer = anx6345_aux_transfer;
err = drm_dp_aux_register(&anx6345->aux);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index f20558618220..5e6a0ed39199 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -905,6 +905,7 @@ static int anx78xx_bridge_attach(struct drm_bridge *bridge,
/* Register aux channel */
anx78xx->aux.name = "DP-AUX";
anx78xx->aux.dev = &anx78xx->client->dev;
+ anx78xx->aux.drm_dev = bridge->dev;
anx78xx->aux.transfer = anx78xx_aux_transfer;
err = drm_dp_aux_register(&anx78xx->aux);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index f115233b1cb9..550814ca2139 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1765,6 +1765,7 @@ int analogix_dp_bind(struct analogix_dp_device *dp, struct drm_device *drm_dev)
dp->aux.name = "DP-AUX";
dp->aux.transfer = analogix_dpaux_transfer;
dp->aux.dev = dp->dev;
+ dp->aux.drm_dev = drm_dev;
ret = drm_dp_aux_register(&dp->aux);
if (ret)
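
The anx6345, anx78xx and analogix-dp hunks all apply the same pattern: a struct drm_dp_aux now needs its drm_dev member set, alongside name, dev and transfer, before drm_dp_aux_register() is called from a point where the DRM device is known (typically the bridge attach path). A minimal sketch with placeholder names; the foo_* identifiers are illustrative and do not belong to any of these drivers:

static int foo_bridge_attach(struct drm_bridge *bridge,
			     enum drm_bridge_attach_flags flags)
{
	struct foo_device *foo = bridge_to_foo(bridge);

	foo->aux.name = "DP-AUX";
	foo->aux.dev = foo->dev;
	foo->aux.drm_dev = bridge->dev;		/* the new requirement */
	foo->aux.transfer = foo_aux_transfer;

	return drm_dp_aux_register(&foo->aux);
}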
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 23283ba0c4f9..b4e349ca38fe 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -893,7 +893,7 @@ static void anx7625_power_on(struct anx7625_data *ctx)
usleep_range(2000, 2100);
}
- usleep_range(4000, 4100);
+ usleep_range(11000, 12000);
/* Power on pin enable */
gpiod_set_value(ctx->pdata.gpio_p_on, 1);
diff --git a/drivers/gpu/drm/bridge/cadence/Makefile b/drivers/gpu/drm/bridge/cadence/Makefile
index 8f647991b374..4d2db8df1bc6 100644
--- a/drivers/gpu/drm/bridge/cadence/Makefile
+++ b/drivers/gpu/drm/bridge/cadence/Makefile
@@ -1,4 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_DRM_CDNS_MHDP8546) += cdns-mhdp8546.o
-cdns-mhdp8546-y := cdns-mhdp8546-core.o
+cdns-mhdp8546-y := cdns-mhdp8546-core.o cdns-mhdp8546-hdcp.o
cdns-mhdp8546-$(CONFIG_DRM_CDNS_MHDP8546_J721E) += cdns-mhdp8546-j721e.o
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
index 989a05bc8197..0cd8f40fb690 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c
@@ -42,6 +42,7 @@
#include <drm/drm_connector.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_dp_helper.h>
+#include <drm/drm_hdcp.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
@@ -49,7 +50,7 @@
#include <asm/unaligned.h>
#include "cdns-mhdp8546-core.h"
-
+#include "cdns-mhdp8546-hdcp.h"
#include "cdns-mhdp8546-j721e.h"
static int cdns_mhdp_mailbox_read(struct cdns_mhdp_device *mhdp)
@@ -1614,10 +1615,51 @@ enum drm_mode_status cdns_mhdp_mode_valid(struct drm_connector *conn,
return MODE_OK;
}
+static int cdns_mhdp_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct cdns_mhdp_device *mhdp = connector_to_mhdp(conn);
+ struct drm_connector_state *old_state, *new_state;
+ struct drm_crtc_state *crtc_state;
+ u64 old_cp, new_cp;
+
+ if (!mhdp->hdcp_supported)
+ return 0;
+
+ old_state = drm_atomic_get_old_connector_state(state, conn);
+ new_state = drm_atomic_get_new_connector_state(state, conn);
+ old_cp = old_state->content_protection;
+ new_cp = new_state->content_protection;
+
+ if (old_state->hdcp_content_type != new_state->hdcp_content_type &&
+ new_cp != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ goto mode_changed;
+ }
+
+ if (!new_state->crtc) {
+ if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+ new_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ return 0;
+ }
+
+ if (old_cp == new_cp ||
+ (old_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED &&
+ new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED))
+ return 0;
+
+mode_changed:
+ crtc_state = drm_atomic_get_new_crtc_state(state, new_state->crtc);
+ crtc_state->mode_changed = true;
+
+ return 0;
+}
+
static const struct drm_connector_helper_funcs cdns_mhdp_conn_helper_funcs = {
.detect_ctx = cdns_mhdp_connector_detect,
.get_modes = cdns_mhdp_get_modes,
.mode_valid = cdns_mhdp_mode_valid,
+ .atomic_check = cdns_mhdp_connector_atomic_check,
};
static const struct drm_connector_funcs cdns_mhdp_conn_funcs = {
@@ -1662,7 +1704,10 @@ static int cdns_mhdp_connector_init(struct cdns_mhdp_device *mhdp)
return ret;
}
- return 0;
+ if (mhdp->hdcp_supported)
+ ret = drm_connector_attach_content_protection_property(conn, true);
+
+ return ret;
}
static int cdns_mhdp_attach(struct drm_bridge *bridge,
@@ -1674,10 +1719,15 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge,
dev_dbg(mhdp->dev, "%s\n", __func__);
+ mhdp->aux.drm_dev = bridge->dev;
+ ret = drm_dp_aux_register(&mhdp->aux);
+ if (ret < 0)
+ return ret;
+
if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
ret = cdns_mhdp_connector_init(mhdp);
if (ret)
- return ret;
+ goto aux_unregister;
}
spin_lock(&mhdp->start_lock);
@@ -1693,6 +1743,9 @@ static int cdns_mhdp_attach(struct drm_bridge *bridge,
mhdp->regs + CDNS_APB_INT_MASK);
return 0;
+aux_unregister:
+ drm_dp_aux_unregister(&mhdp->aux);
+ return ret;
}
static void cdns_mhdp_configure_video(struct cdns_mhdp_device *mhdp,
@@ -1957,6 +2010,15 @@ static void cdns_mhdp_atomic_enable(struct drm_bridge *bridge,
if (WARN_ON(!conn_state))
goto out;
+ if (mhdp->hdcp_supported &&
+ mhdp->hw_state == MHDP_HW_READY &&
+ conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED) {
+ mutex_unlock(&mhdp->link_mutex);
+ cdns_mhdp_hdcp_enable(mhdp, conn_state->hdcp_content_type);
+ mutex_lock(&mhdp->link_mutex);
+ }
+
crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
if (WARN_ON(!crtc_state))
goto out;
@@ -2000,6 +2062,9 @@ static void cdns_mhdp_atomic_disable(struct drm_bridge *bridge,
mutex_lock(&mhdp->link_mutex);
+ if (mhdp->hdcp_supported)
+ cdns_mhdp_hdcp_disable(mhdp);
+
mhdp->bridge_enabled = false;
cdns_mhdp_reg_read(mhdp, CDNS_DP_FRAMER_GLOBAL_CONFIG, &resp);
resp &= ~CDNS_DP_FRAMER_EN;
@@ -2025,6 +2090,8 @@ static void cdns_mhdp_detach(struct drm_bridge *bridge)
dev_dbg(mhdp->dev, "%s\n", __func__);
+ drm_dp_aux_unregister(&mhdp->aux);
+
spin_lock(&mhdp->start_lock);
mhdp->bridge_attached = false;
@@ -2288,7 +2355,6 @@ static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
struct cdns_mhdp_device *mhdp = data;
u32 apb_stat, sw_ev0;
bool bridge_attached;
- int ret;
apb_stat = readl(mhdp->regs + CDNS_APB_INT_STATUS);
if (!(apb_stat & CDNS_APB_INT_MASK_SW_EVENT_INT))
@@ -2307,20 +2373,54 @@ static irqreturn_t cdns_mhdp_irq_handler(int irq, void *data)
spin_unlock(&mhdp->start_lock);
if (bridge_attached && (sw_ev0 & CDNS_DPTX_HPD)) {
- ret = cdns_mhdp_update_link_status(mhdp);
- if (mhdp->connector.dev) {
- if (ret < 0)
- schedule_work(&mhdp->modeset_retry_work);
- else
- drm_kms_helper_hotplug_event(mhdp->bridge.dev);
- } else {
- drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
- }
+ schedule_work(&mhdp->hpd_work);
+ }
+
+ if (sw_ev0 & ~CDNS_DPTX_HPD) {
+ mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
+ wake_up(&mhdp->sw_events_wq);
}
return IRQ_HANDLED;
}
+u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event)
+{
+ u32 ret;
+
+ ret = wait_event_timeout(mhdp->sw_events_wq,
+ mhdp->sw_events & event,
+ msecs_to_jiffies(500));
+ if (!ret) {
+ dev_dbg(mhdp->dev, "SW event 0x%x timeout\n", event);
+ goto sw_event_out;
+ }
+
+ ret = mhdp->sw_events;
+ mhdp->sw_events &= ~event;
+
+sw_event_out:
+ return ret;
+}
+
+static void cdns_mhdp_hpd_work(struct work_struct *work)
+{
+ struct cdns_mhdp_device *mhdp = container_of(work,
+ struct cdns_mhdp_device,
+ hpd_work);
+ int ret;
+
+ ret = cdns_mhdp_update_link_status(mhdp);
+ if (mhdp->connector.dev) {
+ if (ret < 0)
+ schedule_work(&mhdp->modeset_retry_work);
+ else
+ drm_kms_helper_hotplug_event(mhdp->bridge.dev);
+ } else {
+ drm_bridge_hpd_notify(&mhdp->bridge, cdns_mhdp_detect(mhdp));
+ }
+}
+
static int cdns_mhdp_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2356,6 +2456,15 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
return PTR_ERR(mhdp->regs);
}
+ mhdp->sapb_regs = devm_platform_ioremap_resource_byname(pdev, "mhdptx-sapb");
+ if (IS_ERR(mhdp->sapb_regs)) {
+ mhdp->hdcp_supported = false;
+ dev_warn(dev,
+ "Failed to get SAPB memory resource, HDCP not supported\n");
+ } else {
+ mhdp->hdcp_supported = true;
+ }
+
mhdp->phy = devm_of_phy_get_by_index(dev, pdev->dev.of_node, 0);
if (IS_ERR(mhdp->phy)) {
dev_err(dev, "no PHY configured\n");
@@ -2430,13 +2539,18 @@ static int cdns_mhdp_probe(struct platform_device *pdev)
/* Initialize the work for modeset in case of link train failure */
INIT_WORK(&mhdp->modeset_retry_work, cdns_mhdp_modeset_retry_fn);
+ INIT_WORK(&mhdp->hpd_work, cdns_mhdp_hpd_work);
init_waitqueue_head(&mhdp->fw_load_wq);
+ init_waitqueue_head(&mhdp->sw_events_wq);
ret = cdns_mhdp_load_firmware(mhdp);
if (ret)
goto phy_exit;
+ if (mhdp->hdcp_supported)
+ cdns_mhdp_hdcp_init(mhdp);
+
drm_bridge_add(&mhdp->bridge);
return 0;
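
For non-HPD events the interrupt handler now does only two things: it latches them into mhdp->sw_events and wakes sw_events_wq, while HPD handling itself moves to the new hpd_work. cdns_mhdp_wait_for_sw_event() is the blocking consumer used by the HDCP code. Distilled to its essentials from the handler and helper above (not additional driver code):

/* Producer, interrupt context (from cdns_mhdp_irq_handler()): */
mhdp->sw_events |= (sw_ev0 & ~CDNS_DPTX_HPD);
wake_up(&mhdp->sw_events_wq);

/* Consumer, process context (from cdns_mhdp_wait_for_sw_event()): */
if (wait_event_timeout(mhdp->sw_events_wq, mhdp->sw_events & event,
		       msecs_to_jiffies(500)))
	mhdp->sw_events &= ~event;	/* consume the latched event */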
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
index 5897a85e3159..c74439d0b1a7 100644
--- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.h
@@ -47,6 +47,10 @@ struct phy;
#define CDNS_SW_EVENT0 0x00044
#define CDNS_DPTX_HPD BIT(0)
+#define CDNS_HDCP_TX_STATUS BIT(4)
+#define CDNS_HDCP2_TX_IS_KM_STORED BIT(5)
+#define CDNS_HDCP2_TX_STORE_KM BIT(6)
+#define CDNS_HDCP_TX_IS_RCVR_ID_VALID BIT(7)
#define CDNS_SW_EVENT1 0x00048
#define CDNS_SW_EVENT2 0x0004c
@@ -339,8 +343,17 @@ struct cdns_mhdp_platform_info {
#define to_cdns_mhdp_bridge_state(s) \
container_of(s, struct cdns_mhdp_bridge_state, base)
+struct cdns_mhdp_hdcp {
+ struct delayed_work check_work;
+ struct work_struct prop_work;
+ struct mutex mutex; /* mutex to protect hdcp.value */
+ u32 value;
+ u8 hdcp_content_type;
+};
+
struct cdns_mhdp_device {
void __iomem *regs;
+ void __iomem *sapb_regs;
void __iomem *j721e_regs;
struct device *dev;
@@ -392,9 +405,18 @@ struct cdns_mhdp_device {
/* Work struct to schedule a uevent on link train failure */
struct work_struct modeset_retry_work;
+ struct work_struct hpd_work;
+
+ wait_queue_head_t sw_events_wq;
+ u32 sw_events;
+
+ struct cdns_mhdp_hdcp hdcp;
+ bool hdcp_supported;
};
#define connector_to_mhdp(x) container_of(x, struct cdns_mhdp_device, connector)
#define bridge_to_mhdp(x) container_of(x, struct cdns_mhdp_device, bridge)
+u32 cdns_mhdp_wait_for_sw_event(struct cdns_mhdp_device *mhdp, u32 event);
+
#endif
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
new file mode 100644
index 000000000000..fccd6fbcc257
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.c
@@ -0,0 +1,570 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence MHDP8546 DP bridge driver.
+ *
+ * Copyright (C) 2020 Cadence Design Systems, Inc.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/iopoll.h>
+
+#include <asm/unaligned.h>
+
+#include <drm/drm_hdcp.h>
+
+#include "cdns-mhdp8546-hdcp.h"
+
+static int cdns_mhdp_secure_mailbox_read(struct cdns_mhdp_device *mhdp)
+{
+ int ret, empty;
+
+ WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
+ ret = readx_poll_timeout(readl, mhdp->sapb_regs + CDNS_MAILBOX_EMPTY,
+ empty, !empty, MAILBOX_RETRY_US,
+ MAILBOX_TIMEOUT_US);
+ if (ret < 0)
+ return ret;
+
+ return readl(mhdp->sapb_regs + CDNS_MAILBOX_RX_DATA) & 0xff;
+}
+
+static int cdns_mhdp_secure_mailbox_write(struct cdns_mhdp_device *mhdp,
+ u8 val)
+{
+ int ret, full;
+
+ WARN_ON(!mutex_is_locked(&mhdp->mbox_mutex));
+
+ ret = readx_poll_timeout(readl, mhdp->sapb_regs + CDNS_MAILBOX_FULL,
+ full, !full, MAILBOX_RETRY_US,
+ MAILBOX_TIMEOUT_US);
+ if (ret < 0)
+ return ret;
+
+ writel(val, mhdp->sapb_regs + CDNS_MAILBOX_TX_DATA);
+
+ return 0;
+}
+
+static int cdns_mhdp_secure_mailbox_recv_header(struct cdns_mhdp_device *mhdp,
+ u8 module_id,
+ u8 opcode,
+ u16 req_size)
+{
+ u32 mbox_size, i;
+ u8 header[4];
+ int ret;
+
+ /* read the header of the message */
+ for (i = 0; i < sizeof(header); i++) {
+ ret = cdns_mhdp_secure_mailbox_read(mhdp);
+ if (ret < 0)
+ return ret;
+
+ header[i] = ret;
+ }
+
+ mbox_size = get_unaligned_be16(header + 2);
+
+ if (opcode != header[0] || module_id != header[1] ||
+ (opcode != HDCP_TRAN_IS_REC_ID_VALID && req_size != mbox_size)) {
+ for (i = 0; i < mbox_size; i++)
+ if (cdns_mhdp_secure_mailbox_read(mhdp) < 0)
+ break;
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int cdns_mhdp_secure_mailbox_recv_data(struct cdns_mhdp_device *mhdp,
+ u8 *buff, u16 buff_size)
+{
+ int ret;
+ u32 i;
+
+ for (i = 0; i < buff_size; i++) {
+ ret = cdns_mhdp_secure_mailbox_read(mhdp);
+ if (ret < 0)
+ return ret;
+
+ buff[i] = ret;
+ }
+
+ return 0;
+}
+
+static int cdns_mhdp_secure_mailbox_send(struct cdns_mhdp_device *mhdp,
+ u8 module_id,
+ u8 opcode,
+ u16 size,
+ u8 *message)
+{
+ u8 header[4];
+ int ret;
+ u32 i;
+
+ header[0] = opcode;
+ header[1] = module_id;
+ put_unaligned_be16(size, header + 2);
+
+ for (i = 0; i < sizeof(header); i++) {
+ ret = cdns_mhdp_secure_mailbox_write(mhdp, header[i]);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < size; i++) {
+ ret = cdns_mhdp_secure_mailbox_write(mhdp, message[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int cdns_mhdp_hdcp_get_status(struct cdns_mhdp_device *mhdp,
+ u16 *hdcp_port_status)
+{
+ u8 hdcp_status[HDCP_STATUS_SIZE];
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+ ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP_TRAN_STATUS_CHANGE, 0, NULL);
+ if (ret)
+ goto err_get_hdcp_status;
+
+ ret = cdns_mhdp_secure_mailbox_recv_header(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP_TRAN_STATUS_CHANGE,
+ sizeof(hdcp_status));
+ if (ret)
+ goto err_get_hdcp_status;
+
+ ret = cdns_mhdp_secure_mailbox_recv_data(mhdp, hdcp_status,
+ sizeof(hdcp_status));
+ if (ret)
+ goto err_get_hdcp_status;
+
+ *hdcp_port_status = ((u16)(hdcp_status[0] << 8) | hdcp_status[1]);
+
+err_get_hdcp_status:
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static u8 cdns_mhdp_hdcp_handle_status(struct cdns_mhdp_device *mhdp,
+ u16 status)
+{
+ u8 err = GET_HDCP_PORT_STS_LAST_ERR(status);
+
+ if (err)
+		dev_dbg(mhdp->dev, "HDCP Error = %d\n", err);
+
+ return err;
+}
+
+static int cdns_mhdp_hdcp_rx_id_valid_response(struct cdns_mhdp_device *mhdp,
+ u8 valid)
+{
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+ ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP_TRAN_RESPOND_RECEIVER_ID_VALID,
+ 1, &valid);
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static int cdns_mhdp_hdcp_rx_id_valid(struct cdns_mhdp_device *mhdp,
+ u8 *recv_num, u8 *hdcp_rx_id)
+{
+ u8 rec_id_hdr[2];
+ u8 status;
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+ ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP_TRAN_IS_REC_ID_VALID, 0, NULL);
+ if (ret)
+ goto err_rx_id_valid;
+
+ ret = cdns_mhdp_secure_mailbox_recv_header(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP_TRAN_IS_REC_ID_VALID,
+ sizeof(status));
+ if (ret)
+ goto err_rx_id_valid;
+
+ ret = cdns_mhdp_secure_mailbox_recv_data(mhdp, rec_id_hdr, 2);
+ if (ret)
+ goto err_rx_id_valid;
+
+ *recv_num = rec_id_hdr[0];
+
+ ret = cdns_mhdp_secure_mailbox_recv_data(mhdp, hdcp_rx_id, 5 * *recv_num);
+
+err_rx_id_valid:
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static int cdns_mhdp_hdcp_km_stored_resp(struct cdns_mhdp_device *mhdp,
+ u32 size, u8 *km)
+{
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+ ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP2X_TX_RESPOND_KM, size, km);
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static int cdns_mhdp_hdcp_tx_is_km_stored(struct cdns_mhdp_device *mhdp,
+ u8 *resp, u32 size)
+{
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+ ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP2X_TX_IS_KM_STORED, 0, NULL);
+ if (ret)
+ goto err_is_km_stored;
+
+ ret = cdns_mhdp_secure_mailbox_recv_header(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP2X_TX_IS_KM_STORED,
+ size);
+ if (ret)
+ goto err_is_km_stored;
+
+ ret = cdns_mhdp_secure_mailbox_recv_data(mhdp, resp, size);
+err_is_km_stored:
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static int cdns_mhdp_hdcp_tx_config(struct cdns_mhdp_device *mhdp,
+ u8 hdcp_cfg)
+{
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+ ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP_TRAN_CONFIGURATION, 1, &hdcp_cfg);
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+static int cdns_mhdp_hdcp_set_config(struct cdns_mhdp_device *mhdp,
+ u8 hdcp_config, bool enable)
+{
+ u16 hdcp_port_status;
+ u32 ret_event;
+ u8 hdcp_cfg;
+ int ret;
+
+ hdcp_cfg = hdcp_config | (enable ? 0x04 : 0) |
+ (HDCP_CONTENT_TYPE_0 << 3);
+ cdns_mhdp_hdcp_tx_config(mhdp, hdcp_cfg);
+ ret_event = cdns_mhdp_wait_for_sw_event(mhdp, CDNS_HDCP_TX_STATUS);
+ if (!ret_event)
+ return -1;
+
+ ret = cdns_mhdp_hdcp_get_status(mhdp, &hdcp_port_status);
+ if (ret || cdns_mhdp_hdcp_handle_status(mhdp, hdcp_port_status))
+ return -1;
+
+ return 0;
+}
+
+static int cdns_mhdp_hdcp_auth_check(struct cdns_mhdp_device *mhdp)
+{
+ u16 hdcp_port_status;
+ u32 ret_event;
+ int ret;
+
+ ret_event = cdns_mhdp_wait_for_sw_event(mhdp, CDNS_HDCP_TX_STATUS);
+ if (!ret_event)
+ return -1;
+
+ ret = cdns_mhdp_hdcp_get_status(mhdp, &hdcp_port_status);
+ if (ret || cdns_mhdp_hdcp_handle_status(mhdp, hdcp_port_status))
+ return -1;
+
+	if (hdcp_port_status & HDCP_PORT_STS_AUTH) {
+ dev_dbg(mhdp->dev, "Authentication completed successfully!\n");
+ return 0;
+ }
+
+ dev_dbg(mhdp->dev, "Authentication failed\n");
+
+ return -1;
+}
+
+static int cdns_mhdp_hdcp_check_receivers(struct cdns_mhdp_device *mhdp)
+{
+ u8 hdcp_rec_id[HDCP_MAX_RECEIVERS][HDCP_RECEIVER_ID_SIZE_BYTES];
+ u8 hdcp_num_rec;
+ u32 ret_event;
+
+ ret_event = cdns_mhdp_wait_for_sw_event(mhdp,
+ CDNS_HDCP_TX_IS_RCVR_ID_VALID);
+ if (!ret_event)
+ return -1;
+
+ hdcp_num_rec = 0;
+ memset(&hdcp_rec_id, 0, sizeof(hdcp_rec_id));
+ cdns_mhdp_hdcp_rx_id_valid(mhdp, &hdcp_num_rec, (u8 *)hdcp_rec_id);
+ cdns_mhdp_hdcp_rx_id_valid_response(mhdp, 1);
+
+ return 0;
+}
+
+static int cdns_mhdp_hdcp_auth_22(struct cdns_mhdp_device *mhdp)
+{
+ u8 resp[HDCP_STATUS_SIZE];
+ u16 hdcp_port_status;
+ u32 ret_event;
+ int ret;
+
+ dev_dbg(mhdp->dev, "HDCP: Start 2.2 Authentication\n");
+ ret_event = cdns_mhdp_wait_for_sw_event(mhdp,
+ CDNS_HDCP2_TX_IS_KM_STORED);
+ if (!ret_event)
+ return -1;
+
+ if (ret_event & CDNS_HDCP_TX_STATUS) {
+ mhdp->sw_events &= ~CDNS_HDCP_TX_STATUS;
+ ret = cdns_mhdp_hdcp_get_status(mhdp, &hdcp_port_status);
+ if (ret || cdns_mhdp_hdcp_handle_status(mhdp, hdcp_port_status))
+ return -1;
+ }
+
+ cdns_mhdp_hdcp_tx_is_km_stored(mhdp, resp, sizeof(resp));
+ cdns_mhdp_hdcp_km_stored_resp(mhdp, 0, NULL);
+
+	if (cdns_mhdp_hdcp_check_receivers(mhdp))
+ return -1;
+
+ return 0;
+}
+
+static inline int cdns_mhdp_hdcp_auth_14(struct cdns_mhdp_device *mhdp)
+{
+ dev_dbg(mhdp->dev, "HDCP: Starting 1.4 Authentication\n");
+	return cdns_mhdp_hdcp_check_receivers(mhdp);
+}
+
+static int cdns_mhdp_hdcp_auth(struct cdns_mhdp_device *mhdp,
+ u8 hdcp_config)
+{
+ int ret;
+
+ ret = cdns_mhdp_hdcp_set_config(mhdp, hdcp_config, true);
+ if (ret)
+ goto auth_failed;
+
+ if (hdcp_config == HDCP_TX_1)
+ ret = cdns_mhdp_hdcp_auth_14(mhdp);
+ else
+ ret = cdns_mhdp_hdcp_auth_22(mhdp);
+
+ if (ret)
+ goto auth_failed;
+
+ ret = cdns_mhdp_hdcp_auth_check(mhdp);
+ if (ret)
+ ret = cdns_mhdp_hdcp_auth_check(mhdp);
+
+auth_failed:
+ return ret;
+}
+
+static int _cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
+{
+ int ret;
+
+ dev_dbg(mhdp->dev, "[%s:%d] HDCP is being disabled...\n",
+ mhdp->connector.name, mhdp->connector.base.id);
+
+ ret = cdns_mhdp_hdcp_set_config(mhdp, 0, false);
+
+ return ret;
+}
+
+static int _cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
+{
+ int ret, tries = 3;
+ u32 i;
+
+ for (i = 0; i < tries; i++) {
+ if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0 ||
+ content_type == DRM_MODE_HDCP_CONTENT_TYPE1) {
+ ret = cdns_mhdp_hdcp_auth(mhdp, HDCP_TX_2);
+ if (!ret)
+ return 0;
+ _cdns_mhdp_hdcp_disable(mhdp);
+ }
+
+ if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) {
+ ret = cdns_mhdp_hdcp_auth(mhdp, HDCP_TX_1);
+ if (!ret)
+ return 0;
+ _cdns_mhdp_hdcp_disable(mhdp);
+ }
+ }
+
+	dev_err(mhdp->dev, "HDCP authentication failed (%d tries, last error %d)\n",
+		tries, ret);
+
+ return ret;
+}
+
+static int cdns_mhdp_hdcp_check_link(struct cdns_mhdp_device *mhdp)
+{
+ u16 hdcp_port_status;
+ int ret = 0;
+
+ mutex_lock(&mhdp->hdcp.mutex);
+ if (mhdp->hdcp.value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
+ goto out;
+
+ ret = cdns_mhdp_hdcp_get_status(mhdp, &hdcp_port_status);
+ if (!ret && hdcp_port_status & HDCP_PORT_STS_AUTH)
+ goto out;
+
+ dev_err(mhdp->dev,
+ "[%s:%d] HDCP link failed, retrying authentication\n",
+ mhdp->connector.name, mhdp->connector.base.id);
+
+ ret = _cdns_mhdp_hdcp_disable(mhdp);
+ if (ret) {
+ mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&mhdp->hdcp.prop_work);
+ goto out;
+ }
+
+ ret = _cdns_mhdp_hdcp_enable(mhdp, mhdp->hdcp.hdcp_content_type);
+ if (ret) {
+ mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&mhdp->hdcp.prop_work);
+ }
+out:
+ mutex_unlock(&mhdp->hdcp.mutex);
+ return ret;
+}
+
+static void cdns_mhdp_hdcp_check_work(struct work_struct *work)
+{
+ struct delayed_work *d_work = to_delayed_work(work);
+ struct cdns_mhdp_hdcp *hdcp = container_of(d_work,
+ struct cdns_mhdp_hdcp,
+ check_work);
+ struct cdns_mhdp_device *mhdp = container_of(hdcp,
+ struct cdns_mhdp_device,
+ hdcp);
+
+ if (!cdns_mhdp_hdcp_check_link(mhdp))
+ schedule_delayed_work(&hdcp->check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+}
+
+static void cdns_mhdp_hdcp_prop_work(struct work_struct *work)
+{
+ struct cdns_mhdp_hdcp *hdcp = container_of(work,
+ struct cdns_mhdp_hdcp,
+ prop_work);
+ struct cdns_mhdp_device *mhdp = container_of(hdcp,
+ struct cdns_mhdp_device,
+ hdcp);
+ struct drm_device *dev = mhdp->connector.dev;
+ struct drm_connector_state *state;
+
+ drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ mutex_lock(&mhdp->hdcp.mutex);
+ if (mhdp->hdcp.value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ state = mhdp->connector.state;
+ state->content_protection = mhdp->hdcp.value;
+ }
+ mutex_unlock(&mhdp->hdcp.mutex);
+ drm_modeset_unlock(&dev->mode_config.connection_mutex);
+}
+
+int cdns_mhdp_hdcp_set_lc(struct cdns_mhdp_device *mhdp, u8 *val)
+{
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+ ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_GENERAL,
+ HDCP_GENERAL_SET_LC_128,
+ 16, val);
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+int
+cdns_mhdp_hdcp_set_public_key_param(struct cdns_mhdp_device *mhdp,
+ struct cdns_hdcp_tx_public_key_param *val)
+{
+ int ret;
+
+ mutex_lock(&mhdp->mbox_mutex);
+ ret = cdns_mhdp_secure_mailbox_send(mhdp, MB_MODULE_ID_HDCP_TX,
+ HDCP2X_TX_SET_PUBLIC_KEY_PARAMS,
+ sizeof(*val), (u8 *)val);
+ mutex_unlock(&mhdp->mbox_mutex);
+
+ return ret;
+}
+
+int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type)
+{
+ int ret;
+
+ mutex_lock(&mhdp->hdcp.mutex);
+ ret = _cdns_mhdp_hdcp_enable(mhdp, content_type);
+ if (ret)
+ goto out;
+
+ mhdp->hdcp.hdcp_content_type = content_type;
+ mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+ schedule_work(&mhdp->hdcp.prop_work);
+ schedule_delayed_work(&mhdp->hdcp.check_work,
+ DRM_HDCP_CHECK_PERIOD_MS);
+out:
+ mutex_unlock(&mhdp->hdcp.mutex);
+ return ret;
+}
+
+int cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp)
+{
+ int ret = 0;
+
+ mutex_lock(&mhdp->hdcp.mutex);
+ if (mhdp->hdcp.value != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) {
+ mhdp->hdcp.value = DRM_MODE_CONTENT_PROTECTION_UNDESIRED;
+ schedule_work(&mhdp->hdcp.prop_work);
+ ret = _cdns_mhdp_hdcp_disable(mhdp);
+ }
+ mutex_unlock(&mhdp->hdcp.mutex);
+ cancel_delayed_work_sync(&mhdp->hdcp.check_work);
+
+ return ret;
+}
+
+void cdns_mhdp_hdcp_init(struct cdns_mhdp_device *mhdp)
+{
+ INIT_DELAYED_WORK(&mhdp->hdcp.check_work, cdns_mhdp_hdcp_check_work);
+ INIT_WORK(&mhdp->hdcp.prop_work, cdns_mhdp_hdcp_prop_work);
+ mutex_init(&mhdp->hdcp.mutex);
+}
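
Every secure-mailbox exchange above uses the same 4-byte frame in front of the payload: opcode, module ID and a big-endian 16-bit length, which cdns_mhdp_secure_mailbox_recv_header() checks again on the receive side. A stand-alone sketch of just the framing with no hardware access; the mhdp_mbox_pack() name and the caller-supplied buffer are assumptions for illustration (the driver instead writes the bytes one by one to the SAPB mailbox):

/* Illustrative framing only; assumes <asm/unaligned.h> and <linux/string.h>. */
static size_t mhdp_mbox_pack(u8 *buf, u8 module_id, u8 opcode,
			     const u8 *payload, u16 len)
{
	buf[0] = opcode;
	buf[1] = module_id;
	put_unaligned_be16(len, buf + 2);	/* length travels big-endian */
	memcpy(buf + 4, payload, len);
	return 4 + len;
}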
diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
new file mode 100644
index 000000000000..334c0b8b0d4f
--- /dev/null
+++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-hdcp.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence MHDP8546 DP bridge driver.
+ *
+ * Copyright (C) 2020 Cadence Design Systems, Inc.
+ *
+ */
+
+#ifndef CDNS_MHDP8546_HDCP_H
+#define CDNS_MHDP8546_HDCP_H
+
+#include "cdns-mhdp8546-core.h"
+
+#define HDCP_MAX_RECEIVERS 32
+#define HDCP_RECEIVER_ID_SIZE_BYTES 5
+#define HDCP_STATUS_SIZE 0x5
+#define HDCP_PORT_STS_AUTH 0x1
+#define HDCP_PORT_STS_LAST_ERR_SHIFT 0x5
+#define HDCP_PORT_STS_LAST_ERR_MASK (0x0F << 5)
+#define GET_HDCP_PORT_STS_LAST_ERR(__sts__) \
+ (((__sts__) & HDCP_PORT_STS_LAST_ERR_MASK) >> \
+ HDCP_PORT_STS_LAST_ERR_SHIFT)
+
+#define HDCP_CONFIG_1_4 BIT(0) /* use HDCP 1.4 only */
+#define HDCP_CONFIG_2_2 BIT(1) /* use HDCP 2.2 only */
+/* use All HDCP versions */
+#define HDCP_CONFIG_ALL (BIT(0) | BIT(1))
+#define HDCP_CONFIG_NONE 0
+
+enum {
+ HDCP_GENERAL_SET_LC_128,
+ HDCP_SET_SEED,
+};
+
+enum {
+ HDCP_TRAN_CONFIGURATION,
+ HDCP2X_TX_SET_PUBLIC_KEY_PARAMS,
+ HDCP2X_TX_SET_DEBUG_RANDOM_NUMBERS,
+ HDCP2X_TX_RESPOND_KM,
+ HDCP1_TX_SEND_KEYS,
+ HDCP1_TX_SEND_RANDOM_AN,
+ HDCP_TRAN_STATUS_CHANGE,
+ HDCP2X_TX_IS_KM_STORED,
+ HDCP2X_TX_STORE_KM,
+ HDCP_TRAN_IS_REC_ID_VALID,
+ HDCP_TRAN_RESPOND_RECEIVER_ID_VALID,
+ HDCP_TRAN_TEST_KEYS,
+ HDCP2X_TX_SET_KM_KEY_PARAMS,
+ HDCP_NUM_OF_SUPPORTED_MESSAGES
+};
+
+enum {
+ HDCP_CONTENT_TYPE_0,
+ HDCP_CONTENT_TYPE_1,
+};
+
+#define DRM_HDCP_CHECK_PERIOD_MS (128 * 16)
+
+#define HDCP_PAIRING_R_ID 5
+#define HDCP_PAIRING_M_LEN 16
+#define HDCP_KM_LEN 16
+#define HDCP_PAIRING_M_EKH 16
+
+struct cdns_hdcp_pairing_data {
+ u8 receiver_id[HDCP_PAIRING_R_ID];
+ u8 m[HDCP_PAIRING_M_LEN];
+ u8 km[HDCP_KM_LEN];
+ u8 ekh[HDCP_PAIRING_M_EKH];
+};
+
+enum {
+ HDCP_TX_2,
+ HDCP_TX_1,
+ HDCP_TX_BOTH,
+};
+
+#define DLP_MODULUS_N 384
+#define DLP_E 3
+
+struct cdns_hdcp_tx_public_key_param {
+ u8 N[DLP_MODULUS_N];
+ u8 E[DLP_E];
+};
+
+int cdns_mhdp_hdcp_set_public_key_param(struct cdns_mhdp_device *mhdp,
+ struct cdns_hdcp_tx_public_key_param *val);
+int cdns_mhdp_hdcp_set_lc(struct cdns_mhdp_device *mhdp, u8 *val);
+int cdns_mhdp_hdcp_enable(struct cdns_mhdp_device *mhdp, u8 content_type);
+int cdns_mhdp_hdcp_disable(struct cdns_mhdp_device *mhdp);
+void cdns_mhdp_hdcp_init(struct cdns_mhdp_device *mhdp);
+
+#endif
diff --git a/drivers/gpu/drm/bridge/ite-it66121.c b/drivers/gpu/drm/bridge/ite-it66121.c
new file mode 100644
index 000000000000..d8a60691fd32
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ite-it66121.c
@@ -0,0 +1,1021 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 BayLibre, SAS
+ * Author: Phong LE <ple@baylibre.com>
+ * Copyright (C) 2018-2019, Artem Mygaiev
+ * Copyright (C) 2017, Fresco Logic, Incorporated.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/bitfield.h>
+#include <linux/property.h>
+#include <linux/regmap.h>
+#include <linux/of_graph.h>
+#include <linux/gpio/consumer.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+#define IT66121_VENDOR_ID0_REG 0x00
+#define IT66121_VENDOR_ID1_REG 0x01
+#define IT66121_DEVICE_ID0_REG 0x02
+#define IT66121_DEVICE_ID1_REG 0x03
+
+#define IT66121_VENDOR_ID0 0x54
+#define IT66121_VENDOR_ID1 0x49
+#define IT66121_DEVICE_ID0 0x12
+#define IT66121_DEVICE_ID1 0x06
+#define IT66121_REVISION_MASK GENMASK(7, 4)
+#define IT66121_DEVICE_ID1_MASK GENMASK(3, 0)
+
+#define IT66121_MASTER_SEL_REG 0x10
+#define IT66121_MASTER_SEL_HOST BIT(0)
+
+#define IT66121_AFE_DRV_REG 0x61
+#define IT66121_AFE_DRV_RST BIT(4)
+#define IT66121_AFE_DRV_PWD BIT(5)
+
+#define IT66121_INPUT_MODE_REG 0x70
+#define IT66121_INPUT_MODE_RGB (0 << 6)
+#define IT66121_INPUT_MODE_YUV422 BIT(6)
+#define IT66121_INPUT_MODE_YUV444 (2 << 6)
+#define IT66121_INPUT_MODE_CCIR656 BIT(4)
+#define IT66121_INPUT_MODE_SYNCEMB BIT(3)
+#define IT66121_INPUT_MODE_DDR BIT(2)
+
+#define IT66121_INPUT_CSC_REG 0x72
+#define IT66121_INPUT_CSC_ENDITHER BIT(7)
+#define IT66121_INPUT_CSC_ENUDFILTER BIT(6)
+#define IT66121_INPUT_CSC_DNFREE_GO BIT(5)
+#define IT66121_INPUT_CSC_RGB_TO_YUV 0x02
+#define IT66121_INPUT_CSC_YUV_TO_RGB 0x03
+#define IT66121_INPUT_CSC_NO_CONV 0x00
+
+#define IT66121_AFE_XP_REG 0x62
+#define IT66121_AFE_XP_GAINBIT BIT(7)
+#define IT66121_AFE_XP_PWDPLL BIT(6)
+#define IT66121_AFE_XP_ENI BIT(5)
+#define IT66121_AFE_XP_ENO BIT(4)
+#define IT66121_AFE_XP_RESETB BIT(3)
+#define IT66121_AFE_XP_PWDI BIT(2)
+
+#define IT66121_AFE_IP_REG 0x64
+#define IT66121_AFE_IP_GAINBIT BIT(7)
+#define IT66121_AFE_IP_PWDPLL BIT(6)
+#define IT66121_AFE_IP_CKSEL_05 (0 << 4)
+#define IT66121_AFE_IP_CKSEL_1 BIT(4)
+#define IT66121_AFE_IP_CKSEL_2 (2 << 4)
+#define IT66121_AFE_IP_CKSEL_2OR4 (3 << 4)
+#define IT66121_AFE_IP_ER0 BIT(3)
+#define IT66121_AFE_IP_RESETB BIT(2)
+#define IT66121_AFE_IP_ENC BIT(1)
+#define IT66121_AFE_IP_EC1 BIT(0)
+
+#define IT66121_AFE_XP_EC1_REG 0x68
+#define IT66121_AFE_XP_EC1_LOWCLK BIT(4)
+
+#define IT66121_SW_RST_REG 0x04
+#define IT66121_SW_RST_REF BIT(5)
+#define IT66121_SW_RST_AREF BIT(4)
+#define IT66121_SW_RST_VID BIT(3)
+#define IT66121_SW_RST_AUD BIT(2)
+#define IT66121_SW_RST_HDCP BIT(0)
+
+#define IT66121_DDC_COMMAND_REG 0x15
+#define IT66121_DDC_COMMAND_BURST_READ 0x0
+#define IT66121_DDC_COMMAND_EDID_READ 0x3
+#define IT66121_DDC_COMMAND_FIFO_CLR 0x9
+#define IT66121_DDC_COMMAND_SCL_PULSE 0xA
+#define IT66121_DDC_COMMAND_ABORT 0xF
+
+#define IT66121_HDCP_REG 0x20
+#define IT66121_HDCP_CPDESIRED BIT(0)
+#define IT66121_HDCP_EN1P1FEAT BIT(1)
+
+#define IT66121_INT_STATUS1_REG 0x06
+#define IT66121_INT_STATUS1_AUD_OVF BIT(7)
+#define IT66121_INT_STATUS1_DDC_NOACK BIT(5)
+#define IT66121_INT_STATUS1_DDC_FIFOERR BIT(4)
+#define IT66121_INT_STATUS1_DDC_BUSHANG BIT(2)
+#define IT66121_INT_STATUS1_RX_SENS_STATUS BIT(1)
+#define IT66121_INT_STATUS1_HPD_STATUS BIT(0)
+
+#define IT66121_DDC_HEADER_REG 0x11
+#define IT66121_DDC_HEADER_HDCP 0x74
+#define IT66121_DDC_HEADER_EDID 0xA0
+
+#define IT66121_DDC_OFFSET_REG 0x12
+#define IT66121_DDC_BYTE_REG 0x13
+#define IT66121_DDC_SEGMENT_REG 0x14
+#define IT66121_DDC_RD_FIFO_REG 0x17
+
+#define IT66121_CLK_BANK_REG 0x0F
+#define IT66121_CLK_BANK_PWROFF_RCLK BIT(6)
+#define IT66121_CLK_BANK_PWROFF_ACLK BIT(5)
+#define IT66121_CLK_BANK_PWROFF_TXCLK BIT(4)
+#define IT66121_CLK_BANK_PWROFF_CRCLK BIT(3)
+#define IT66121_CLK_BANK_0 0
+#define IT66121_CLK_BANK_1 1
+
+#define IT66121_INT_REG 0x05
+#define IT66121_INT_ACTIVE_HIGH BIT(7)
+#define IT66121_INT_OPEN_DRAIN BIT(6)
+#define IT66121_INT_TX_CLK_OFF BIT(0)
+
+#define IT66121_INT_MASK1_REG 0x09
+#define IT66121_INT_MASK1_AUD_OVF BIT(7)
+#define IT66121_INT_MASK1_DDC_NOACK BIT(5)
+#define IT66121_INT_MASK1_DDC_FIFOERR BIT(4)
+#define IT66121_INT_MASK1_DDC_BUSHANG BIT(2)
+#define IT66121_INT_MASK1_RX_SENS BIT(1)
+#define IT66121_INT_MASK1_HPD BIT(0)
+
+#define IT66121_INT_CLR1_REG 0x0C
+#define IT66121_INT_CLR1_PKTACP BIT(7)
+#define IT66121_INT_CLR1_PKTNULL BIT(6)
+#define IT66121_INT_CLR1_PKTGEN BIT(5)
+#define IT66121_INT_CLR1_KSVLISTCHK BIT(4)
+#define IT66121_INT_CLR1_AUTHDONE BIT(3)
+#define IT66121_INT_CLR1_AUTHFAIL BIT(2)
+#define IT66121_INT_CLR1_RX_SENS BIT(1)
+#define IT66121_INT_CLR1_HPD BIT(0)
+
+#define IT66121_AV_MUTE_REG 0xC1
+#define IT66121_AV_MUTE_ON BIT(0)
+#define IT66121_AV_MUTE_BLUESCR BIT(1)
+
+#define IT66121_PKT_GEN_CTRL_REG 0xC6
+#define IT66121_PKT_GEN_CTRL_ON BIT(0)
+#define IT66121_PKT_GEN_CTRL_RPT BIT(1)
+
+#define IT66121_AVIINFO_DB1_REG 0x158
+#define IT66121_AVIINFO_DB2_REG 0x159
+#define IT66121_AVIINFO_DB3_REG 0x15A
+#define IT66121_AVIINFO_DB4_REG 0x15B
+#define IT66121_AVIINFO_DB5_REG 0x15C
+#define IT66121_AVIINFO_CSUM_REG 0x15D
+#define IT66121_AVIINFO_DB6_REG 0x15E
+#define IT66121_AVIINFO_DB7_REG 0x15F
+#define IT66121_AVIINFO_DB8_REG 0x160
+#define IT66121_AVIINFO_DB9_REG 0x161
+#define IT66121_AVIINFO_DB10_REG 0x162
+#define IT66121_AVIINFO_DB11_REG 0x163
+#define IT66121_AVIINFO_DB12_REG 0x164
+#define IT66121_AVIINFO_DB13_REG 0x165
+
+#define IT66121_AVI_INFO_PKT_REG 0xCD
+#define IT66121_AVI_INFO_PKT_ON BIT(0)
+#define IT66121_AVI_INFO_PKT_RPT BIT(1)
+
+#define IT66121_HDMI_MODE_REG 0xC0
+#define IT66121_HDMI_MODE_HDMI BIT(0)
+
+#define IT66121_SYS_STATUS_REG 0x0E
+#define IT66121_SYS_STATUS_ACTIVE_IRQ BIT(7)
+#define IT66121_SYS_STATUS_HPDETECT BIT(6)
+#define IT66121_SYS_STATUS_SENDECTECT BIT(5)
+#define IT66121_SYS_STATUS_VID_STABLE BIT(4)
+#define IT66121_SYS_STATUS_AUD_CTS_CLR BIT(1)
+#define IT66121_SYS_STATUS_CLEAR_IRQ BIT(0)
+
+#define IT66121_DDC_STATUS_REG 0x16
+#define IT66121_DDC_STATUS_TX_DONE BIT(7)
+#define IT66121_DDC_STATUS_ACTIVE BIT(6)
+#define IT66121_DDC_STATUS_NOACK BIT(5)
+#define IT66121_DDC_STATUS_WAIT_BUS BIT(4)
+#define IT66121_DDC_STATUS_ARBI_LOSE BIT(3)
+#define IT66121_DDC_STATUS_FIFO_FULL BIT(2)
+#define IT66121_DDC_STATUS_FIFO_EMPTY BIT(1)
+#define IT66121_DDC_STATUS_FIFO_VALID BIT(0)
+
+#define IT66121_EDID_SLEEP_US 20000
+#define IT66121_EDID_TIMEOUT_US 200000
+#define IT66121_EDID_FIFO_SIZE 32
+#define IT66121_AFE_CLK_HIGH	80000 /* kHz */
+
+struct it66121_ctx {
+ struct regmap *regmap;
+ struct drm_bridge bridge;
+ struct drm_bridge *next_bridge;
+ struct drm_connector *connector;
+ struct device *dev;
+ struct gpio_desc *gpio_reset;
+ struct i2c_client *client;
+ struct regulator_bulk_data supplies[3];
+ u32 bus_width;
+ struct mutex lock; /* Protects fields below and device registers */
+ struct hdmi_avi_infoframe hdmi_avi_infoframe;
+};
+
+static const struct regmap_range_cfg it66121_regmap_banks[] = {
+ {
+ .name = "it66121",
+ .range_min = 0x00,
+ .range_max = 0x1FF,
+ .selector_reg = IT66121_CLK_BANK_REG,
+ .selector_mask = 0x1,
+ .selector_shift = 0,
+ .window_start = 0x00,
+ .window_len = 0x130,
+ },
+};
+
+static const struct regmap_config it66121_regmap_config = {
+ .val_bits = 8,
+ .reg_bits = 8,
+ .max_register = 0x1FF,
+ .ranges = it66121_regmap_banks,
+ .num_ranges = ARRAY_SIZE(it66121_regmap_banks),
+};
+
+static void it66121_hw_reset(struct it66121_ctx *ctx)
+{
+ gpiod_set_value(ctx->gpio_reset, 1);
+ msleep(20);
+ gpiod_set_value(ctx->gpio_reset, 0);
+}
+
+static inline int ite66121_power_on(struct it66121_ctx *ctx)
+{
+ return regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static inline int ite66121_power_off(struct it66121_ctx *ctx)
+{
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static inline int it66121_preamble_ddc(struct it66121_ctx *ctx)
+{
+ return regmap_write(ctx->regmap, IT66121_MASTER_SEL_REG, IT66121_MASTER_SEL_HOST);
+}
+
+static inline int it66121_fire_afe(struct it66121_ctx *ctx)
+{
+ return regmap_write(ctx->regmap, IT66121_AFE_DRV_REG, 0);
+}
+
+/* TOFIX: Handle YCbCr Input & Output */
+static int it66121_configure_input(struct it66121_ctx *ctx)
+{
+ int ret;
+ u8 mode = IT66121_INPUT_MODE_RGB;
+
+ if (ctx->bus_width == 12)
+ mode |= IT66121_INPUT_MODE_DDR;
+
+ ret = regmap_write(ctx->regmap, IT66121_INPUT_MODE_REG, mode);
+ if (ret)
+ return ret;
+
+ return regmap_write(ctx->regmap, IT66121_INPUT_CSC_REG, IT66121_INPUT_CSC_NO_CONV);
+}
+
+/**
+ * it66121_configure_afe() - Configure the analog front end
+ * @ctx: it66121_ctx object
+ * @mode: mode to configure
+ *
+ * RETURNS:
+ * zero if success, a negative error code otherwise.
+ */
+static int it66121_configure_afe(struct it66121_ctx *ctx,
+ const struct drm_display_mode *mode)
+{
+ int ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_AFE_DRV_REG,
+ IT66121_AFE_DRV_RST);
+ if (ret)
+ return ret;
+
+ if (mode->clock > IT66121_AFE_CLK_HIGH) {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+ IT66121_AFE_XP_GAINBIT |
+ IT66121_AFE_XP_ENO,
+ IT66121_AFE_XP_GAINBIT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_GAINBIT |
+ IT66121_AFE_IP_ER0 |
+ IT66121_AFE_IP_EC1,
+ IT66121_AFE_IP_GAINBIT);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+ IT66121_AFE_XP_EC1_LOWCLK, 0x80);
+ if (ret)
+ return ret;
+ } else {
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+ IT66121_AFE_XP_GAINBIT |
+ IT66121_AFE_XP_ENO,
+ IT66121_AFE_XP_ENO);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_GAINBIT |
+ IT66121_AFE_IP_ER0 |
+ IT66121_AFE_IP_EC1, IT66121_AFE_IP_ER0 |
+ IT66121_AFE_IP_EC1);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_EC1_REG,
+ IT66121_AFE_XP_EC1_LOWCLK,
+ IT66121_AFE_XP_EC1_LOWCLK);
+ if (ret)
+ return ret;
+ }
+
+ /* Clear reset flags */
+ ret = regmap_write_bits(ctx->regmap, IT66121_SW_RST_REG,
+ IT66121_SW_RST_REF | IT66121_SW_RST_VID, 0);
+ if (ret)
+ return ret;
+
+ return it66121_fire_afe(ctx);
+}
+
+static inline int it66121_wait_ddc_ready(struct it66121_ctx *ctx)
+{
+ int ret, val;
+ u32 busy = IT66121_DDC_STATUS_NOACK | IT66121_DDC_STATUS_WAIT_BUS |
+ IT66121_DDC_STATUS_ARBI_LOSE;
+
+ ret = regmap_read_poll_timeout(ctx->regmap, IT66121_DDC_STATUS_REG, val, true,
+ IT66121_EDID_SLEEP_US, IT66121_EDID_TIMEOUT_US);
+ if (ret)
+ return ret;
+
+ if (val & busy)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int it66121_clear_ddc_fifo(struct it66121_ctx *ctx)
+{
+ int ret;
+
+ ret = it66121_preamble_ddc(ctx);
+ if (ret)
+ return ret;
+
+ return regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
+ IT66121_DDC_COMMAND_FIFO_CLR);
+}
+
+static int it66121_abort_ddc_ops(struct it66121_ctx *ctx)
+{
+ int ret;
+ unsigned int swreset, cpdesire;
+
+ ret = regmap_read(ctx->regmap, IT66121_SW_RST_REG, &swreset);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(ctx->regmap, IT66121_HDCP_REG, &cpdesire);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_HDCP_REG,
+ cpdesire & (~IT66121_HDCP_CPDESIRED & 0xFF));
+ if (ret)
+ return ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_SW_RST_REG,
+ (swreset | IT66121_SW_RST_HDCP));
+ if (ret)
+ return ret;
+
+ ret = it66121_preamble_ddc(ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
+ IT66121_DDC_COMMAND_ABORT);
+ if (ret)
+ return ret;
+
+ return it66121_wait_ddc_ready(ctx);
+}
+
+static int it66121_get_edid_block(void *context, u8 *buf,
+ unsigned int block, size_t len)
+{
+ struct it66121_ctx *ctx = context;
+ unsigned int val;
+ int remain = len;
+ int offset = 0;
+ int ret, cnt;
+
+ offset = (block % 2) * len;
+ block = block / 2;
+
+ ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
+ if (ret)
+ return ret;
+
+ if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
+ ret = it66121_abort_ddc_ops(ctx);
+ if (ret)
+ return ret;
+ }
+
+ ret = it66121_clear_ddc_fifo(ctx);
+ if (ret)
+ return ret;
+
+ while (remain > 0) {
+ cnt = (remain > IT66121_EDID_FIFO_SIZE) ?
+ IT66121_EDID_FIFO_SIZE : remain;
+ ret = it66121_preamble_ddc(ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
+ IT66121_DDC_COMMAND_FIFO_CLR);
+ if (ret)
+ return ret;
+
+ ret = it66121_wait_ddc_ready(ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
+ if (ret)
+ return ret;
+
+ if (val & IT66121_INT_STATUS1_DDC_BUSHANG) {
+ ret = it66121_abort_ddc_ops(ctx);
+ if (ret)
+ return ret;
+ }
+
+ ret = it66121_preamble_ddc(ctx);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_HEADER_REG,
+ IT66121_DDC_HEADER_EDID);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_OFFSET_REG, offset);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_BYTE_REG, cnt);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_SEGMENT_REG, block);
+ if (ret)
+ return ret;
+
+ ret = regmap_write(ctx->regmap, IT66121_DDC_COMMAND_REG,
+ IT66121_DDC_COMMAND_EDID_READ);
+ if (ret)
+ return ret;
+
+ offset += cnt;
+ remain -= cnt;
+
+ /* Per programming manual, sleep here before emptying the FIFO */
+ msleep(20);
+
+ ret = it66121_wait_ddc_ready(ctx);
+ if (ret)
+ return ret;
+
+ do {
+ ret = regmap_read(ctx->regmap, IT66121_DDC_RD_FIFO_REG, &val);
+ if (ret)
+ return ret;
+ *(buf++) = val;
+ cnt--;
+ } while (cnt > 0);
+ }
+
+ return 0;
+}
+
+static bool it66121_is_hpd_detect(struct it66121_ctx *ctx)
+{
+ int val;
+
+ if (regmap_read(ctx->regmap, IT66121_SYS_STATUS_REG, &val))
+ return false;
+
+ return val & IT66121_SYS_STATUS_HPDETECT;
+}
+
+static int it66121_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+	ret = drm_bridge_attach(bridge->encoder, ctx->next_bridge, bridge, flags);
+	if (ret)
+		return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_RCLK, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_INT_REG,
+ IT66121_INT_TX_CLK_OFF, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_DRV_REG,
+ IT66121_AFE_DRV_PWD, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+ IT66121_AFE_XP_PWDI | IT66121_AFE_XP_PWDPLL, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_PWDPLL, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_DRV_REG,
+ IT66121_AFE_DRV_RST, 0);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_XP_REG,
+ IT66121_AFE_XP_RESETB, IT66121_AFE_XP_RESETB);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AFE_IP_REG,
+ IT66121_AFE_IP_RESETB, IT66121_AFE_IP_RESETB);
+ if (ret)
+ return ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_SW_RST_REG,
+ IT66121_SW_RST_REF,
+ IT66121_SW_RST_REF);
+ if (ret)
+ return ret;
+
+ /* Per programming manual, sleep here for bridge to settle */
+ msleep(50);
+
+ /* Start interrupts */
+ return regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG,
+ IT66121_INT_MASK1_DDC_NOACK |
+ IT66121_INT_MASK1_DDC_FIFOERR |
+ IT66121_INT_MASK1_DDC_BUSHANG, 0);
+}
+
+static int it66121_set_mute(struct it66121_ctx *ctx, bool mute)
+{
+ int ret;
+ unsigned int val = 0;
+
+ if (mute)
+ val = IT66121_AV_MUTE_ON;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_AV_MUTE_REG, IT66121_AV_MUTE_ON, val);
+ if (ret)
+ return ret;
+
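+ /* Enable packet generation with repeat so the updated mute state reaches the sink */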
+ return regmap_write(ctx->regmap, IT66121_PKT_GEN_CTRL_REG,
+ IT66121_PKT_GEN_CTRL_ON | IT66121_PKT_GEN_CTRL_RPT);
+}
+
+#define MAX_OUTPUT_SEL_FORMATS 1
+
+static u32 *it66121_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ u32 *output_fmts;
+
+ output_fmts = kcalloc(MAX_OUTPUT_SEL_FORMATS, sizeof(*output_fmts),
+ GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ /* TOFIX handle more than MEDIA_BUS_FMT_RGB888_1X24 as output format */
+ output_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ *num_output_fmts = 1;
+
+ return output_fmts;
+}
+
+#define MAX_INPUT_SEL_FORMATS 1
+
+static u32 *it66121_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+ u32 *input_fmts;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ if (ctx->bus_width == 12)
+ /* IT66121FN Datasheet specifies Little-Endian ordering */
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_2X12_LE;
+ else
+ /* TOFIX support more input bus formats in 24bit width */
+ input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ *num_input_fmts = 1;
+
+ return input_fmts;
+}
+
+static void it66121_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+
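+ /* Cache the connector so .mode_set can build the AVI infoframe for it */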
+ ctx->connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder);
+
+ it66121_set_mute(ctx, false);
+}
+
+static void it66121_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+
+ it66121_set_mute(ctx, true);
+
+ ctx->connector = NULL;
+}
+
+static
+void it66121_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ int ret, i;
+ u8 buf[HDMI_INFOFRAME_SIZE(AVI)];
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+ const u16 aviinfo_reg[HDMI_AVI_INFOFRAME_SIZE] = {
+ IT66121_AVIINFO_DB1_REG,
+ IT66121_AVIINFO_DB2_REG,
+ IT66121_AVIINFO_DB3_REG,
+ IT66121_AVIINFO_DB4_REG,
+ IT66121_AVIINFO_DB5_REG,
+ IT66121_AVIINFO_DB6_REG,
+ IT66121_AVIINFO_DB7_REG,
+ IT66121_AVIINFO_DB8_REG,
+ IT66121_AVIINFO_DB9_REG,
+ IT66121_AVIINFO_DB10_REG,
+ IT66121_AVIINFO_DB11_REG,
+ IT66121_AVIINFO_DB12_REG,
+ IT66121_AVIINFO_DB13_REG
+ };
+
+ mutex_lock(&ctx->lock);
+
+ hdmi_avi_infoframe_init(&ctx->hdmi_avi_infoframe);
+
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&ctx->hdmi_avi_infoframe, ctx->connector,
+ adjusted_mode);
+ if (ret) {
+ DRM_ERROR("Failed to setup AVI infoframe: %d\n", ret);
+ goto unlock;
+ }
+
+ ret = hdmi_avi_infoframe_pack(&ctx->hdmi_avi_infoframe, buf, sizeof(buf));
+ if (ret < 0) {
+ DRM_ERROR("Failed to pack infoframe: %d\n", ret);
+ goto unlock;
+ }
+
+ /* Write new AVI infoframe packet */
+ for (i = 0; i < HDMI_AVI_INFOFRAME_SIZE; i++) {
+ if (regmap_write(ctx->regmap, aviinfo_reg[i], buf[i + HDMI_INFOFRAME_HEADER_SIZE]))
+ goto unlock;
+ }
+ if (regmap_write(ctx->regmap, IT66121_AVIINFO_CSUM_REG, buf[3]))
+ goto unlock;
+
+ /* Enable AVI infoframe */
+ if (regmap_write(ctx->regmap, IT66121_AVI_INFO_PKT_REG,
+ IT66121_AVI_INFO_PKT_ON | IT66121_AVI_INFO_PKT_RPT))
+ goto unlock;
+
+ /* Set TX mode to HDMI */
+ if (regmap_write(ctx->regmap, IT66121_HDMI_MODE_REG, IT66121_HDMI_MODE_HDMI))
+ goto unlock;
+
+ if (regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG,
+ IT66121_CLK_BANK_PWROFF_TXCLK, IT66121_CLK_BANK_PWROFF_TXCLK))
+ goto unlock;
+
+ if (it66121_configure_input(ctx))
+ goto unlock;
+
+ if (it66121_configure_afe(ctx, adjusted_mode))
+ goto unlock;
+
+ regmap_write_bits(ctx->regmap, IT66121_CLK_BANK_REG, IT66121_CLK_BANK_PWROFF_TXCLK, 0);
+
+unlock:
+ mutex_unlock(&ctx->lock);
+}
+
+static enum drm_mode_status it66121_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_info *info,
+ const struct drm_display_mode *mode)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+ unsigned long max_clock;
+
+ max_clock = (ctx->bus_width == 12) ? 74250 : 148500;
+
+ if (mode->clock > max_clock)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
+
+ return MODE_OK;
+}
+
+static enum drm_connector_status it66121_bridge_detect(struct drm_bridge *bridge)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+
+ return it66121_is_hpd_detect(ctx) ? connector_status_connected
+ : connector_status_disconnected;
+}
+
+static void it66121_bridge_hpd_enable(struct drm_bridge *bridge)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+ int ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG, IT66121_INT_MASK1_HPD, 0);
+ if (ret)
+ dev_err(ctx->dev, "failed to enable HPD IRQ\n");
+}
+
+static void it66121_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+ int ret;
+
+ ret = regmap_write_bits(ctx->regmap, IT66121_INT_MASK1_REG,
+ IT66121_INT_MASK1_HPD, IT66121_INT_MASK1_HPD);
+ if (ret)
+ dev_err(ctx->dev, "failed to disable HPD IRQ\n");
+}
+
+static struct edid *it66121_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct it66121_ctx *ctx = container_of(bridge, struct it66121_ctx, bridge);
+ struct edid *edid;
+
+ mutex_lock(&ctx->lock);
+ edid = drm_do_get_edid(connector, it66121_get_edid_block, ctx);
+ mutex_unlock(&ctx->lock);
+
+ return edid;
+}
+
+static const struct drm_bridge_funcs it66121_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .attach = it66121_bridge_attach,
+ .atomic_get_output_bus_fmts = it66121_bridge_atomic_get_output_bus_fmts,
+ .atomic_get_input_bus_fmts = it66121_bridge_atomic_get_input_bus_fmts,
+ .atomic_enable = it66121_bridge_enable,
+ .atomic_disable = it66121_bridge_disable,
+ .mode_set = it66121_bridge_mode_set,
+ .mode_valid = it66121_bridge_mode_valid,
+ .detect = it66121_bridge_detect,
+ .get_edid = it66121_bridge_get_edid,
+ .hpd_enable = it66121_bridge_hpd_enable,
+ .hpd_disable = it66121_bridge_hpd_disable,
+};
+
+static irqreturn_t it66121_irq_threaded_handler(int irq, void *dev_id)
+{
+ int ret;
+ unsigned int val;
+ struct it66121_ctx *ctx = dev_id;
+ struct device *dev = ctx->dev;
+ enum drm_connector_status status;
+ bool event = false;
+
+ mutex_lock(&ctx->lock);
+
+ ret = regmap_read(ctx->regmap, IT66121_SYS_STATUS_REG, &val);
+ if (ret)
+ goto unlock;
+
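+ /* Nothing to do if the chip reports no pending interrupt */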
+ if (!(val & IT66121_SYS_STATUS_ACTIVE_IRQ))
+ goto unlock;
+
+ ret = regmap_read(ctx->regmap, IT66121_INT_STATUS1_REG, &val);
+ if (ret) {
+ dev_err(dev, "Cannot read STATUS1_REG %d\n", ret);
+ } else {
+ if (val & IT66121_INT_STATUS1_DDC_FIFOERR)
+ it66121_clear_ddc_fifo(ctx);
+ if (val & (IT66121_INT_STATUS1_DDC_BUSHANG |
+ IT66121_INT_STATUS1_DDC_NOACK))
+ it66121_abort_ddc_ops(ctx);
+ if (val & IT66121_INT_STATUS1_HPD_STATUS) {
+ regmap_write_bits(ctx->regmap, IT66121_INT_CLR1_REG,
+ IT66121_INT_CLR1_HPD, IT66121_INT_CLR1_HPD);
+
+ status = it66121_is_hpd_detect(ctx) ? connector_status_connected
+ : connector_status_disconnected;
+
+ event = true;
+ }
+ }
+
+ regmap_write_bits(ctx->regmap, IT66121_SYS_STATUS_REG,
+ IT66121_SYS_STATUS_CLEAR_IRQ,
+ IT66121_SYS_STATUS_CLEAR_IRQ);
+
+unlock:
+ mutex_unlock(&ctx->lock);
+
+ if (event)
+ drm_bridge_hpd_notify(&ctx->bridge, status);
+
+ return IRQ_HANDLED;
+}
+
+static int it66121_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ u32 vendor_ids[2], device_ids[2], revision_id;
+ struct device_node *ep;
+ int ret;
+ struct it66121_ctx *ctx;
+ struct device *dev = &client->dev;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ dev_err(dev, "I2C check functionality failed.\n");
+ return -ENXIO;
+ }
+
+ ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, 0);
+ if (!ep)
+ return -EINVAL;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->dev = dev;
+ ctx->client = client;
+
+ of_property_read_u32(ep, "bus-width", &ctx->bus_width);
+ of_node_put(ep);
+
+ if (ctx->bus_width != 12 && ctx->bus_width != 24)
+ return -EINVAL;
+
+ ep = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!ep)
+ return -EPROBE_DEFER;
+
+ ctx->next_bridge = of_drm_find_bridge(ep);
+ of_node_put(ep);
+ if (!ctx->next_bridge)
+ return -EPROBE_DEFER;
+
+ i2c_set_clientdata(client, ctx);
+ mutex_init(&ctx->lock);
+
+ ctx->supplies[0].supply = "vcn33";
+ ctx->supplies[1].supply = "vcn18";
+ ctx->supplies[2].supply = "vrf12";
+ ret = devm_regulator_bulk_get(ctx->dev, 3, ctx->supplies);
+ if (ret) {
+ dev_err(ctx->dev, "regulator_bulk failed\n");
+ return ret;
+ }
+
+ ret = ite66121_power_on(ctx);
+ if (ret)
+ return ret;
+
+ it66121_hw_reset(ctx);
+
+ ctx->regmap = devm_regmap_init_i2c(client, &it66121_regmap_config);
+ if (IS_ERR(ctx->regmap)) {
+ ite66121_power_off(ctx);
+ return PTR_ERR(ctx->regmap);
+ }
+
+ regmap_read(ctx->regmap, IT66121_VENDOR_ID0_REG, &vendor_ids[0]);
+ regmap_read(ctx->regmap, IT66121_VENDOR_ID1_REG, &vendor_ids[1]);
+ regmap_read(ctx->regmap, IT66121_DEVICE_ID0_REG, &device_ids[0]);
+ regmap_read(ctx->regmap, IT66121_DEVICE_ID1_REG, &device_ids[1]);
+
+ /* Revision is shared with DEVICE_ID1 */
+ revision_id = FIELD_GET(IT66121_REVISION_MASK, device_ids[1]);
+ device_ids[1] &= IT66121_DEVICE_ID1_MASK;
+
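+ /* Bail out if the chip does not identify as an ITE IT66121 */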
+ if (vendor_ids[0] != IT66121_VENDOR_ID0 || vendor_ids[1] != IT66121_VENDOR_ID1 ||
+ device_ids[0] != IT66121_DEVICE_ID0 || device_ids[1] != IT66121_DEVICE_ID1) {
+ ite66121_power_off(ctx);
+ return -ENODEV;
+ }
+
+ ctx->bridge.funcs = &it66121_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+ ctx->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ ctx->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD;
+
+ ret = devm_request_threaded_irq(dev, client->irq, NULL, it66121_irq_threaded_handler,
+ IRQF_ONESHOT, dev_name(dev), ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to request irq %d:%d\n", client->irq, ret);
+ ite66121_power_off(ctx);
+ return ret;
+ }
+
+ drm_bridge_add(&ctx->bridge);
+
+ dev_info(ctx->dev, "IT66121 revision %d probed\n", revision_id);
+
+ return 0;
+}
+
+static int it66121_remove(struct i2c_client *client)
+{
+ struct it66121_ctx *ctx = i2c_get_clientdata(client);
+
+ ite66121_power_off(ctx);
+ drm_bridge_remove(&ctx->bridge);
+ mutex_destroy(&ctx->lock);
+
+ return 0;
+}
+
+static const struct of_device_id it66121_dt_match[] = {
+ { .compatible = "ite,it66121" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, it66121_dt_match);
+
+static const struct i2c_device_id it66121_id[] = {
+ { "it66121", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, it66121_id);
+
+static struct i2c_driver it66121_driver = {
+ .driver = {
+ .name = "it66121",
+ .of_match_table = it66121_dt_match,
+ },
+ .probe = it66121_probe,
+ .remove = it66121_remove,
+ .id_table = it66121_id,
+};
+
+module_i2c_driver(it66121_driver);
+
+MODULE_AUTHOR("Phong LE");
+MODULE_DESCRIPTION("IT66121 HDMI transmitter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c
index 66b67402f1ac..873995f0a741 100644
--- a/drivers/gpu/drm/bridge/nwl-dsi.c
+++ b/drivers/gpu/drm/bridge/nwl-dsi.c
@@ -21,6 +21,7 @@
#include <linux/sys_soc.h>
#include <linux/time64.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_mipi_dsi.h>
#include <drm/drm_of.h>
@@ -661,7 +662,7 @@ static irqreturn_t nwl_dsi_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int nwl_dsi_enable(struct nwl_dsi *dsi)
+static int nwl_dsi_mode_set(struct nwl_dsi *dsi)
{
struct device *dev = dsi->dev;
union phy_configure_opts *phy_cfg = &dsi->phy_cfg;
@@ -742,7 +743,9 @@ static int nwl_dsi_disable(struct nwl_dsi *dsi)
return 0;
}
-static void nwl_dsi_bridge_disable(struct drm_bridge *bridge)
+static void
+nwl_dsi_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -803,17 +806,6 @@ static int nwl_dsi_get_dphy_params(struct nwl_dsi *dsi,
return 0;
}
-static bool nwl_dsi_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
- /* At least LCDIF + NWL needs active high sync */
- adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
- adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
-
- return true;
-}
-
static enum drm_mode_status
nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_info *info,
@@ -831,6 +823,29 @@ nwl_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return MODE_OK;
}
+static int nwl_dsi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode;
+
+ /* At least LCDIF + NWL needs active high sync */
+ adjusted_mode->flags |= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
+ adjusted_mode->flags &= ~(DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC);
+
+ /*
+ * Do a full modeset if crtc_state->active is changed to be true.
+ * This ensures our ->mode_set() is called to get the DSI controller
+ * and the PHY ready to send DCS commands, when only the connector's
+ * DPMS is brought out of "Off" status.
+ */
+ if (crtc_state->active_changed && crtc_state->active)
+ crtc_state->mode_changed = true;
+
+ return 0;
+}
+
static void
nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
const struct drm_display_mode *mode,
@@ -846,13 +861,6 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
if (ret < 0)
return;
- /*
- * If hs clock is unchanged, we're all good - all parameters are
- * derived from it atm.
- */
- if (new_cfg.mipi_dphy.hs_clk_rate == dsi->phy_cfg.mipi_dphy.hs_clk_rate)
- return;
-
phy_ref_rate = clk_get_rate(dsi->phy_ref_clk);
DRM_DEV_DEBUG_DRIVER(dev, "PHY at ref rate: %lu\n", phy_ref_rate);
/* Save the new desired phy config */
@@ -860,14 +868,8 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge,
memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode));
drm_mode_debug_printmodeline(adjusted_mode);
-}
-static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
-{
- struct nwl_dsi *dsi = bridge_to_dsi(bridge);
- int ret;
-
- pm_runtime_get_sync(dsi->dev);
+ pm_runtime_get_sync(dev);
if (clk_prepare_enable(dsi->lcdif_clk) < 0)
return;
@@ -877,27 +879,29 @@ static void nwl_dsi_bridge_pre_enable(struct drm_bridge *bridge)
/* Step 1 from DSI reset-out instructions */
ret = reset_control_deassert(dsi->rst_pclk);
if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev, "Failed to deassert PCLK: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret);
return;
}
/* Step 2 from DSI reset-out instructions */
- nwl_dsi_enable(dsi);
+ nwl_dsi_mode_set(dsi);
/* Step 3 from DSI reset-out instructions */
ret = reset_control_deassert(dsi->rst_esc);
if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev, "Failed to deassert ESC: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret);
return;
}
ret = reset_control_deassert(dsi->rst_byte);
if (ret < 0) {
- DRM_DEV_ERROR(dsi->dev, "Failed to deassert BYTE: %d\n", ret);
+ DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret);
return;
}
}
-static void nwl_dsi_bridge_enable(struct drm_bridge *bridge)
+static void
+nwl_dsi_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
struct nwl_dsi *dsi = bridge_to_dsi(bridge);
int ret;
@@ -942,14 +946,16 @@ static void nwl_dsi_bridge_detach(struct drm_bridge *bridge)
}
static const struct drm_bridge_funcs nwl_dsi_bridge_funcs = {
- .pre_enable = nwl_dsi_bridge_pre_enable,
- .enable = nwl_dsi_bridge_enable,
- .disable = nwl_dsi_bridge_disable,
- .mode_fixup = nwl_dsi_bridge_mode_fixup,
- .mode_set = nwl_dsi_bridge_mode_set,
- .mode_valid = nwl_dsi_bridge_mode_valid,
- .attach = nwl_dsi_bridge_attach,
- .detach = nwl_dsi_bridge_detach,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_check = nwl_dsi_bridge_atomic_check,
+ .atomic_enable = nwl_dsi_bridge_atomic_enable,
+ .atomic_disable = nwl_dsi_bridge_atomic_disable,
+ .mode_set = nwl_dsi_bridge_mode_set,
+ .mode_valid = nwl_dsi_bridge_mode_valid,
+ .attach = nwl_dsi_bridge_attach,
+ .detach = nwl_dsi_bridge_detach,
};
static int nwl_dsi_parse_dt(struct nwl_dsi *dsi)
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index dda4fa9a1a08..e7c7c9b9c646 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -2395,21 +2395,6 @@ static int dw_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
-static bool hdr_metadata_equal(const struct drm_connector_state *old_state,
- const struct drm_connector_state *new_state)
-{
- struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
- struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
-
- if (!old_blob || !new_blob)
- return old_blob == new_blob;
-
- if (old_blob->length != new_blob->length)
- return false;
-
- return !memcmp(old_blob->data, new_blob->data, old_blob->length);
-}
-
static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
@@ -2423,7 +2408,7 @@ static int dw_hdmi_connector_atomic_check(struct drm_connector *connector,
if (!crtc)
return 0;
- if (!hdr_metadata_equal(old_state, new_state)) {
+ if (!drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) {
crtc_state = drm_atomic_get_crtc_state(state, crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
@@ -2492,8 +2477,7 @@ static int dw_hdmi_connector_create(struct dw_hdmi *hdmi)
drm_connector_attach_max_bpc_property(connector, 8, 16);
if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
- drm_object_attach_property(&connector->base,
- connector->dev->mode_config.hdr_output_metadata_property, 0);
+ drm_connector_attach_hdr_output_metadata_property(connector);
drm_connector_attach_encoder(connector, hdmi->bridge.encoder);
@@ -3421,7 +3405,7 @@ struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
hdmi->audio = platform_device_register_full(&pdevinfo);
}
- if (config0 & HDMI_CONFIG0_CEC) {
+ if (!plat_data->disable_cec && (config0 & HDMI_CONFIG0_CEC)) {
cec.hdmi = hdmi;
cec.ops = &dw_hdmi_cec_ops;
cec.irq = irq;
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index da89922721ed..23a6f90b694b 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -1414,6 +1414,7 @@ static int tc_bridge_attach(struct drm_bridge *bridge,
if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
return 0;
+ tc->aux.drm_dev = drm;
ret = drm_dp_aux_register(&tc->aux);
if (ret < 0)
return ret;
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 88df4dd0f39d..bb0a0e1c6341 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -4,6 +4,7 @@
* datasheet: https://www.ti.com/lit/ds/symlink/sn65dsi86.pdf
*/
+#include <linux/auxiliary_bus.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
@@ -112,13 +113,15 @@
#define SN_LINK_TRAINING_TRIES 10
/**
- * struct ti_sn_bridge - Platform data for ti-sn65dsi86 driver.
- * @dev: Pointer to our device.
+ * struct ti_sn65dsi86 - Platform data for ti-sn65dsi86 driver.
+ * @bridge_aux: AUX-bus sub device for MIPI-to-eDP bridge functionality.
+ * @gpio_aux: AUX-bus sub device for GPIO controller functionality.
+ *
+ * @dev: Pointer to the top level (i2c) device.
* @regmap: Regmap for accessing i2c.
* @aux: Our aux channel.
* @bridge: Our bridge.
* @connector: Our connector.
- * @debugfs: Used for managing our debugfs.
* @host_node: Remote DSI node.
* @dsi: Our MIPI DSI source.
* @edid: Detected EDID of eDP panel.
@@ -129,6 +132,8 @@
* @dp_lanes: Count of dp_lanes we're using.
* @ln_assign: Value to program to the LN_ASSIGN register.
* @ln_polrs: Value for the 4-bit LN_POLRS field of SN_ENH_FRAME_REG.
+ * @comms_enabled: If true then communication over the aux channel is enabled.
+ * @comms_mutex: Protects modification of comms_enabled.
*
* @gchip: If we expose our GPIOs, this is used.
* @gchip_output: A cache of whether we've set GPIOs to output. This
@@ -140,13 +145,15 @@
* lock so concurrent users of our 4 GPIOs don't stomp on
* each other's read-modify-write.
*/
-struct ti_sn_bridge {
+struct ti_sn65dsi86 {
+ struct auxiliary_device bridge_aux;
+ struct auxiliary_device gpio_aux;
+
struct device *dev;
struct regmap *regmap;
struct drm_dp_aux aux;
struct drm_bridge bridge;
struct drm_connector connector;
- struct dentry *debugfs;
struct edid *edid;
struct device_node *host_node;
struct mipi_dsi_device *dsi;
@@ -157,6 +164,8 @@ struct ti_sn_bridge {
int dp_lanes;
u8 ln_assign;
u8 ln_polrs;
+ bool comms_enabled;
+ struct mutex comms_mutex;
#if defined(CONFIG_OF_GPIO)
struct gpio_chip gchip;
@@ -164,32 +173,131 @@ struct ti_sn_bridge {
#endif
};
-static const struct regmap_range ti_sn_bridge_volatile_ranges[] = {
+static const struct regmap_range ti_sn65dsi86_volatile_ranges[] = {
{ .range_min = 0, .range_max = 0xFF },
};
static const struct regmap_access_table ti_sn_bridge_volatile_table = {
- .yes_ranges = ti_sn_bridge_volatile_ranges,
- .n_yes_ranges = ARRAY_SIZE(ti_sn_bridge_volatile_ranges),
+ .yes_ranges = ti_sn65dsi86_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ti_sn65dsi86_volatile_ranges),
};
-static const struct regmap_config ti_sn_bridge_regmap_config = {
+static const struct regmap_config ti_sn65dsi86_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.volatile_table = &ti_sn_bridge_volatile_table,
.cache_type = REGCACHE_NONE,
};
-static void ti_sn_bridge_write_u16(struct ti_sn_bridge *pdata,
+static void ti_sn65dsi86_write_u16(struct ti_sn65dsi86 *pdata,
unsigned int reg, u16 val)
{
regmap_write(pdata->regmap, reg, val & 0xFF);
regmap_write(pdata->regmap, reg + 1, val >> 8);
}
-static int __maybe_unused ti_sn_bridge_resume(struct device *dev)
+static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn65dsi86 *pdata)
+{
+ u32 bit_rate_khz, clk_freq_khz;
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+ bit_rate_khz = mode->clock *
+ mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
+ clk_freq_khz = bit_rate_khz / (pdata->dsi->lanes * 2);
+
+ return clk_freq_khz;
+}
+
+/* clk frequencies supported by bridge in Hz in case derived from REFCLK pin */
+static const u32 ti_sn_bridge_refclk_lut[] = {
+ 12000000,
+ 19200000,
+ 26000000,
+ 27000000,
+ 38400000,
+};
+
+/* clk frequencies supported by bridge in Hz in case derived from DACP/N pin */
+static const u32 ti_sn_bridge_dsiclk_lut[] = {
+ 468000000,
+ 384000000,
+ 416000000,
+ 486000000,
+ 460800000,
+};
+
+static void ti_sn_bridge_set_refclk_freq(struct ti_sn65dsi86 *pdata)
+{
+ int i;
+ u32 refclk_rate;
+ const u32 *refclk_lut;
+ size_t refclk_lut_size;
+
+ if (pdata->refclk) {
+ refclk_rate = clk_get_rate(pdata->refclk);
+ refclk_lut = ti_sn_bridge_refclk_lut;
+ refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
+ clk_prepare_enable(pdata->refclk);
+ } else {
+ refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
+ refclk_lut = ti_sn_bridge_dsiclk_lut;
+ refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
+ }
+
+ /* An index equal to refclk_lut_size selects the default frequency */
+ for (i = 0; i < refclk_lut_size; i++)
+ if (refclk_lut[i] == refclk_rate)
+ break;
+
+ regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
+ REFCLK_FREQ(i));
+}
+
+static void ti_sn65dsi86_enable_comms(struct ti_sn65dsi86 *pdata)
{
- struct ti_sn_bridge *pdata = dev_get_drvdata(dev);
+ mutex_lock(&pdata->comms_mutex);
+
+ /* configure bridge ref_clk */
+ ti_sn_bridge_set_refclk_freq(pdata);
+
+ /*
+ * HPD on this bridge chip is a bit useless. This is an eDP bridge
+ * so the HPD is an internal signal that's only there to signal that
+ * the panel is done powering up. ...but the bridge chip debounces
+ * this signal by between 100 ms and 400 ms (depending on process,
+ * voltage, and temperature--I measured it at about 200 ms). One
+ * particular panel asserted HPD 84 ms after it was powered on meaning
+ * that we saw HPD 284 ms after power on. ...but the same panel said
+ * that instead of looking at HPD you could just hardcode a delay of
+ * 200 ms. We'll assume that the panel driver will have the hardcoded
+ * delay in its prepare and always disable HPD.
+ *
+ * If HPD somehow makes sense on some future panel we'll have to
+ * change this to be conditional on someone specifying that HPD should
+ * be used.
+ */
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+ HPD_DISABLE);
+
+ pdata->comms_enabled = true;
+
+ mutex_unlock(&pdata->comms_mutex);
+}
+
+static void ti_sn65dsi86_disable_comms(struct ti_sn65dsi86 *pdata)
+{
+ mutex_lock(&pdata->comms_mutex);
+
+ pdata->comms_enabled = false;
+ clk_disable_unprepare(pdata->refclk);
+
+ mutex_unlock(&pdata->comms_mutex);
+}
+
+static int __maybe_unused ti_sn65dsi86_resume(struct device *dev)
+{
+ struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev);
int ret;
ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
@@ -200,14 +308,27 @@ static int __maybe_unused ti_sn_bridge_resume(struct device *dev)
gpiod_set_value(pdata->enable_gpio, 1);
+ /*
+ * If we have a reference clock we can enable communication w/ the
+ * panel (including the aux channel) w/out any need for an input clock
+ * so we can do it in resume which lets us read the EDID before
+ * pre_enable(). Without a reference clock we need the MIPI reference
+ * clock so reading early doesn't work.
+ */
+ if (pdata->refclk)
+ ti_sn65dsi86_enable_comms(pdata);
+
return ret;
}
-static int __maybe_unused ti_sn_bridge_suspend(struct device *dev)
+static int __maybe_unused ti_sn65dsi86_suspend(struct device *dev)
{
- struct ti_sn_bridge *pdata = dev_get_drvdata(dev);
+ struct ti_sn65dsi86 *pdata = dev_get_drvdata(dev);
int ret;
+ if (pdata->refclk)
+ ti_sn65dsi86_disable_comms(pdata);
+
gpiod_set_value(pdata->enable_gpio, 0);
ret = regulator_bulk_disable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
@@ -217,15 +338,15 @@ static int __maybe_unused ti_sn_bridge_suspend(struct device *dev)
return ret;
}
-static const struct dev_pm_ops ti_sn_bridge_pm_ops = {
- SET_RUNTIME_PM_OPS(ti_sn_bridge_suspend, ti_sn_bridge_resume, NULL)
+static const struct dev_pm_ops ti_sn65dsi86_pm_ops = {
+ SET_RUNTIME_PM_OPS(ti_sn65dsi86_suspend, ti_sn65dsi86_resume, NULL)
SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
pm_runtime_force_resume)
};
static int status_show(struct seq_file *s, void *data)
{
- struct ti_sn_bridge *pdata = s->private;
+ struct ti_sn65dsi86 *pdata = s->private;
unsigned int reg, val;
seq_puts(s, "STATUS REGISTERS:\n");
@@ -238,44 +359,57 @@ static int status_show(struct seq_file *s, void *data)
seq_printf(s, "[0x%02x] = 0x%08x\n", reg, val);
}
- pm_runtime_put(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
return 0;
}
DEFINE_SHOW_ATTRIBUTE(status);
-static void ti_sn_debugfs_init(struct ti_sn_bridge *pdata)
+static void ti_sn65dsi86_debugfs_remove(void *data)
{
- pdata->debugfs = debugfs_create_dir(dev_name(pdata->dev), NULL);
-
- debugfs_create_file("status", 0600, pdata->debugfs, pdata,
- &status_fops);
+ debugfs_remove_recursive(data);
}
-static void ti_sn_debugfs_remove(struct ti_sn_bridge *pdata)
+static void ti_sn65dsi86_debugfs_init(struct ti_sn65dsi86 *pdata)
{
- debugfs_remove_recursive(pdata->debugfs);
- pdata->debugfs = NULL;
+ struct device *dev = pdata->dev;
+ struct dentry *debugfs;
+ int ret;
+
+ debugfs = debugfs_create_dir(dev_name(dev), NULL);
+
+ /*
+ * We might get an error back if debugfs wasn't enabled in the kernel
+ * so let's just silently return upon failure.
+ */
+ if (IS_ERR_OR_NULL(debugfs))
+ return;
+
+ ret = devm_add_action_or_reset(dev, ti_sn65dsi86_debugfs_remove, debugfs);
+ if (ret)
+ return;
+
+ debugfs_create_file("status", 0600, debugfs, pdata, &status_fops);
}
/* Connector funcs */
-static struct ti_sn_bridge *
-connector_to_ti_sn_bridge(struct drm_connector *connector)
+static struct ti_sn65dsi86 *
+connector_to_ti_sn65dsi86(struct drm_connector *connector)
{
- return container_of(connector, struct ti_sn_bridge, connector);
+ return container_of(connector, struct ti_sn65dsi86, connector);
}
static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
{
- struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector);
+ struct ti_sn65dsi86 *pdata = connector_to_ti_sn65dsi86(connector);
struct edid *edid = pdata->edid;
int num, ret;
if (!edid) {
pm_runtime_get_sync(pdata->dev);
edid = pdata->edid = drm_get_edid(connector, &pdata->aux.ddc);
- pm_runtime_put(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
}
if (edid && drm_edid_is_valid(edid)) {
@@ -306,32 +440,20 @@ static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = {
.mode_valid = ti_sn_bridge_connector_mode_valid,
};
-static enum drm_connector_status
-ti_sn_bridge_connector_detect(struct drm_connector *connector, bool force)
-{
- /**
- * TODO: Currently if drm_panel is present, then always
- * return the status as connected. Need to add support to detect
- * device state for hot pluggable scenarios.
- */
- return connector_status_connected;
-}
-
static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
- .detect = ti_sn_bridge_connector_detect,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static struct ti_sn_bridge *bridge_to_ti_sn_bridge(struct drm_bridge *bridge)
+static struct ti_sn65dsi86 *bridge_to_ti_sn65dsi86(struct drm_bridge *bridge)
{
- return container_of(bridge, struct ti_sn_bridge, bridge);
+ return container_of(bridge, struct ti_sn65dsi86, bridge);
}
-static int ti_sn_bridge_parse_regulators(struct ti_sn_bridge *pdata)
+static int ti_sn65dsi86_parse_regulators(struct ti_sn65dsi86 *pdata)
{
unsigned int i;
const char * const ti_sn_bridge_supply_names[] = {
@@ -349,7 +471,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
enum drm_bridge_attach_flags flags)
{
int ret, val;
- struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
struct mipi_dsi_host *host;
struct mipi_dsi_device *dsi;
const struct mipi_dsi_device_info info = { .type = "ti_sn_bridge",
@@ -362,6 +484,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
return -EINVAL;
}
+ pdata->aux.drm_dev = bridge->dev;
ret = drm_dp_aux_register(&pdata->aux);
if (ret < 0) {
drm_err(bridge->dev, "Failed to register DP AUX channel: %d\n", ret);
@@ -413,7 +536,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge,
/* check if continuous dsi clock is required or not */
pm_runtime_get_sync(pdata->dev);
regmap_read(pdata->regmap, SN_DPPLL_SRC_REG, &val);
- pm_runtime_put(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
if (!(val & DPPLL_CLK_SRC_DSICLK))
dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS;
@@ -437,12 +560,12 @@ err_conn_init:
static void ti_sn_bridge_detach(struct drm_bridge *bridge)
{
- drm_dp_aux_unregister(&bridge_to_ti_sn_bridge(bridge)->aux);
+ drm_dp_aux_unregister(&bridge_to_ti_sn65dsi86(bridge)->aux);
}
static void ti_sn_bridge_disable(struct drm_bridge *bridge)
{
- struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
drm_panel_disable(pdata->panel);
@@ -452,69 +575,9 @@ static void ti_sn_bridge_disable(struct drm_bridge *bridge)
regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0);
/* disable DP PLL */
regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);
-
- drm_panel_unprepare(pdata->panel);
-}
-
-static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn_bridge *pdata)
-{
- u32 bit_rate_khz, clk_freq_khz;
- struct drm_display_mode *mode =
- &pdata->bridge.encoder->crtc->state->adjusted_mode;
-
- bit_rate_khz = mode->clock *
- mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
- clk_freq_khz = bit_rate_khz / (pdata->dsi->lanes * 2);
-
- return clk_freq_khz;
-}
-
-/* clk frequencies supported by bridge in Hz in case derived from REFCLK pin */
-static const u32 ti_sn_bridge_refclk_lut[] = {
- 12000000,
- 19200000,
- 26000000,
- 27000000,
- 38400000,
-};
-
-/* clk frequencies supported by bridge in Hz in case derived from DACP/N pin */
-static const u32 ti_sn_bridge_dsiclk_lut[] = {
- 468000000,
- 384000000,
- 416000000,
- 486000000,
- 460800000,
-};
-
-static void ti_sn_bridge_set_refclk_freq(struct ti_sn_bridge *pdata)
-{
- int i;
- u32 refclk_rate;
- const u32 *refclk_lut;
- size_t refclk_lut_size;
-
- if (pdata->refclk) {
- refclk_rate = clk_get_rate(pdata->refclk);
- refclk_lut = ti_sn_bridge_refclk_lut;
- refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
- clk_prepare_enable(pdata->refclk);
- } else {
- refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
- refclk_lut = ti_sn_bridge_dsiclk_lut;
- refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
- }
-
- /* for i equals to refclk_lut_size means default frequency */
- for (i = 0; i < refclk_lut_size; i++)
- if (refclk_lut[i] == refclk_rate)
- break;
-
- regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
- REFCLK_FREQ(i));
}
-static void ti_sn_bridge_set_dsi_rate(struct ti_sn_bridge *pdata)
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn65dsi86 *pdata)
{
unsigned int bit_rate_mhz, clk_freq_mhz;
unsigned int val;
@@ -532,7 +595,7 @@ static void ti_sn_bridge_set_dsi_rate(struct ti_sn_bridge *pdata)
regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
}
-static unsigned int ti_sn_bridge_get_bpp(struct ti_sn_bridge *pdata)
+static unsigned int ti_sn_bridge_get_bpp(struct ti_sn65dsi86 *pdata)
{
if (pdata->connector.display_info.bpc <= 6)
return 18;
@@ -549,7 +612,7 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn_bridge *pdata)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn65dsi86 *pdata)
{
unsigned int bit_rate_khz, dp_rate_mhz;
unsigned int i;
@@ -570,7 +633,7 @@ static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn_bridge *pdata)
return i;
}
-static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata,
+static void ti_sn_bridge_read_valid_rates(struct ti_sn65dsi86 *pdata,
bool rate_valid[])
{
unsigned int rate_per_200khz;
@@ -651,7 +714,7 @@ static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata,
}
}
-static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
+static void ti_sn_bridge_set_video_timings(struct ti_sn65dsi86 *pdata)
{
struct drm_display_mode *mode =
&pdata->bridge.encoder->crtc->state->adjusted_mode;
@@ -662,9 +725,9 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
vsync_polarity = CHA_VSYNC_POLARITY;
- ti_sn_bridge_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,
+ ti_sn65dsi86_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,
mode->hdisplay);
- ti_sn_bridge_write_u16(pdata, SN_CHA_VERTICAL_DISPLAY_SIZE_LOW_REG,
+ ti_sn65dsi86_write_u16(pdata, SN_CHA_VERTICAL_DISPLAY_SIZE_LOW_REG,
mode->vdisplay);
regmap_write(pdata->regmap, SN_CHA_HSYNC_PULSE_WIDTH_LOW_REG,
(mode->hsync_end - mode->hsync_start) & 0xFF);
@@ -690,7 +753,7 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
usleep_range(10000, 10500); /* 10ms delay recommended by spec */
}
-static unsigned int ti_sn_get_max_lanes(struct ti_sn_bridge *pdata)
+static unsigned int ti_sn_get_max_lanes(struct ti_sn65dsi86 *pdata)
{
u8 data;
int ret;
@@ -705,7 +768,7 @@ static unsigned int ti_sn_get_max_lanes(struct ti_sn_bridge *pdata)
return data & DP_LANE_COUNT_MASK;
}
-static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
+static int ti_sn_link_training(struct ti_sn65dsi86 *pdata, int dp_rate_idx,
const char **last_err_str)
{
unsigned int val;
@@ -765,7 +828,7 @@ exit:
static void ti_sn_bridge_enable(struct drm_bridge *bridge)
{
- struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
bool rate_valid[ARRAY_SIZE(ti_sn_bridge_dp_rate_lut)] = { };
const char *last_err_str = "No supported DP rate";
int dp_rate_idx;
@@ -788,7 +851,7 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
/* set dsi clk frequency value */
ti_sn_bridge_set_dsi_rate(pdata);
- /**
+ /*
* The SN65DSI86 only supports ASSR Display Authentication method and
* this method is enabled by default. An eDP panel must support this
* authentication method. We need to enable this method in the eDP panel
@@ -836,40 +899,24 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge)
{
- struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
pm_runtime_get_sync(pdata->dev);
- /* configure bridge ref_clk */
- ti_sn_bridge_set_refclk_freq(pdata);
-
- /*
- * HPD on this bridge chip is a bit useless. This is an eDP bridge
- * so the HPD is an internal signal that's only there to signal that
- * the panel is done powering up. ...but the bridge chip debounces
- * this signal by between 100 ms and 400 ms (depending on process,
- * voltage, and temperate--I measured it at about 200 ms). One
- * particular panel asserted HPD 84 ms after it was powered on meaning
- * that we saw HPD 284 ms after power on. ...but the same panel said
- * that instead of looking at HPD you could just hardcode a delay of
- * 200 ms. We'll assume that the panel driver will have the hardcoded
- * delay in its prepare and always disable HPD.
- *
- * If HPD somehow makes sense on some future panel we'll have to
- * change this to be conditional on someone specifying that HPD should
- * be used.
- */
- regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
- HPD_DISABLE);
+ if (!pdata->refclk)
+ ti_sn65dsi86_enable_comms(pdata);
drm_panel_prepare(pdata->panel);
}
static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
{
- struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ struct ti_sn65dsi86 *pdata = bridge_to_ti_sn65dsi86(bridge);
- clk_disable_unprepare(pdata->refclk);
+ drm_panel_unprepare(pdata->panel);
+
+ if (!pdata->refclk)
+ ti_sn65dsi86_disable_comms(pdata);
pm_runtime_put_sync(pdata->dev);
}
@@ -883,15 +930,15 @@ static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
.post_disable = ti_sn_bridge_post_disable,
};
-static struct ti_sn_bridge *aux_to_ti_sn_bridge(struct drm_dp_aux *aux)
+static struct ti_sn65dsi86 *aux_to_ti_sn65dsi86(struct drm_dp_aux *aux)
{
- return container_of(aux, struct ti_sn_bridge, aux);
+ return container_of(aux, struct ti_sn65dsi86, aux);
}
static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg)
{
- struct ti_sn_bridge *pdata = aux_to_ti_sn_bridge(aux);
+ struct ti_sn65dsi86 *pdata = aux_to_ti_sn65dsi86(aux);
u32 request = msg->request & ~(DP_AUX_I2C_MOT | DP_AUX_I2C_WRITE_STATUS_UPDATE);
u32 request_val = AUX_CMD_REQ(msg->request);
u8 *buf = msg->buffer;
@@ -903,6 +950,20 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
if (len > SN_AUX_MAX_PAYLOAD_BYTES)
return -EINVAL;
+ pm_runtime_get_sync(pdata->dev);
+ mutex_lock(&pdata->comms_mutex);
+
+ /*
+ * If someone tries to do a DDC over AUX transaction before pre_enable()
+ * on a device without a dedicated reference clock then we just can't
+ * do it. Fail right away. This prevents non-refclk users from reading
+ * the EDID before enabling the panel but such is life.
+ */
+ if (!pdata->comms_enabled) {
+ ret = -EIO;
+ goto exit;
+ }
+
switch (request) {
case DP_AUX_NATIVE_WRITE:
case DP_AUX_I2C_WRITE:
@@ -913,7 +974,8 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
msg->reply = 0;
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto exit;
}
BUILD_BUG_ON(sizeof(addr_len) != sizeof(__be32));
@@ -937,11 +999,11 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,
!(val & AUX_CMD_SEND), 0, 50 * 1000);
if (ret)
- return ret;
+ goto exit;
ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val);
if (ret)
- return ret;
+ goto exit;
if (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT) {
/*
@@ -949,13 +1011,14 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
* but it hit a timeout. We ignore defers here because they're
* handled in hardware.
*/
- return -ETIMEDOUT;
+ ret = -ETIMEDOUT;
+ goto exit;
}
if (val & AUX_IRQ_STATUS_AUX_SHORT) {
ret = regmap_read(pdata->regmap, SN_AUX_LENGTH_REG, &len);
if (ret)
- return ret;
+ goto exit;
} else if (val & AUX_IRQ_STATUS_NAT_I2C_FAIL) {
switch (request) {
case DP_AUX_I2C_WRITE:
@@ -967,21 +1030,22 @@ static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
msg->reply |= DP_AUX_NATIVE_REPLY_NACK;
break;
}
- return 0;
+ len = 0;
+ goto exit;
}
- if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE ||
- len == 0)
- return len;
+ if (request != DP_AUX_NATIVE_WRITE && request != DP_AUX_I2C_WRITE && len != 0)
+ ret = regmap_bulk_read(pdata->regmap, SN_AUX_RDATA_REG(0), buf, len);
- ret = regmap_bulk_read(pdata->regmap, SN_AUX_RDATA_REG(0), buf, len);
- if (ret)
- return ret;
+exit:
+ mutex_unlock(&pdata->comms_mutex);
+ pm_runtime_mark_last_busy(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
- return len;
+ return ret ? ret : len;
}
-static int ti_sn_bridge_parse_dsi_host(struct ti_sn_bridge *pdata)
+static int ti_sn_bridge_parse_dsi_host(struct ti_sn65dsi86 *pdata)
{
struct device_node *np = pdata->dev->of_node;
@@ -1016,7 +1080,7 @@ static int tn_sn_bridge_of_xlate(struct gpio_chip *chip,
static int ti_sn_bridge_gpio_get_direction(struct gpio_chip *chip,
unsigned int offset)
{
- struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+ struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
/*
* We already have to keep track of the direction because we use
@@ -1030,7 +1094,7 @@ static int ti_sn_bridge_gpio_get_direction(struct gpio_chip *chip,
static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
- struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+ struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
unsigned int val;
int ret;
@@ -1044,7 +1108,7 @@ static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset)
*/
pm_runtime_get_sync(pdata->dev);
ret = regmap_read(pdata->regmap, SN_GPIO_IO_REG, &val);
- pm_runtime_put(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
if (ret)
return ret;
@@ -1055,7 +1119,7 @@ static int ti_sn_bridge_gpio_get(struct gpio_chip *chip, unsigned int offset)
static void ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
int val)
{
- struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+ struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
int ret;
if (!test_bit(offset, pdata->gchip_output)) {
@@ -1075,7 +1139,7 @@ static void ti_sn_bridge_gpio_set(struct gpio_chip *chip, unsigned int offset,
static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip,
unsigned int offset)
{
- struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+ struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
int shift = offset * 2;
int ret;
@@ -1095,7 +1159,7 @@ static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip,
* it off and when it comes back it will have lost all state, but
* that's OK because the default is input and we're now an input.
*/
- pm_runtime_put(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
return 0;
}
@@ -1103,7 +1167,7 @@ static int ti_sn_bridge_gpio_direction_input(struct gpio_chip *chip,
static int ti_sn_bridge_gpio_direction_output(struct gpio_chip *chip,
unsigned int offset, int val)
{
- struct ti_sn_bridge *pdata = gpiochip_get_data(chip);
+ struct ti_sn65dsi86 *pdata = gpiochip_get_data(chip);
int shift = offset * 2;
int ret;
@@ -1121,7 +1185,7 @@ static int ti_sn_bridge_gpio_direction_output(struct gpio_chip *chip,
SN_GPIO_MUX_OUTPUT << shift);
if (ret) {
clear_bit(offset, pdata->gchip_output);
- pm_runtime_put(pdata->dev);
+ pm_runtime_put_autosuspend(pdata->dev);
}
return ret;
@@ -1137,8 +1201,10 @@ static const char * const ti_sn_bridge_gpio_names[SN_NUM_GPIOS] = {
"GPIO1", "GPIO2", "GPIO3", "GPIO4"
};
-static int ti_sn_setup_gpio_controller(struct ti_sn_bridge *pdata)
+static int ti_sn_gpio_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
{
+ struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
int ret;
/* Only init if someone is going to use us as a GPIO controller */
@@ -1160,23 +1226,44 @@ static int ti_sn_setup_gpio_controller(struct ti_sn_bridge *pdata)
pdata->gchip.names = ti_sn_bridge_gpio_names;
pdata->gchip.ngpio = SN_NUM_GPIOS;
pdata->gchip.base = -1;
- ret = devm_gpiochip_add_data(pdata->dev, &pdata->gchip, pdata);
+ ret = devm_gpiochip_add_data(&adev->dev, &pdata->gchip, pdata);
if (ret)
dev_err(pdata->dev, "can't add gpio chip\n");
return ret;
}
-#else
+static const struct auxiliary_device_id ti_sn_gpio_id_table[] = {
+ { .name = "ti_sn65dsi86.gpio", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(auxiliary, ti_sn_gpio_id_table);
+
+static struct auxiliary_driver ti_sn_gpio_driver = {
+ .name = "gpio",
+ .probe = ti_sn_gpio_probe,
+ .id_table = ti_sn_gpio_id_table,
+};
-static inline int ti_sn_setup_gpio_controller(struct ti_sn_bridge *pdata)
+static int __init ti_sn_gpio_register(void)
{
- return 0;
+ return auxiliary_driver_register(&ti_sn_gpio_driver);
+}
+
+static void ti_sn_gpio_unregister(void)
+{
+ auxiliary_driver_unregister(&ti_sn_gpio_driver);
}
+#else
+
+static inline int ti_sn_gpio_register(void) { return 0; }
+static inline void ti_sn_gpio_unregister(void) {}
+
#endif
-static void ti_sn_bridge_parse_lanes(struct ti_sn_bridge *pdata,
+static void ti_sn_bridge_parse_lanes(struct ti_sn65dsi86 *pdata,
struct device_node *np)
{
u32 lane_assignments[SN_MAX_DP_LANES] = { 0, 1, 2, 3 };
@@ -1225,141 +1312,253 @@ static void ti_sn_bridge_parse_lanes(struct ti_sn_bridge *pdata,
pdata->ln_polrs = ln_polrs;
}
-static int ti_sn_bridge_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+static int ti_sn_bridge_probe(struct auxiliary_device *adev,
+ const struct auxiliary_device_id *id)
{
- struct ti_sn_bridge *pdata;
+ struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
+ struct device_node *np = pdata->dev->of_node;
int ret;
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- DRM_ERROR("device doesn't support I2C\n");
- return -ENODEV;
- }
-
- pdata = devm_kzalloc(&client->dev, sizeof(struct ti_sn_bridge),
- GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- pdata->regmap = devm_regmap_init_i2c(client,
- &ti_sn_bridge_regmap_config);
- if (IS_ERR(pdata->regmap)) {
- DRM_ERROR("regmap i2c init failed\n");
- return PTR_ERR(pdata->regmap);
- }
-
- pdata->dev = &client->dev;
-
- ret = drm_of_find_panel_or_bridge(pdata->dev->of_node, 1, 0,
- &pdata->panel, NULL);
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &pdata->panel, NULL);
if (ret) {
DRM_ERROR("could not find any panel node\n");
return ret;
}
- dev_set_drvdata(&client->dev, pdata);
-
- pdata->enable_gpio = devm_gpiod_get(pdata->dev, "enable",
- GPIOD_OUT_LOW);
- if (IS_ERR(pdata->enable_gpio)) {
- DRM_ERROR("failed to get enable gpio from DT\n");
- ret = PTR_ERR(pdata->enable_gpio);
- return ret;
- }
-
- ti_sn_bridge_parse_lanes(pdata, client->dev.of_node);
-
- ret = ti_sn_bridge_parse_regulators(pdata);
- if (ret) {
- DRM_ERROR("failed to parse regulators\n");
- return ret;
- }
-
- pdata->refclk = devm_clk_get(pdata->dev, "refclk");
- if (IS_ERR(pdata->refclk)) {
- ret = PTR_ERR(pdata->refclk);
- if (ret == -EPROBE_DEFER)
- return ret;
- DRM_DEBUG_KMS("refclk not found\n");
- pdata->refclk = NULL;
- }
+ ti_sn_bridge_parse_lanes(pdata, np);
ret = ti_sn_bridge_parse_dsi_host(pdata);
if (ret)
return ret;
- pm_runtime_enable(pdata->dev);
-
- ret = ti_sn_setup_gpio_controller(pdata);
- if (ret) {
- pm_runtime_disable(pdata->dev);
- return ret;
- }
-
- i2c_set_clientdata(client, pdata);
-
pdata->aux.name = "ti-sn65dsi86-aux";
pdata->aux.dev = pdata->dev;
pdata->aux.transfer = ti_sn_aux_transfer;
drm_dp_aux_init(&pdata->aux);
pdata->bridge.funcs = &ti_sn_bridge_funcs;
- pdata->bridge.of_node = client->dev.of_node;
+ pdata->bridge.of_node = np;
drm_bridge_add(&pdata->bridge);
- ti_sn_debugfs_init(pdata);
-
return 0;
}
-static int ti_sn_bridge_remove(struct i2c_client *client)
+static void ti_sn_bridge_remove(struct auxiliary_device *adev)
{
- struct ti_sn_bridge *pdata = i2c_get_clientdata(client);
+ struct ti_sn65dsi86 *pdata = dev_get_drvdata(adev->dev.parent);
if (!pdata)
- return -EINVAL;
+ return;
+
+ if (pdata->dsi) {
+ mipi_dsi_detach(pdata->dsi);
+ mipi_dsi_device_unregister(pdata->dsi);
+ }
kfree(pdata->edid);
- ti_sn_debugfs_remove(pdata);
+
+ drm_bridge_remove(&pdata->bridge);
of_node_put(pdata->host_node);
+}
- pm_runtime_disable(pdata->dev);
+static const struct auxiliary_device_id ti_sn_bridge_id_table[] = {
+ { .name = "ti_sn65dsi86.bridge", },
+ {},
+};
- if (pdata->dsi) {
- mipi_dsi_detach(pdata->dsi);
- mipi_dsi_device_unregister(pdata->dsi);
+static struct auxiliary_driver ti_sn_bridge_driver = {
+ .name = "bridge",
+ .probe = ti_sn_bridge_probe,
+ .remove = ti_sn_bridge_remove,
+ .id_table = ti_sn_bridge_id_table,
+};
+
+static void ti_sn65dsi86_runtime_disable(void *data)
+{
+ pm_runtime_disable(data);
+}
+
+static void ti_sn65dsi86_uninit_aux(void *data)
+{
+ auxiliary_device_uninit(data);
+}
+
+static void ti_sn65dsi86_delete_aux(void *data)
+{
+ auxiliary_device_delete(data);
+}
+
+/*
+ * AUX bus docs say that a non-NULL release is mandatory, but it makes no
+ * sense for the model used here where all of the aux devices are allocated
+ * in the single shared structure. We'll use this noop as a workaround.
+ */
+static void ti_sn65dsi86_noop(struct device *dev) {}
+
+static int ti_sn65dsi86_add_aux_device(struct ti_sn65dsi86 *pdata,
+ struct auxiliary_device *aux,
+ const char *name)
+{
+ struct device *dev = pdata->dev;
+ int ret;
+
+ /*
+ * NOTE: It would be nice to set the "of_node" of our children to be
+ * the same "of_node"" that the top-level component has. That doesn't
+ * work, though, since pinctrl will try (and fail) to reserve the
+ * pins again. Until that gets sorted out the children will just need
+ * to look at the of_node of the main device.
+ */
+
+ aux->name = name;
+ aux->dev.parent = dev;
+ aux->dev.release = ti_sn65dsi86_noop;
+ ret = auxiliary_device_init(aux);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, ti_sn65dsi86_uninit_aux, aux);
+ if (ret)
+ return ret;
+
+ ret = auxiliary_device_add(aux);
+ if (ret)
+ return ret;
+ ret = devm_add_action_or_reset(dev, ti_sn65dsi86_delete_aux, aux);
+
+ return ret;
+}
+
+static int ti_sn65dsi86_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ti_sn65dsi86 *pdata;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ DRM_ERROR("device doesn't support I2C\n");
+ return -ENODEV;
}
- drm_bridge_remove(&pdata->bridge);
+ pdata = devm_kzalloc(dev, sizeof(struct ti_sn65dsi86), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+ dev_set_drvdata(dev, pdata);
+ pdata->dev = dev;
- return 0;
+ mutex_init(&pdata->comms_mutex);
+
+ pdata->regmap = devm_regmap_init_i2c(client,
+ &ti_sn65dsi86_regmap_config);
+ if (IS_ERR(pdata->regmap)) {
+ DRM_ERROR("regmap i2c init failed\n");
+ return PTR_ERR(pdata->regmap);
+ }
+
+ pdata->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->enable_gpio)) {
+ DRM_ERROR("failed to get enable gpio from DT\n");
+ ret = PTR_ERR(pdata->enable_gpio);
+ return ret;
+ }
+
+ ret = ti_sn65dsi86_parse_regulators(pdata);
+ if (ret) {
+ DRM_ERROR("failed to parse regulators\n");
+ return ret;
+ }
+
+ pdata->refclk = devm_clk_get_optional(dev, "refclk");
+ if (IS_ERR(pdata->refclk))
+ return PTR_ERR(pdata->refclk);
+
+ pm_runtime_enable(dev);
+ ret = devm_add_action_or_reset(dev, ti_sn65dsi86_runtime_disable, dev);
+ if (ret)
+ return ret;
+ pm_runtime_set_autosuspend_delay(pdata->dev, 500);
+ pm_runtime_use_autosuspend(pdata->dev);
+
+ ti_sn65dsi86_debugfs_init(pdata);
+
+ /*
+ * Break ourselves up into a collection of aux devices. The only real
+ * motivation here is to solve the chicken-and-egg problem of probe
+ * ordering. The bridge wants the panel to be there when it probes.
+ * The panel wants its HPD GPIO (provided by sn65dsi86 on some boards)
+ * when it probes. There will soon be other devices (DDC I2C bus, PWM)
+ * that have the same problem. Having sub-devices allows some of the
+ * sub-devices to finish probing even if others return -EPROBE_DEFER and
+ * gets us around the problem.
+ */
+
+ if (IS_ENABLED(CONFIG_OF_GPIO)) {
+ ret = ti_sn65dsi86_add_aux_device(pdata, &pdata->gpio_aux, "gpio");
+ if (ret)
+ return ret;
+ }
+
+ return ti_sn65dsi86_add_aux_device(pdata, &pdata->bridge_aux, "bridge");
}
-static struct i2c_device_id ti_sn_bridge_id[] = {
+static struct i2c_device_id ti_sn65dsi86_id[] = {
{ "ti,sn65dsi86", 0},
{},
};
-MODULE_DEVICE_TABLE(i2c, ti_sn_bridge_id);
+MODULE_DEVICE_TABLE(i2c, ti_sn65dsi86_id);
-static const struct of_device_id ti_sn_bridge_match_table[] = {
+static const struct of_device_id ti_sn65dsi86_match_table[] = {
{.compatible = "ti,sn65dsi86"},
{},
};
-MODULE_DEVICE_TABLE(of, ti_sn_bridge_match_table);
+MODULE_DEVICE_TABLE(of, ti_sn65dsi86_match_table);
-static struct i2c_driver ti_sn_bridge_driver = {
+static struct i2c_driver ti_sn65dsi86_driver = {
.driver = {
.name = "ti_sn65dsi86",
- .of_match_table = ti_sn_bridge_match_table,
- .pm = &ti_sn_bridge_pm_ops,
+ .of_match_table = ti_sn65dsi86_match_table,
+ .pm = &ti_sn65dsi86_pm_ops,
},
- .probe = ti_sn_bridge_probe,
- .remove = ti_sn_bridge_remove,
- .id_table = ti_sn_bridge_id,
+ .probe = ti_sn65dsi86_probe,
+ .id_table = ti_sn65dsi86_id,
};
-module_i2c_driver(ti_sn_bridge_driver);
+
+static int __init ti_sn65dsi86_init(void)
+{
+ int ret;
+
+ ret = i2c_add_driver(&ti_sn65dsi86_driver);
+ if (ret)
+ return ret;
+
+ ret = ti_sn_gpio_register();
+ if (ret)
+ goto err_main_was_registered;
+
+ ret = auxiliary_driver_register(&ti_sn_bridge_driver);
+ if (ret)
+ goto err_gpio_was_registered;
+
+ return 0;
+
+err_gpio_was_registered:
+ ti_sn_gpio_unregister();
+err_main_was_registered:
+ i2c_del_driver(&ti_sn65dsi86_driver);
+
+ return ret;
+}
+module_init(ti_sn65dsi86_init);
+
+static void __exit ti_sn65dsi86_exit(void)
+{
+ auxiliary_driver_unregister(&ti_sn_bridge_driver);
+ ti_sn_gpio_unregister();
+ i2c_del_driver(&ti_sn65dsi86_driver);
+}
+module_exit(ti_sn65dsi86_exit);
MODULE_AUTHOR("Sandeep Panda <spanda@codeaurora.org>");
MODULE_DESCRIPTION("sn65dsi86 DSI to eDP bridge driver");
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 5311d03d49cc..a4ad6fd13abc 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -35,9 +35,10 @@
#include <linux/pci.h>
#include <linux/slab.h>
+#if IS_ENABLED(CONFIG_AGP)
#include <asm/agp.h>
+#endif
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -45,6 +46,8 @@
#include "drm_legacy.h"
+#if IS_ENABLED(CONFIG_AGP)
+
/*
* Get AGP information.
*
@@ -53,7 +56,7 @@
* Verifies the AGP device has been initialized and acquired and fills in the
* drm_agp_info structure with the information in drm_agp_head::agp_info.
*/
-int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
+int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info)
{
struct agp_kern_info *kern;
@@ -73,15 +76,15 @@ int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info)
return 0;
}
-EXPORT_SYMBOL(drm_agp_info);
+EXPORT_SYMBOL(drm_legacy_agp_info);
-int drm_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_agp_info *info = data;
int err;
- err = drm_agp_info(dev, info);
+ err = drm_legacy_agp_info(dev, info);
if (err)
return err;
@@ -97,7 +100,7 @@ int drm_agp_info_ioctl(struct drm_device *dev, void *data,
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
-int drm_agp_acquire(struct drm_device *dev)
+int drm_legacy_agp_acquire(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
@@ -111,7 +114,7 @@ int drm_agp_acquire(struct drm_device *dev)
dev->agp->acquired = 1;
return 0;
}
-EXPORT_SYMBOL(drm_agp_acquire);
+EXPORT_SYMBOL(drm_legacy_agp_acquire);
/*
* Acquire the AGP device (ioctl).
@@ -121,10 +124,10 @@ EXPORT_SYMBOL(drm_agp_acquire);
* Verifies the AGP device hasn't been acquired before and calls
* \c agp_backend_acquire.
*/
-int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- return drm_agp_acquire((struct drm_device *) file_priv->minor->dev);
+ return drm_legacy_agp_acquire((struct drm_device *)file_priv->minor->dev);
}
/*
@@ -135,7 +138,7 @@ int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
*
* Verifies the AGP device has been acquired and calls \c agp_backend_release.
*/
-int drm_agp_release(struct drm_device *dev)
+int drm_legacy_agp_release(struct drm_device *dev)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -143,12 +146,12 @@ int drm_agp_release(struct drm_device *dev)
dev->agp->acquired = 0;
return 0;
}
-EXPORT_SYMBOL(drm_agp_release);
+EXPORT_SYMBOL(drm_legacy_agp_release);
-int drm_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- return drm_agp_release(dev);
+ return drm_legacy_agp_release(dev);
}
/*
@@ -161,7 +164,7 @@ int drm_agp_release_ioctl(struct drm_device *dev, void *data,
* Verifies the AGP device has been acquired but not enabled, and calls
* \c agp_enable.
*/
-int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
+int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
{
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
@@ -171,14 +174,14 @@ int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode)
dev->agp->enabled = 1;
return 0;
}
-EXPORT_SYMBOL(drm_agp_enable);
+EXPORT_SYMBOL(drm_legacy_agp_enable);
-int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_agp_mode *mode = data;
- return drm_agp_enable(dev, *mode);
+ return drm_legacy_agp_enable(dev, *mode);
}
/*
@@ -189,7 +192,7 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
* Verifies the AGP device is present and has been acquired, allocates the
* memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
*/
-int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
+int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
struct agp_memory *memory;
@@ -221,15 +224,15 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
return 0;
}
-EXPORT_SYMBOL(drm_agp_alloc);
+EXPORT_SYMBOL(drm_legacy_agp_alloc);
-int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
+int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_agp_buffer *request = data;
- return drm_agp_alloc(dev, request);
+ return drm_legacy_agp_alloc(dev, request);
}
/*
@@ -241,8 +244,8 @@ int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
*
* Walks through drm_agp_head::memory until finding a matching handle.
*/
-static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device *dev,
- unsigned long handle)
+static struct drm_agp_mem *drm_legacy_agp_lookup_entry(struct drm_device *dev,
+ unsigned long handle)
{
struct drm_agp_mem *entry;
@@ -261,14 +264,14 @@ static struct drm_agp_mem *drm_agp_lookup_entry(struct drm_device *dev,
* Verifies the AGP device is present and acquired, looks-up the AGP memory
* entry and passes it to the unbind_agp() function.
*/
-int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
+int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
{
struct drm_agp_mem *entry;
int ret;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- entry = drm_agp_lookup_entry(dev, request->handle);
+ entry = drm_legacy_agp_lookup_entry(dev, request->handle);
if (!entry || !entry->bound)
return -EINVAL;
ret = agp_unbind_memory(entry->memory);
@@ -276,15 +279,15 @@ int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request)
entry->bound = 0;
return ret;
}
-EXPORT_SYMBOL(drm_agp_unbind);
+EXPORT_SYMBOL(drm_legacy_agp_unbind);
-int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_agp_binding *request = data;
- return drm_agp_unbind(dev, request);
+ return drm_legacy_agp_unbind(dev, request);
}
/*
@@ -296,7 +299,7 @@ int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
* is currently bound into the GATT. Looks-up the AGP memory entry and passes
* it to bind_agp() function.
*/
-int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
+int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
{
struct drm_agp_mem *entry;
int retcode;
@@ -304,7 +307,7 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- entry = drm_agp_lookup_entry(dev, request->handle);
+ entry = drm_legacy_agp_lookup_entry(dev, request->handle);
if (!entry || entry->bound)
return -EINVAL;
page = DIV_ROUND_UP(request->offset, PAGE_SIZE);
@@ -316,15 +319,15 @@ int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request)
dev->agp->base, entry->bound);
return 0;
}
-EXPORT_SYMBOL(drm_agp_bind);
+EXPORT_SYMBOL(drm_legacy_agp_bind);
-int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_agp_binding *request = data;
- return drm_agp_bind(dev, request);
+ return drm_legacy_agp_bind(dev, request);
}
/*
@@ -337,13 +340,13 @@ int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
* unbind_agp(). Frees it via free_agp() as well as the entry itself
* and unlinks from the doubly linked list it's inserted in.
*/
-int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
+int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
{
struct drm_agp_mem *entry;
if (!dev->agp || !dev->agp->acquired)
return -EINVAL;
- entry = drm_agp_lookup_entry(dev, request->handle);
+ entry = drm_legacy_agp_lookup_entry(dev, request->handle);
if (!entry)
return -EINVAL;
if (entry->bound)
@@ -355,15 +358,15 @@ int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request)
kfree(entry);
return 0;
}
-EXPORT_SYMBOL(drm_agp_free);
+EXPORT_SYMBOL(drm_legacy_agp_free);
-int drm_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
struct drm_agp_buffer *request = data;
- return drm_agp_free(dev, request);
+ return drm_legacy_agp_free(dev, request);
}
/*
@@ -378,7 +381,7 @@ int drm_agp_free_ioctl(struct drm_device *dev, void *data,
* Note that final cleanup of the kmalloced structure is directly done in
* drm_pci_agp_destroy.
*/
-struct drm_agp_head *drm_agp_init(struct drm_device *dev)
+struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev->dev);
struct drm_agp_head *head = NULL;
@@ -409,7 +412,7 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
return head;
}
/* Only exported for i810.ko */
-EXPORT_SYMBOL(drm_agp_init);
+EXPORT_SYMBOL(drm_legacy_agp_init);
/**
* drm_legacy_agp_clear - Clear AGP resource list
@@ -439,8 +442,10 @@ void drm_legacy_agp_clear(struct drm_device *dev)
INIT_LIST_HEAD(&dev->agp->memory);
if (dev->agp->acquired)
- drm_agp_release(dev);
+ drm_legacy_agp_release(dev);
dev->agp->acquired = 0;
dev->agp->enabled = 0;
}
+
+#endif
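The drm_agpsupport.c hunk above is a mechanical rename of the AGP entry points into the drm_legacy_agp_* namespace, now compiled only when CONFIG_AGP is enabled; the calling convention is unchanged. For orientation, a rough sketch of how a legacy driver would drive the renamed API (error handling trimmed, values and the helper name purely illustrative, declarations assumed visible via the DRM legacy internals):

static int example_legacy_agp_setup(struct drm_device *dev)
{
	struct drm_agp_mode mode = { .mode = 0 };	/* keep the probed mode */
	struct drm_agp_buffer buf = { .size = 4 * PAGE_SIZE };
	struct drm_agp_binding bind = { };

	dev->agp = drm_legacy_agp_init(dev);		/* probe the AGP bridge */
	if (!dev->agp || drm_legacy_agp_acquire(dev))
		return -ENODEV;

	drm_legacy_agp_enable(dev, mode);		/* program the AGP mode */
	if (!drm_legacy_agp_alloc(dev, &buf)) {		/* fills in buf.handle */
		bind.handle = buf.handle;
		bind.offset = 0;
		drm_legacy_agp_bind(dev, &bind);	/* map into the GATT */
	}

	return 0;
}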
diff --git a/drivers/gpu/drm/drm_aperture.c b/drivers/gpu/drm/drm_aperture.c
new file mode 100644
index 000000000000..33bf018c3bdf
--- /dev/null
+++ b/drivers/gpu/drm/drm_aperture.c
@@ -0,0 +1,344 @@
+// SPDX-License-Identifier: MIT
+
+#include <linux/device.h>
+#include <linux/fb.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h> /* for firmware helpers */
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vgaarb.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_print.h>
+
+/**
+ * DOC: overview
+ *
+ * A graphics device might be supported by different drivers, but only one
+ * driver can be active at any given time. Many systems load a generic
+ * graphics driver, such as EFI-GOP or VESA, early during the boot process.
+ * During later boot stages, they replace the generic driver with a dedicated,
+ * hardware-specific driver. To take over the device, the dedicated driver
+ * first has to remove the generic driver. DRM aperture functions manage
+ * ownership of DRM framebuffer memory and hand-over between drivers.
+ *
+ * DRM drivers should call drm_aperture_remove_conflicting_framebuffers()
+ * at the top of their probe function. The function removes any generic
+ * driver that is currently associated with the given framebuffer memory.
+ * If the framebuffer is located at PCI BAR 0, the corresponding code looks
+ * like the example given below.
+ *
+ * .. code-block:: c
+ *
+ * static int remove_conflicting_framebuffers(struct pci_dev *pdev)
+ * {
+ * bool primary = false;
+ * resource_size_t base, size;
+ *
+ * base = pci_resource_start(pdev, 0);
+ * size = pci_resource_len(pdev, 0);
+ * #ifdef CONFIG_X86
+ * primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
+ * #endif
+ *
+ * return drm_aperture_remove_conflicting_framebuffers(base, size, primary,
+ * "example driver");
+ * }
+ *
+ * static int probe(struct pci_dev *pdev)
+ * {
+ * int ret;
+ *
+ * // Remove any generic drivers...
+ * ret = remove_conflicting_framebuffers(pdev);
+ * if (ret)
+ * return ret;
+ *
+ * // ... and initialize the hardware.
+ * ...
+ *
+ * drm_dev_register();
+ *
+ * return 0;
+ * }
+ *
+ * PCI device drivers should call
+ * drm_aperture_remove_conflicting_pci_framebuffers() and let it detect the
+ * framebuffer apertures automatically. Device drivers without knowledge of
+ * the framebuffer's location shall call drm_aperture_remove_framebuffers(),
+ * which removes all drivers for known framebuffers.
+ *
+ * Drivers that are susceptible to being removed by other drivers, such as
+ * generic EFI or VESA drivers, have to register themselves as owners of their
+ * given framebuffer memory. Ownership of the framebuffer memory is achieved
+ * by calling devm_aperture_acquire_from_firmware(). On success, the driver
+ * is the owner of the framebuffer range. The function fails if the
+ * framebuffer is already owned by another driver. See below for an example.
+ *
+ * .. code-block:: c
+ *
+ * static int acquire_framebuffers(struct drm_device *dev, struct platform_device *pdev)
+ * {
+ *		struct resource *mem;
+ *		resource_size_t base, size;
+ *
+ * mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ * if (!mem)
+ * return -EINVAL;
+ * base = mem->start;
+ * size = resource_size(mem);
+ *
+ *		return devm_aperture_acquire_from_firmware(dev, base, size);
+ * }
+ *
+ * static int probe(struct platform_device *pdev)
+ * {
+ * struct drm_device *dev;
+ * int ret;
+ *
+ * // ... Initialize the device...
+ * dev = devm_drm_dev_alloc();
+ * ...
+ *
+ * // ... and acquire ownership of the framebuffer.
+ * ret = acquire_framebuffers(dev, pdev);
+ * if (ret)
+ * return ret;
+ *
+ * drm_dev_register(dev, 0);
+ *
+ * return 0;
+ * }
+ *
+ * The generic driver is now subject to forced removal by other drivers. This
+ * only works for platform drivers that support hot unplug.
+ * When a driver calls drm_aperture_remove_conflicting_framebuffers() et al.
+ * for the registered framebuffer range, the aperture helpers call
+ * platform_device_unregister() and the generic driver unloads itself. It
+ * may not access the device's registers, framebuffer memory, ROM, etc.
+ * afterwards.
+ */
+
+struct drm_aperture {
+ struct drm_device *dev;
+ resource_size_t base;
+ resource_size_t size;
+ struct list_head lh;
+ void (*detach)(struct drm_device *dev);
+};
+
+static LIST_HEAD(drm_apertures);
+static DEFINE_MUTEX(drm_apertures_lock);
+
+static bool overlap(resource_size_t base1, resource_size_t end1,
+ resource_size_t base2, resource_size_t end2)
+{
+ return (base1 < end2) && (end1 > base2);
+}
+
+static void devm_aperture_acquire_release(void *data)
+{
+ struct drm_aperture *ap = data;
+ bool detached = !ap->dev;
+
+ if (detached)
+ return;
+
+ mutex_lock(&drm_apertures_lock);
+ list_del(&ap->lh);
+ mutex_unlock(&drm_apertures_lock);
+}
+
+static int devm_aperture_acquire(struct drm_device *dev,
+ resource_size_t base, resource_size_t size,
+ void (*detach)(struct drm_device *))
+{
+	resource_size_t end = base + size;
+ struct list_head *pos;
+ struct drm_aperture *ap;
+
+ mutex_lock(&drm_apertures_lock);
+
+ list_for_each(pos, &drm_apertures) {
+ ap = container_of(pos, struct drm_aperture, lh);
+		if (overlap(base, end, ap->base, ap->base + ap->size)) {
+			mutex_unlock(&drm_apertures_lock);
+			return -EBUSY;
+		}
+ }
+
+ ap = devm_kzalloc(dev->dev, sizeof(*ap), GFP_KERNEL);
+	if (!ap) {
+		mutex_unlock(&drm_apertures_lock);
+		return -ENOMEM;
+	}
+
+ ap->dev = dev;
+ ap->base = base;
+ ap->size = size;
+ ap->detach = detach;
+ INIT_LIST_HEAD(&ap->lh);
+
+ list_add(&ap->lh, &drm_apertures);
+
+ mutex_unlock(&drm_apertures_lock);
+
+ return devm_add_action_or_reset(dev->dev, devm_aperture_acquire_release, ap);
+}
+
+static void drm_aperture_detach_firmware(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+
+ /*
+ * Remove the device from the device hierarchy. This is the right thing
+ * to do for firmware-based DRM drivers, such as EFI, VESA or VGA. After
+ * the new driver takes over the hardware, the firmware device's state
+ * will be lost.
+ *
+ * For non-platform devices, a new callback would be required.
+ *
+ * If the aperture helpers ever need to handle native drivers, this call
+ * would only have to unplug the DRM device, so that the hardware device
+ * stays around after detachment.
+ */
+ platform_device_unregister(pdev);
+}
+
+/**
+ * devm_aperture_acquire_from_firmware - Acquires ownership of a firmware framebuffer
+ * on behalf of a DRM driver.
+ * @dev: the DRM device to own the framebuffer memory
+ * @base: the framebuffer's byte offset in physical memory
+ * @size: the framebuffer size in bytes
+ *
+ * Installs the given device as the new owner of the framebuffer. The function
+ * expects the framebuffer to be provided by a platform device that has been
+ * set up by firmware. Firmware can be any generic interface, such as EFI,
+ * VESA, VGA, etc. If the native hardware driver takes over ownership of the
+ * framebuffer range, the firmware state gets lost. Aperture helpers will then
+ * unregister the platform device automatically. Acquired apertures are
+ * released automatically if the underlying device goes away.
+ *
+ * The function fails if the framebuffer range, or parts of it, is currently
+ * owned by another driver. To evict current owners, callers should use
+ * drm_aperture_remove_conflicting_framebuffers() et al. before calling this
+ * function. The function also fails if the given device is not a platform
+ * device.
+ *
+ * Returns:
+ * 0 on success, or a negative errno value otherwise.
+ */
+int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
+ resource_size_t size)
+{
+ if (drm_WARN_ON(dev, !dev_is_platform(dev->dev)))
+ return -EINVAL;
+
+ return devm_aperture_acquire(dev, base, size, drm_aperture_detach_firmware);
+}
+EXPORT_SYMBOL(devm_aperture_acquire_from_firmware);
+
+static void drm_aperture_detach_drivers(resource_size_t base, resource_size_t size)
+{
+ resource_size_t end = base + size;
+ struct list_head *pos, *n;
+
+ mutex_lock(&drm_apertures_lock);
+
+ list_for_each_safe(pos, n, &drm_apertures) {
+ struct drm_aperture *ap =
+ container_of(pos, struct drm_aperture, lh);
+ struct drm_device *dev = ap->dev;
+
+ if (WARN_ON_ONCE(!dev))
+ continue;
+
+ if (!overlap(base, end, ap->base, ap->base + ap->size))
+ continue;
+
+ ap->dev = NULL; /* detach from device */
+ list_del(&ap->lh);
+
+ ap->detach(dev);
+ }
+
+ mutex_unlock(&drm_apertures_lock);
+}
+
+/**
+ * drm_aperture_remove_conflicting_framebuffers - remove existing framebuffers in the given range
+ * @base: the aperture's base address in physical memory
+ * @size: aperture size in bytes
+ * @primary: also kick vga16fb if present
+ * @name: requesting driver name
+ *
+ * This function removes graphics device drivers which use memory range described by
+ * @base and @size.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
+ bool primary, const char *name)
+{
+#if IS_REACHABLE(CONFIG_FB)
+ struct apertures_struct *a;
+ int ret;
+
+ a = alloc_apertures(1);
+ if (!a)
+ return -ENOMEM;
+
+ a->ranges[0].base = base;
+ a->ranges[0].size = size;
+
+ ret = remove_conflicting_framebuffers(a, name, primary);
+ kfree(a);
+
+ if (ret)
+ return ret;
+#endif
+
+ drm_aperture_detach_drivers(base, size);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_aperture_remove_conflicting_framebuffers);
+
+/**
+ * drm_aperture_remove_conflicting_pci_framebuffers - remove existing framebuffers for PCI devices
+ * @pdev: PCI device
+ * @name: requesting driver name
+ *
+ * This function removes graphics device drivers that use the memory ranges
+ * configured for any of @pdev's memory BARs. The function assumes that a PCI
+ * device with a shadowed ROM drives the primary display and so kicks out vga16fb.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name)
+{
+ resource_size_t base, size;
+ int bar, ret = 0;
+
+ for (bar = 0; bar < PCI_STD_NUM_BARS; ++bar) {
+ if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM))
+ continue;
+ base = pci_resource_start(pdev, bar);
+ size = pci_resource_len(pdev, bar);
+ drm_aperture_detach_drivers(base, size);
+ }
+
+ /*
+ * WARNING: Apparently we must kick fbdev drivers before vgacon,
+ * otherwise the vga fbdev driver falls over.
+ */
+#if IS_REACHABLE(CONFIG_FB)
+ ret = remove_conflicting_pci_framebuffers(pdev, name);
+#endif
+ if (ret == 0)
+ ret = vga_remove_vgacon(pdev);
+ return ret;
+}
+EXPORT_SYMBOL(drm_aperture_remove_conflicting_pci_framebuffers);
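In practice a PCI driver uses the helper above instead of computing BAR ranges by hand. A minimal sketch of a probe function built on it (the driver name and the surrounding setup are illustrative, not part of this patch):

#include <linux/pci.h>

#include <drm/drm_aperture.h>

static int example_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	/* Evict any firmware framebuffer driver (and vgacon) sitting on our BARs. */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "example");
	if (ret)
		return ret;

	/* ... continue with pcim_enable_device(), devm_drm_dev_alloc(), ... */
	return 0;
}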
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index dd9ed000ad4c..a8bbb021684b 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -385,7 +385,8 @@ static int drm_atomic_crtc_check(const struct drm_crtc_state *old_crtc_state,
/* The state->enable vs. state->mode_blob checks can be WARN_ON,
* as this is a kernel-internal detail that userspace should never
- * be able to trigger. */
+ * be able to trigger.
+ */
if (drm_core_check_feature(crtc->dev, DRIVER_ATOMIC) &&
WARN_ON(new_crtc_state->enable && !new_crtc_state->mode_blob)) {
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] enabled without mode blob\n",
@@ -1302,8 +1303,8 @@ int drm_atomic_check_only(struct drm_atomic_state *state)
struct drm_crtc_state *new_crtc_state;
struct drm_connector *conn;
struct drm_connector_state *conn_state;
- unsigned requested_crtc = 0;
- unsigned affected_crtc = 0;
+ unsigned int requested_crtc = 0;
+ unsigned int affected_crtc = 0;
int i, ret = 0;
DRM_DEBUG_ATOMIC("checking %p\n", state);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index f2b3e28d938b..bc3487964fb5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -106,7 +106,7 @@ static int handle_conflicting_encoders(struct drm_atomic_state *state,
struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
struct drm_encoder *encoder;
- unsigned encoder_mask = 0;
+ unsigned int encoder_mask = 0;
int i, ret = 0;
/*
@@ -609,7 +609,7 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
struct drm_connector *connector;
struct drm_connector_state *old_connector_state, *new_connector_state;
int i, ret;
- unsigned connectors_mask = 0;
+ unsigned int connectors_mask = 0;
for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
bool has_connectors =
@@ -1018,8 +1018,10 @@ disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
struct drm_encoder *encoder;
struct drm_bridge *bridge;
- /* Shut down everything that's in the changeset and currently
- * still on. So need to check the old, saved state. */
+ /*
+ * Shut down everything that's in the changeset and currently
+ * still on. So need to check the old, saved state.
+ */
if (!old_conn_state->crtc)
continue;
@@ -1409,7 +1411,7 @@ EXPORT_SYMBOL(drm_atomic_helper_commit_modeset_enables);
* @dev: DRM device
* @state: atomic state object with old state structures
* @pre_swap: If true, do an interruptible wait, and @state is the new state.
- * Otherwise @state is the old state.
+ * Otherwise @state is the old state.
*
* For implicit sync, driver should fish the exclusive fence out from the
* incoming fb's and stash it in the drm_plane_state. This is called after
@@ -1478,7 +1480,7 @@ drm_atomic_helper_wait_for_vblanks(struct drm_device *dev,
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state, *new_crtc_state;
int i, ret;
- unsigned crtc_mask = 0;
+ unsigned int crtc_mask = 0;
/*
* Legacy cursor ioctls are completely unsynced, and userspace
@@ -1953,8 +1955,10 @@ static int stall_checks(struct drm_crtc *crtc, bool nonblock)
list_for_each_entry(commit, &crtc->commit_list, commit_entry) {
if (i == 0) {
completed = try_wait_for_completion(&commit->flip_done);
- /* Userspace is not allowed to get ahead of the previous
- * commit with nonblocking ones. */
+ /*
+ * Userspace is not allowed to get ahead of the previous
+ * commit with nonblocking ones.
+ */
if (!completed && nonblock) {
spin_unlock(&crtc->commit_lock);
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] busy with a previous commit\n",
@@ -2103,9 +2107,11 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
if (ret)
return ret;
- /* Drivers only send out events when at least either current or
+ /*
+ * Drivers only send out events when at least either current or
* new CRTC state is active. Complete right away if everything
- * stays off. */
+ * stays off.
+ */
if (!old_crtc_state->active && !new_crtc_state->active) {
complete_all(&commit->flip_done);
continue;
@@ -2137,8 +2143,10 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
}
for_each_oldnew_connector_in_state(state, conn, old_conn_state, new_conn_state, i) {
- /* Userspace is not allowed to get ahead of the previous
- * commit with nonblocking ones. */
+ /*
+ * Userspace is not allowed to get ahead of the previous
+ * commit with nonblocking ones.
+ */
if (nonblock && old_conn_state->commit &&
!try_wait_for_completion(&old_conn_state->commit->flip_done)) {
DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] busy with a previous commit\n",
@@ -2156,8 +2164,10 @@ int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
}
for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) {
- /* Userspace is not allowed to get ahead of the previous
- * commit with nonblocking ones. */
+ /*
+ * Userspace is not allowed to get ahead of the previous
+ * commit with nonblocking ones.
+ */
if (nonblock && old_plane_state->commit &&
!try_wait_for_completion(&old_plane_state->commit->flip_done)) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] busy with a previous commit\n",
@@ -2575,7 +2585,7 @@ drm_atomic_helper_commit_planes_on_crtc(struct drm_crtc_state *old_crtc_state)
struct drm_crtc_state *new_crtc_state =
drm_atomic_get_new_crtc_state(old_state, crtc);
struct drm_plane *plane;
- unsigned plane_mask;
+ unsigned int plane_mask;
plane_mask = old_crtc_state->plane_mask;
plane_mask |= new_crtc_state->plane_mask;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 268bb69c2e2f..438e9585b225 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -78,8 +78,8 @@ int drm_atomic_set_mode_for_crtc(struct drm_crtc_state *state,
drm_mode_convert_to_umode(&umode, mode);
state->mode_blob =
drm_property_create_blob(state->crtc->dev,
- sizeof(umode),
- &umode);
+ sizeof(umode),
+ &umode);
if (IS_ERR(state->mode_blob))
return PTR_ERR(state->mode_blob);
@@ -114,7 +114,7 @@ EXPORT_SYMBOL(drm_atomic_set_mode_for_crtc);
* Zero on success, error code on failure. Cannot return -EDEADLK.
*/
int drm_atomic_set_mode_prop_for_crtc(struct drm_crtc_state *state,
- struct drm_property_blob *blob)
+ struct drm_property_blob *blob)
{
struct drm_crtc *crtc = state->crtc;
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index f2d46b7ac6f9..f00e5abdbbf4 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -300,7 +300,8 @@ int drm_master_open(struct drm_file *file_priv)
int ret = 0;
/* if there is no current master make this fd it, but do not create
- * any master object for render clients */
+ * any master object for render clients
+ */
mutex_lock(&dev->master_mutex);
if (!dev->master)
ret = drm_new_set_master(dev, file_priv);
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index 26e2f2ffd255..ec37cbfabb50 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -328,8 +328,8 @@ unsigned int drm_rotation_simplify(unsigned int rotation,
if (rotation & ~supported_rotations) {
rotation ^= DRM_MODE_REFLECT_X | DRM_MODE_REFLECT_Y;
rotation = (rotation & DRM_MODE_REFLECT_MASK) |
- BIT((ffs(rotation & DRM_MODE_ROTATE_MASK) + 1)
- % 4);
+ BIT((ffs(rotation & DRM_MODE_ROTATE_MASK) + 1)
+ % 4);
}
return rotation;
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index 64f0effb52ac..044acd07c153 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -522,6 +522,9 @@ void drm_bridge_chain_pre_enable(struct drm_bridge *bridge)
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
if (iter->funcs->pre_enable)
iter->funcs->pre_enable(iter);
+
+ if (iter == bridge)
+ break;
}
}
EXPORT_SYMBOL(drm_bridge_chain_pre_enable);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index e3d77dfefb0a..4805726b34ac 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -40,7 +40,6 @@
#include <asm/shmparam.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -79,7 +78,7 @@ static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
return entry;
break;
default: /* Make gcc happy */
- ;
+ break;
}
if (entry->map->offset == map->offset)
return entry;
@@ -325,7 +324,8 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
- * need to point to a 64bit variable first. */
+ * need to point to a 64bit variable first.
+ */
map->handle = dma_alloc_coherent(dev->dev,
map->size,
&map->offset,
@@ -674,12 +674,17 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
static void drm_cleanup_buf_error(struct drm_device *dev,
struct drm_buf_entry *entry)
{
+ drm_dma_handle_t *dmah;
int i;
if (entry->seg_count) {
for (i = 0; i < entry->seg_count; i++) {
if (entry->seglist[i]) {
- drm_pci_free(dev, entry->seglist[i]);
+ dmah = entry->seglist[i];
+ dma_free_coherent(dev->dev,
+ dmah->size,
+ dmah->vaddr,
+ dmah->busaddr);
}
}
kfree(entry->seglist);
@@ -978,10 +983,18 @@ int drm_legacy_addbufs_pci(struct drm_device *dev,
page_count = 0;
while (entry->buf_count < count) {
+ dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+ if (!dmah)
+ return -ENOMEM;
- dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);
+ dmah->size = total;
+ dmah->vaddr = dma_alloc_coherent(dev->dev,
+ dmah->size,
+ &dmah->busaddr,
+ GFP_KERNEL);
+ if (!dmah->vaddr) {
+ kfree(dmah);
- if (!dmah) {
/* Set count correctly so we free the proper amount. */
entry->buf_count = count;
entry->seg_count = count;
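The drm_bufs.c hunks above (and the drm_dma.c hunk further down) replace the removed drm_pci_alloc()/drm_pci_free() wrappers with open-coded DMA-API calls around a drm_dma_handle_t. Isolated from the legacy buffer bookkeeping, the pattern they implement is the paired coherent allocation below (a sketch only; the example_* helper names are not in the patch):

static void *example_coherent_alloc(struct drm_device *dev, size_t size,
				    drm_dma_handle_t **out)
{
	drm_dma_handle_t *dmah = kmalloc(sizeof(*dmah), GFP_KERNEL);

	if (!dmah)
		return NULL;

	dmah->size = size;
	dmah->vaddr = dma_alloc_coherent(dev->dev, dmah->size,
					 &dmah->busaddr, GFP_KERNEL);
	if (!dmah->vaddr) {
		kfree(dmah);
		return NULL;
	}

	*out = dmah;
	return dmah->vaddr;
}

static void example_coherent_free(struct drm_device *dev, drm_dma_handle_t *dmah)
{
	dma_free_coherent(dev->dev, dmah->size, dmah->vaddr, dmah->busaddr);
	kfree(dmah);
}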
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 7631f76e7f34..da39e7ff6965 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -20,6 +20,7 @@
* OF THIS SOFTWARE.
*/
+#include <drm/drm_auth.h>
#include <drm/drm_connector.h>
#include <drm/drm_edid.h>
#include <drm/drm_encoder.h>
@@ -279,7 +280,8 @@ int drm_connector_init(struct drm_device *dev,
drm_connector_get_cmdline_mode(connector);
/* We should add connectors at the end to avoid upsetting the connector
- * index too much. */
+ * index too much.
+ */
spin_lock_irq(&config->connector_list_lock);
list_add_tail(&connector->head, &config->connector_list);
config->num_connector++;
@@ -1958,11 +1960,11 @@ int drm_connector_set_path_property(struct drm_connector *connector,
int ret;
ret = drm_property_replace_global_blob(dev,
- &connector->path_blob_ptr,
- strlen(path) + 1,
- path,
- &connector->base,
- dev->mode_config.path_property);
+ &connector->path_blob_ptr,
+ strlen(path) + 1,
+ path,
+ &connector->base,
+ dev->mode_config.path_property);
return ret;
}
EXPORT_SYMBOL(drm_connector_set_path_property);
@@ -1988,11 +1990,11 @@ int drm_connector_set_tile_property(struct drm_connector *connector)
if (!connector->has_tile) {
ret = drm_property_replace_global_blob(dev,
- &connector->tile_blob_ptr,
- 0,
- NULL,
- &connector->base,
- dev->mode_config.tile_property);
+ &connector->tile_blob_ptr,
+ 0,
+ NULL,
+ &connector->base,
+ dev->mode_config.tile_property);
return ret;
}
@@ -2003,11 +2005,11 @@ int drm_connector_set_tile_property(struct drm_connector *connector)
connector->tile_h_size, connector->tile_v_size);
ret = drm_property_replace_global_blob(dev,
- &connector->tile_blob_ptr,
- strlen(tile) + 1,
- tile,
- &connector->base,
- dev->mode_config.tile_property);
+ &connector->tile_blob_ptr,
+ strlen(tile) + 1,
+ tile,
+ &connector->base,
+ dev->mode_config.tile_property);
return ret;
}
EXPORT_SYMBOL(drm_connector_set_tile_property);
@@ -2076,10 +2078,10 @@ int drm_connector_update_edid_property(struct drm_connector *connector,
ret = drm_property_replace_global_blob(dev,
&connector->edid_blob_ptr,
- size,
- edid,
- &connector->base,
- dev->mode_config.edid_property);
+ size,
+ edid,
+ &connector->base,
+ dev->mode_config.edid_property);
if (ret)
return ret;
return drm_connector_set_tile_property(connector);
@@ -2151,6 +2153,75 @@ int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
EXPORT_SYMBOL(drm_connector_attach_max_bpc_property);
/**
+ * drm_connector_attach_hdr_output_metadata_property - attach "HDR_OUTPUT_METADATA" property
+ * @connector: connector to attach the property on.
+ *
+ * This is used to allow userspace to send HDR metadata to the
+ * driver.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector)
+{
+ struct drm_device *dev = connector->dev;
+ struct drm_property *prop = dev->mode_config.hdr_output_metadata_property;
+
+ drm_object_attach_property(&connector->base, prop, 0);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_hdr_output_metadata_property);
+
+/**
+ * drm_connector_attach_colorspace_property - attach "Colorspace" property
+ * @connector: connector to attach the property on.
+ *
+ * This is used to allow userspace to signal the output colorspace
+ * to the driver.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_attach_colorspace_property(struct drm_connector *connector)
+{
+ struct drm_property *prop = connector->colorspace_property;
+
+ drm_object_attach_property(&connector->base, prop, DRM_MODE_COLORIMETRY_DEFAULT);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_connector_attach_colorspace_property);
+
+/**
+ * drm_connector_atomic_hdr_metadata_equal - checks if the hdr metadata changed
+ * @old_state: old connector state to compare
+ * @new_state: new connector state to compare
+ *
+ * This is used by HDR-enabled drivers to test whether the HDR metadata
+ * have changed between two different connector states (and thus probably
+ * requires a full-blown mode change).
+ *
+ * Returns:
+ * True if the metadata are equal, False otherwise
+ */
+bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state)
+{
+ struct drm_property_blob *old_blob = old_state->hdr_output_metadata;
+ struct drm_property_blob *new_blob = new_state->hdr_output_metadata;
+
+ if (!old_blob || !new_blob)
+ return old_blob == new_blob;
+
+ if (old_blob->length != new_blob->length)
+ return false;
+
+ return !memcmp(old_blob->data, new_blob->data, old_blob->length);
+}
+EXPORT_SYMBOL(drm_connector_atomic_hdr_metadata_equal);
+
+/**
* drm_connector_set_vrr_capable_property - sets the variable refresh rate
* capable property for a connector
* @connector: drm connector
@@ -2288,7 +2359,8 @@ int drm_connector_property_set_ioctl(struct drm_device *dev,
static struct drm_encoder *drm_connector_get_encoder(struct drm_connector *connector)
{
/* For atomic drivers only state objects are synchronously updated and
- * protected by modeset locks, so check those first. */
+ * protected by modeset locks, so check those first.
+ */
if (connector->state)
return connector->state->best_encoder;
return connector->encoder;
@@ -2374,9 +2446,13 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
if (out_resp->count_modes == 0) {
- connector->funcs->fill_modes(connector,
- dev->mode_config.max_width,
- dev->mode_config.max_height);
+ if (drm_is_current_master(file_priv))
+ connector->funcs->fill_modes(connector,
+ dev->mode_config.max_width,
+ dev->mode_config.max_height);
+ else
+ drm_dbg_kms(dev, "User-space requested a forced probe on [CONNECTOR:%d:%s] but is not the DRM master, demoting to read-only probe",
+ connector->base.id, connector->name);
}
out_resp->mm_width = connector->display_info.width_mm;
@@ -2450,7 +2526,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
out_resp->encoder_id = 0;
/* Only grab properties after probing, to make sure EDID and other
- * properties reflect the latest status. */
+ * properties reflect the latest status.
+ */
ret = drm_mode_object_get_properties(&connector->base, file_priv->atomic,
(uint32_t __user *)(unsigned long)(out_resp->props_ptr),
(uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr),
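The three connector helpers added above (HDR_OUTPUT_METADATA attach, Colorspace attach, and the HDR-metadata comparison) centralize plumbing that HDR-capable drivers previously open-coded. A hedged sketch of how a driver might wire them up; everything outside the three helpers is illustrative, and it assumes the driver has already created connector->colorspace_property (e.g. via drm_mode_create_hdmi_colorspace_property()):

#include <drm/drm_atomic.h>
#include <drm/drm_connector.h>

static void example_connector_attach_hdr_props(struct drm_connector *connector)
{
	drm_connector_attach_hdr_output_metadata_property(connector);
	drm_connector_attach_colorspace_property(connector);
}

/* In the connector's atomic_check: force a modeset when the HDR metadata changes. */
static int example_connector_atomic_check(struct drm_connector *connector,
					  struct drm_atomic_state *state)
{
	struct drm_connector_state *old_state =
		drm_atomic_get_old_connector_state(state, connector);
	struct drm_connector_state *new_state =
		drm_atomic_get_new_connector_state(state, connector);
	struct drm_crtc_state *crtc_state;

	if (!new_state->crtc)
		return 0;

	if (!drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) {
		crtc_state = drm_atomic_get_crtc_state(state, new_state->crtc);
		if (IS_ERR(crtc_state))
			return PTR_ERR(crtc_state);
		crtc_state->mode_changed = true;
	}

	return 0;
}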
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index c99be950bf17..54e3c513d6a5 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -312,7 +312,8 @@ static int drm_context_switch_complete(struct drm_device *dev,
/* If a context switch is ever initiated
when the kernel holds the lock, release
- that lock here. */
+ that lock here.
+ */
clear_bit(0, &dev->context_flag);
return 0;
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index d07ba54ec945..eb6b741a6f99 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -81,6 +81,7 @@ int drm_legacy_dma_setup(struct drm_device *dev)
void drm_legacy_dma_takedown(struct drm_device *dev)
{
struct drm_device_dma *dma = dev->dma;
+ drm_dma_handle_t *dmah;
int i, j;
if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA) ||
@@ -100,7 +101,12 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
dma->bufs[i].seg_count);
for (j = 0; j < dma->bufs[i].seg_count; j++) {
if (dma->bufs[i].seglist[j]) {
- drm_pci_free(dev, dma->bufs[i].seglist[j]);
+ dmah = dma->bufs[i].seglist[j];
+ dma_free_coherent(dev->dev,
+ dmah->size,
+ dmah->vaddr,
+ dmah->busaddr);
+ kfree(dmah);
}
}
kfree(dma->bufs[i].seglist);
diff --git a/drivers/gpu/drm/drm_dp_aux_dev.c b/drivers/gpu/drm/drm_dp_aux_dev.c
index e25181bf2c48..06b374cae956 100644
--- a/drivers/gpu/drm/drm_dp_aux_dev.c
+++ b/drivers/gpu/drm/drm_dp_aux_dev.c
@@ -278,6 +278,12 @@ void drm_dp_aux_unregister_devnode(struct drm_dp_aux *aux)
if (!aux_dev) /* attach must have failed */
return;
+ /*
+ * As some AUX adapters may exist as platform devices which outlive their respective DRM
+	 * devices, we clear drm_dev to ensure that we never accidentally reference a stale pointer.
+ */
+ aux->drm_dev = NULL;
+
mutex_lock(&aux_idr_mutex);
idr_remove(&aux_idr, aux_dev->index);
mutex_unlock(&aux_idr_mutex);
diff --git a/drivers/gpu/drm/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
index 1c9ea9f7fdaf..9faf49354cab 100644
--- a/drivers/gpu/drm/drm_dp_dual_mode_helper.c
+++ b/drivers/gpu/drm/drm_dp_dual_mode_helper.c
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/string.h>
+#include <drm/drm_device.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_print.h>
@@ -165,6 +166,7 @@ static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
/**
* drm_dp_dual_mode_detect - Identify the DP dual mode adaptor
+ * @dev: &drm_device to use
* @adapter: I2C adapter for the DDC bus
*
* Attempt to identify the type of the DP dual mode adaptor used.
@@ -178,7 +180,8 @@ static bool is_lspcon_adaptor(const char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN],
* Returns:
* The type of the DP dual mode adaptor used
*/
-enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
+enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev,
+ struct i2c_adapter *adapter)
{
char hdmi_id[DP_DUAL_MODE_HDMI_ID_LEN] = {};
uint8_t adaptor_id = 0x00;
@@ -200,8 +203,8 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_HDMI_ID,
hdmi_id, sizeof(hdmi_id));
- DRM_DEBUG_KMS("DP dual mode HDMI ID: %*pE (err %zd)\n",
- ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
+ drm_dbg_kms(dev, "DP dual mode HDMI ID: %*pE (err %zd)\n",
+ ret ? 0 : (int)sizeof(hdmi_id), hdmi_id, ret);
if (ret)
return DRM_DP_DUAL_MODE_UNKNOWN;
@@ -219,8 +222,7 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
*/
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID,
&adaptor_id, sizeof(adaptor_id));
- DRM_DEBUG_KMS("DP dual mode adaptor ID: %02x (err %zd)\n",
- adaptor_id, ret);
+ drm_dbg_kms(dev, "DP dual mode adaptor ID: %02x (err %zd)\n", adaptor_id, ret);
if (ret == 0) {
if (is_lspcon_adaptor(hdmi_id, adaptor_id))
return DRM_DP_DUAL_MODE_LSPCON;
@@ -236,8 +238,7 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter)
* that we may have misdetected the type.
*/
if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0])
- DRM_ERROR("Unexpected DP dual mode adaptor ID %02x\n",
- adaptor_id);
+ drm_err(dev, "Unexpected DP dual mode adaptor ID %02x\n", adaptor_id);
}
@@ -250,6 +251,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_detect);
/**
* drm_dp_dual_mode_max_tmds_clock - Max TMDS clock for DP dual mode adaptor
+ * @dev: &drm_device to use
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
*
@@ -263,7 +265,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_detect);
* Returns:
* Maximum supported TMDS clock rate for the DP dual mode adaptor in kHz.
*/
-int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter)
{
uint8_t max_tmds_clock;
@@ -283,7 +285,7 @@ int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_MAX_TMDS_CLOCK,
&max_tmds_clock, sizeof(max_tmds_clock));
if (ret || max_tmds_clock == 0x00 || max_tmds_clock == 0xff) {
- DRM_DEBUG_KMS("Failed to query max TMDS clock\n");
+ drm_dbg_kms(dev, "Failed to query max TMDS clock\n");
return 165000;
}
@@ -293,6 +295,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);
/**
* drm_dp_dual_mode_get_tmds_output - Get the state of the TMDS output buffers in the DP dual mode adaptor
+ * @dev: &drm_device to use
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
* @enabled: current state of the TMDS output buffers
@@ -307,8 +310,8 @@ EXPORT_SYMBOL(drm_dp_dual_mode_max_tmds_clock);
* Returns:
* 0 on success, negative error code on failure
*/
-int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
- struct i2c_adapter *adapter,
+int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev,
+ enum drm_dp_dual_mode_type type, struct i2c_adapter *adapter,
bool *enabled)
{
uint8_t tmds_oen;
@@ -322,7 +325,7 @@ int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
- DRM_DEBUG_KMS("Failed to query state of TMDS output buffers\n");
+ drm_dbg_kms(dev, "Failed to query state of TMDS output buffers\n");
return ret;
}
@@ -334,6 +337,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
/**
* drm_dp_dual_mode_set_tmds_output - Enable/disable TMDS output buffers in the DP dual mode adaptor
+ * @dev: &drm_device to use
* @type: DP dual mode adaptor type
* @adapter: I2C adapter for the DDC bus
* @enable: enable (as opposed to disable) the TMDS output buffers
@@ -347,7 +351,7 @@ EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output);
* Returns:
* 0 on success, negative error code on failure
*/
-int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable)
{
uint8_t tmds_oen = enable ? 0 : DP_DUAL_MODE_TMDS_DISABLE;
@@ -367,18 +371,17 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmds_oen, sizeof(tmds_oen));
if (ret) {
- DRM_DEBUG_KMS("Failed to %s TMDS output buffers (%d attempts)\n",
- enable ? "enable" : "disable",
- retry + 1);
+ drm_dbg_kms(dev, "Failed to %s TMDS output buffers (%d attempts)\n",
+ enable ? "enable" : "disable", retry + 1);
return ret;
}
ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_TMDS_OEN,
&tmp, sizeof(tmp));
if (ret) {
- DRM_DEBUG_KMS("I2C read failed during TMDS output buffer %s (%d attempts)\n",
- enable ? "enabling" : "disabling",
- retry + 1);
+ drm_dbg_kms(dev,
+ "I2C read failed during TMDS output buffer %s (%d attempts)\n",
+ enable ? "enabling" : "disabling", retry + 1);
return ret;
}
@@ -386,8 +389,8 @@ int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
return 0;
}
- DRM_DEBUG_KMS("I2C write value mismatch during TMDS output buffer %s\n",
- enable ? "enabling" : "disabling");
+ drm_dbg_kms(dev, "I2C write value mismatch during TMDS output buffer %s\n",
+ enable ? "enabling" : "disabling");
return -EIO;
}
@@ -425,6 +428,7 @@ EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
/**
* drm_lspcon_get_mode: Get LSPCON's current mode of operation by
* reading offset (0x80, 0x41)
+ * @dev: &drm_device to use
* @adapter: I2C-over-aux adapter
* @mode: current lspcon mode of operation output variable
*
@@ -432,7 +436,7 @@ EXPORT_SYMBOL(drm_dp_get_dual_mode_type_name);
* 0 on success, sets the current_mode value to appropriate mode
* -error on failure
*/
-int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode *mode)
{
u8 data;
@@ -440,7 +444,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
int retry;
if (!mode) {
- DRM_ERROR("NULL input\n");
+ drm_err(dev, "NULL input\n");
return -EINVAL;
}
@@ -457,7 +461,7 @@ int drm_lspcon_get_mode(struct i2c_adapter *adapter,
}
if (ret < 0) {
- DRM_DEBUG_KMS("LSPCON read(0x80, 0x41) failed\n");
+ drm_dbg_kms(dev, "LSPCON read(0x80, 0x41) failed\n");
return -EFAULT;
}
@@ -472,13 +476,14 @@ EXPORT_SYMBOL(drm_lspcon_get_mode);
/**
* drm_lspcon_set_mode: Change LSPCON's mode of operation by
* writing offset (0x80, 0x40)
+ * @dev: &drm_device to use
* @adapter: I2C-over-aux adapter
* @mode: required mode of operation
*
* Returns:
* 0 on success, -error on failure/timeout
*/
-int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode mode)
{
u8 data = 0;
@@ -493,7 +498,7 @@ int drm_lspcon_set_mode(struct i2c_adapter *adapter,
ret = drm_dp_dual_mode_write(adapter, DP_DUAL_MODE_LSPCON_MODE_CHANGE,
&data, sizeof(data));
if (ret < 0) {
- DRM_ERROR("LSPCON mode change failed\n");
+ drm_err(dev, "LSPCON mode change failed\n");
return ret;
}
@@ -503,24 +508,23 @@ int drm_lspcon_set_mode(struct i2c_adapter *adapter,
* so wait and retry until time out or done.
*/
do {
- ret = drm_lspcon_get_mode(adapter, &current_mode);
+ ret = drm_lspcon_get_mode(dev, adapter, &current_mode);
if (ret) {
- DRM_ERROR("can't confirm LSPCON mode change\n");
+ drm_err(dev, "can't confirm LSPCON mode change\n");
return ret;
} else {
if (current_mode != mode) {
msleep(10);
time_out -= 10;
} else {
- DRM_DEBUG_KMS("LSPCON mode changed to %s\n",
- mode == DRM_LSPCON_MODE_LS ?
- "LS" : "PCON");
+ drm_dbg_kms(dev, "LSPCON mode changed to %s\n",
+ mode == DRM_LSPCON_MODE_LS ? "LS" : "PCON");
return 0;
}
}
} while (time_out);
- DRM_ERROR("LSPCON mode change timed out\n");
+ drm_err(dev, "LSPCON mode change timed out\n");
return -ETIMEDOUT;
}
EXPORT_SYMBOL(drm_lspcon_set_mode);
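All of the dual-mode/LSPCON helpers above now take the &drm_device as their first argument so that debug and error output lands in per-device logs; callers only grow that one argument. A short sketch of an updated call site (dev and ddc stand for the driver's drm_device and DDC i2c_adapter and are illustrative):

#include <drm/drm_device.h>
#include <drm/drm_dp_dual_mode_helper.h>
#include <drm/drm_print.h>

static void example_probe_dual_mode(struct drm_device *dev, struct i2c_adapter *ddc)
{
	enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(dev, ddc);
	int max_tmds_khz = drm_dp_dual_mode_max_tmds_clock(dev, type, ddc);

	drm_dbg_kms(dev, "adaptor: %s, max TMDS clock %d kHz\n",
		    drm_dp_get_dual_mode_type_name(type), max_tmds_khz);

	/* Only type 2 adaptors let us control the TMDS output buffers. */
	if (type == DRM_DP_DUAL_MODE_TYPE2_HDMI || type == DRM_DP_DUAL_MODE_TYPE2_DVI)
		drm_dp_dual_mode_set_tmds_output(dev, type, ddc, true);
}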
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index cb2f53e56685..55b53df6ce34 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -132,14 +132,15 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ
}
EXPORT_SYMBOL(drm_dp_get_adjust_request_post_cursor);
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
unsigned long rd_interval = dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
DP_TRAINING_AUX_RD_MASK;
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
- rd_interval);
+ drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n",
+ aux->name, rd_interval);
if (rd_interval == 0 || dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14)
rd_interval = 100;
@@ -150,11 +151,12 @@ void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
EXPORT_SYMBOL(drm_dp_link_train_clock_recovery_delay);
-static void __drm_dp_link_train_channel_eq_delay(unsigned long rd_interval)
+static void __drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ unsigned long rd_interval)
{
if (rd_interval > 4)
- DRM_DEBUG_KMS("AUX interval %lu, out of range (max 4)\n",
- rd_interval);
+ drm_dbg_kms(aux->drm_dev, "%s: AUX interval %lu, out of range (max 4)\n",
+ aux->name, rd_interval);
if (rd_interval == 0)
rd_interval = 400;
@@ -164,9 +166,11 @@ static void __drm_dp_link_train_channel_eq_delay(unsigned long rd_interval)
usleep_range(rd_interval, rd_interval * 2);
}
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
- __drm_dp_link_train_channel_eq_delay(dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
+ __drm_dp_link_train_channel_eq_delay(aux,
+ dpcd[DP_TRAINING_AUX_RD_INTERVAL] &
DP_TRAINING_AUX_RD_MASK);
}
EXPORT_SYMBOL(drm_dp_link_train_channel_eq_delay);
@@ -182,13 +186,14 @@ static u8 dp_lttpr_phy_cap(const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE], int r)
return phy_cap[r - DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1];
}
-void drm_dp_lttpr_link_train_channel_eq_delay(const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE])
+void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 phy_cap[DP_LTTPR_PHY_CAP_SIZE])
{
u8 interval = dp_lttpr_phy_cap(phy_cap,
DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1) &
DP_TRAINING_AUX_RD_MASK;
- __drm_dp_link_train_channel_eq_delay(interval);
+ __drm_dp_link_train_channel_eq_delay(aux, interval);
}
EXPORT_SYMBOL(drm_dp_lttpr_link_train_channel_eq_delay);
@@ -215,11 +220,11 @@ drm_dp_dump_access(const struct drm_dp_aux *aux,
const char *arrow = request == DP_AUX_NATIVE_READ ? "->" : "<-";
if (ret > 0)
- DRM_DEBUG_DP("%s: 0x%05x AUX %s (ret=%3d) %*ph\n",
- aux->name, offset, arrow, ret, min(ret, 20), buffer);
+ drm_dbg_dp(aux->drm_dev, "%s: 0x%05x AUX %s (ret=%3d) %*ph\n",
+ aux->name, offset, arrow, ret, min(ret, 20), buffer);
else
- DRM_DEBUG_DP("%s: 0x%05x AUX %s (ret=%3d)\n",
- aux->name, offset, arrow, ret);
+ drm_dbg_dp(aux->drm_dev, "%s: 0x%05x AUX %s (ret=%3d)\n",
+ aux->name, offset, arrow, ret);
}
/**
@@ -282,8 +287,8 @@ static int drm_dp_dpcd_access(struct drm_dp_aux *aux, u8 request,
err = ret;
}
- DRM_DEBUG_KMS("%s: Too many retries, giving up. First error: %d\n",
- aux->name, err);
+ drm_dbg_kms(aux->drm_dev, "%s: Too many retries, giving up. First error: %d\n",
+ aux->name, err);
ret = err;
unlock:
@@ -519,44 +524,44 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
&auto_test_req, 1) < 1) {
- DRM_ERROR("%s: DPCD failed read at register 0x%x\n",
- aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
+ drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
+ aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
- DRM_ERROR("%s: DPCD failed read at register 0x%x\n",
- aux->name, DP_TEST_REQUEST);
+ drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n",
+ aux->name, DP_TEST_REQUEST);
return false;
}
link_edid_read &= DP_TEST_LINK_EDID_READ;
if (!auto_test_req || !link_edid_read) {
- DRM_DEBUG_KMS("%s: Source DUT does not support TEST_EDID_READ\n",
- aux->name);
+ drm_dbg_kms(aux->drm_dev, "%s: Source DUT does not support TEST_EDID_READ\n",
+ aux->name);
return false;
}
if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
&auto_test_req, 1) < 1) {
- DRM_ERROR("%s: DPCD failed write at register 0x%x\n",
- aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
+ drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
+ aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR);
return false;
}
/* send back checksum for the last edid extension block data */
if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
&real_edid_checksum, 1) < 1) {
- DRM_ERROR("%s: DPCD failed write at register 0x%x\n",
- aux->name, DP_TEST_EDID_CHECKSUM);
+ drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
+ aux->name, DP_TEST_EDID_CHECKSUM);
return false;
}
test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
- DRM_ERROR("%s: DPCD failed write at register 0x%x\n",
- aux->name, DP_TEST_RESPONSE);
+ drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n",
+ aux->name, DP_TEST_RESPONSE);
return false;
}
@@ -599,17 +604,16 @@ static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux,
return -EIO;
if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) {
- DRM_DEBUG_KMS("%s: Extended DPCD rev less than base DPCD rev (%d > %d)\n",
- aux->name, dpcd[DP_DPCD_REV],
- dpcd_ext[DP_DPCD_REV]);
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Extended DPCD rev less than base DPCD rev (%d > %d)\n",
+ aux->name, dpcd[DP_DPCD_REV], dpcd_ext[DP_DPCD_REV]);
return 0;
}
if (!memcmp(dpcd, dpcd_ext, sizeof(dpcd_ext)))
return 0;
- DRM_DEBUG_KMS("%s: Base DPCD: %*ph\n",
- aux->name, DP_RECEIVER_CAP_SIZE, dpcd);
+ drm_dbg_kms(aux->drm_dev, "%s: Base DPCD: %*ph\n", aux->name, DP_RECEIVER_CAP_SIZE, dpcd);
memcpy(dpcd, dpcd_ext, sizeof(dpcd_ext));
@@ -644,8 +648,7 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux,
if (ret < 0)
return ret;
- DRM_DEBUG_KMS("%s: DPCD: %*ph\n",
- aux->name, DP_RECEIVER_CAP_SIZE, dpcd);
+ drm_dbg_kms(aux->drm_dev, "%s: DPCD: %*ph\n", aux->name, DP_RECEIVER_CAP_SIZE, dpcd);
return ret;
}
@@ -674,12 +677,17 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
memset(downstream_ports, 0, DP_MAX_DOWNSTREAM_PORTS);
/* No downstream info to read */
- if (!drm_dp_is_branch(dpcd) ||
- dpcd[DP_DPCD_REV] < DP_DPCD_REV_10 ||
- !(dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT))
+ if (!drm_dp_is_branch(dpcd) || dpcd[DP_DPCD_REV] == DP_DPCD_REV_10)
return 0;
+ /* Some branches advertise having 0 downstream ports, despite also advertising they have a
+	 * downstream port present. The DP spec isn't clear on whether this is allowed or not, but
+	 * since some branches do it, we need to handle it regardless.
+ */
len = drm_dp_downstream_port_count(dpcd);
+ if (!len)
+ return 0;
+
if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE)
len *= 4;
@@ -689,8 +697,7 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux,
if (ret != len)
return -EIO;
- DRM_DEBUG_KMS("%s: DPCD DFP: %*ph\n",
- aux->name, len, downstream_ports);
+ drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports);
return 0;
}
@@ -1407,11 +1414,11 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
* Avoid spamming the kernel log with timeout errors.
*/
if (ret == -ETIMEDOUT)
- DRM_DEBUG_KMS_RATELIMITED("%s: transaction timed out\n",
- aux->name);
+ drm_dbg_kms_ratelimited(aux->drm_dev, "%s: transaction timed out\n",
+ aux->name);
else
- DRM_DEBUG_KMS("%s: transaction failed: %d\n",
- aux->name, ret);
+ drm_dbg_kms(aux->drm_dev, "%s: transaction failed: %d\n",
+ aux->name, ret);
return ret;
}
@@ -1425,12 +1432,12 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
break;
case DP_AUX_NATIVE_REPLY_NACK:
- DRM_DEBUG_KMS("%s: native nack (result=%d, size=%zu)\n",
- aux->name, ret, msg->size);
+ drm_dbg_kms(aux->drm_dev, "%s: native nack (result=%d, size=%zu)\n",
+ aux->name, ret, msg->size);
return -EREMOTEIO;
case DP_AUX_NATIVE_REPLY_DEFER:
- DRM_DEBUG_KMS("%s: native defer\n", aux->name);
+ drm_dbg_kms(aux->drm_dev, "%s: native defer\n", aux->name);
/*
* We could check for I2C bit rate capabilities and if
* available adjust this interval. We could also be
@@ -1444,8 +1451,8 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
continue;
default:
- DRM_ERROR("%s: invalid native reply %#04x\n",
- aux->name, msg->reply);
+ drm_err(aux->drm_dev, "%s: invalid native reply %#04x\n",
+ aux->name, msg->reply);
return -EREMOTEIO;
}
@@ -1460,13 +1467,13 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
case DP_AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("%s: I2C nack (result=%d, size=%zu)\n",
- aux->name, ret, msg->size);
+ drm_dbg_kms(aux->drm_dev, "%s: I2C nack (result=%d, size=%zu)\n",
+ aux->name, ret, msg->size);
aux->i2c_nack_count++;
return -EREMOTEIO;
case DP_AUX_I2C_REPLY_DEFER:
- DRM_DEBUG_KMS("%s: I2C defer\n", aux->name);
+ drm_dbg_kms(aux->drm_dev, "%s: I2C defer\n", aux->name);
/* DP Compliance Test 4.2.2.5 Requirement:
* Must have at least 7 retries for I2C defers on the
* transaction to pass this test
@@ -1480,13 +1487,13 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
continue;
default:
- DRM_ERROR("%s: invalid I2C reply %#04x\n",
- aux->name, msg->reply);
+ drm_err(aux->drm_dev, "%s: invalid I2C reply %#04x\n",
+ aux->name, msg->reply);
return -EREMOTEIO;
}
}
- DRM_DEBUG_KMS("%s: Too many retries, giving up\n", aux->name);
+ drm_dbg_kms(aux->drm_dev, "%s: Too many retries, giving up\n", aux->name);
return -EREMOTEIO;
}
@@ -1515,8 +1522,9 @@ static int drm_dp_i2c_drain_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *o
return err == 0 ? -EPROTO : err;
if (err < msg.size && err < ret) {
- DRM_DEBUG_KMS("%s: Partial I2C reply: requested %zu bytes got %d bytes\n",
- aux->name, msg.size, err);
+ drm_dbg_kms(aux->drm_dev,
+ "%s: Partial I2C reply: requested %zu bytes got %d bytes\n",
+ aux->name, msg.size, err);
ret = err;
}
@@ -1695,12 +1703,11 @@ static void drm_dp_aux_crc_work(struct work_struct *work)
}
if (ret == -EAGAIN) {
- DRM_DEBUG_KMS("%s: Get CRC failed after retrying: %d\n",
- aux->name, ret);
+ drm_dbg_kms(aux->drm_dev, "%s: Get CRC failed after retrying: %d\n",
+ aux->name, ret);
continue;
} else if (ret) {
- DRM_DEBUG_KMS("%s: Failed to get a CRC: %d\n",
- aux->name, ret);
+ drm_dbg_kms(aux->drm_dev, "%s: Failed to get a CRC: %d\n", aux->name, ret);
continue;
}
@@ -1728,10 +1735,18 @@ EXPORT_SYMBOL(drm_dp_remote_aux_init);
* drm_dp_aux_init() - minimally initialise an aux channel
* @aux: DisplayPort AUX channel
*
- * If you need to use the drm_dp_aux's i2c adapter prior to registering it
- * with the outside world, call drm_dp_aux_init() first. You must still
- * call drm_dp_aux_register() once the connector has been registered to
- * allow userspace access to the auxiliary DP channel.
+ * If you need to use the drm_dp_aux's i2c adapter prior to registering it with
+ * the outside world, call drm_dp_aux_init() first. For drivers which are
+ * grandparents to their AUX adapters (e.g. the AUX adapter is parented by a
+ * &drm_connector), you must still call drm_dp_aux_register() once the connector
+ * has been registered to allow userspace access to the auxiliary DP channel.
+ * Likewise, for such drivers you should also assign &drm_dp_aux.drm_dev as
+ * early as possible so that the &drm_device that corresponds to the AUX adapter
+ * may be mentioned in debugging output from the DRM DP helpers.
+ *
+ * For devices which use a separate platform device for their AUX adapters, this
+ * may be called as early as required by the driver.
+ *
*/
void drm_dp_aux_init(struct drm_dp_aux *aux)
{
@@ -1751,15 +1766,26 @@ EXPORT_SYMBOL(drm_dp_aux_init);
* drm_dp_aux_register() - initialise and register aux channel
* @aux: DisplayPort AUX channel
*
- * Automatically calls drm_dp_aux_init() if this hasn't been done yet.
- * This should only be called when the underlying &struct drm_connector is
- * initialiazed already. Therefore the best place to call this is from
- * &drm_connector_funcs.late_register. Not that drivers which don't follow this
- * will Oops when CONFIG_DRM_DP_AUX_CHARDEV is enabled.
- *
- * Drivers which need to use the aux channel before that point (e.g. at driver
- * load time, before drm_dev_register() has been called) need to call
- * drm_dp_aux_init().
+ * Automatically calls drm_dp_aux_init() if this hasn't been done yet. This
+ * should only be called once the parent of @aux, &drm_dp_aux.dev, is
+ * initialized. For devices which are grandparents of their AUX channels,
+ * &drm_dp_aux.dev will typically be the &drm_connector &device which
+ * corresponds to @aux. For these devices, it's advised to call
+ * drm_dp_aux_register() in &drm_connector_funcs.late_register, and likewise to
+ * call drm_dp_aux_unregister() in &drm_connector_funcs.early_unregister.
+ * Drivers which don't follow this will likely Oops when
+ * %CONFIG_DRM_DP_AUX_CHARDEV is enabled.
+ *
+ * For devices where the AUX channel is a device that exists independently of
+ * the &drm_device that uses it, such as SoCs and bridge devices, it is
+ * recommended to call drm_dp_aux_register() after a &drm_device has been
+ * assigned to &drm_dp_aux.drm_dev, and likewise to call
+ * drm_dp_aux_unregister() once the &drm_device should no longer be associated
+ * with the AUX channel (e.g. on bridge detach).
+ *
+ * Drivers which need to use the aux channel before either of the two points
+ * mentioned above need to call drm_dp_aux_init() in order to use the AUX
+ * channel before registration.
*
* Returns 0 on success or a negative error code on failure.
*/
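As a concrete illustration of the two registration paths described above, here is a minimal sketch of a bridge-style driver; the foo_* names and structure are hypothetical and not part of this patch:

	#include <drm/drm_bridge.h>
	#include <drm/drm_dp_helper.h>

	struct foo_bridge {
		struct drm_bridge bridge;
		struct drm_dp_aux aux;
	};

	static struct foo_bridge *to_foo(struct drm_bridge *bridge)
	{
		return container_of(bridge, struct foo_bridge, bridge);
	}

	/* Probe time: no drm_device exists yet, but the channel is already usable. */
	static void foo_init_aux(struct foo_bridge *foo, struct device *dev)
	{
		foo->aux.name = "foo-aux";
		foo->aux.dev = dev;		/* parent struct device */
		/* foo->aux.transfer = foo_aux_transfer;  hardware hook, omitted here */
		drm_dp_aux_init(&foo->aux);
	}

	/* Bridge attach: a drm_device is now known, so expose the channel. */
	static int foo_attach(struct drm_bridge *bridge, enum drm_bridge_attach_flags flags)
	{
		struct foo_bridge *foo = to_foo(bridge);

		foo->aux.drm_dev = bridge->dev;
		return drm_dp_aux_register(&foo->aux);
	}

	/* Bridge detach: the drm_device goes away again. */
	static void foo_detach(struct drm_bridge *bridge)
	{
		drm_dp_aux_unregister(&to_foo(bridge)->aux);
	}

For connector-parented AUX channels the same register/unregister pair simply moves into &drm_connector_funcs.late_register and .early_unregister, as the comment above describes.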
@@ -1767,6 +1793,8 @@ int drm_dp_aux_register(struct drm_dp_aux *aux)
{
int ret;
+ WARN_ON_ONCE(!aux->drm_dev);
+
if (!aux->ddc.algo)
drm_dp_aux_init(aux);
@@ -1983,13 +2011,12 @@ int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
dev_id_len = strnlen(ident->device_id, sizeof(ident->device_id));
- DRM_DEBUG_KMS("%s: DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
- aux->name, is_branch ? "branch" : "sink",
- (int)sizeof(ident->oui), ident->oui,
- dev_id_len, ident->device_id,
- ident->hw_rev >> 4, ident->hw_rev & 0xf,
- ident->sw_major_rev, ident->sw_minor_rev,
- desc->quirks);
+ drm_dbg_kms(aux->drm_dev,
+ "%s: DP %s: OUI %*phD dev-ID %*pE HW-rev %d.%d SW-rev %d.%d quirks 0x%04x\n",
+ aux->name, is_branch ? "branch" : "sink",
+ (int)sizeof(ident->oui), ident->oui, dev_id_len,
+ ident->device_id, ident->hw_rev >> 4, ident->hw_rev & 0xf,
+ ident->sw_major_rev, ident->sw_minor_rev, desc->quirks);
return 0;
}
@@ -2755,7 +2782,8 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux)
if (ret < 0)
return ret;
if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) {
- DRM_DEBUG_KMS("PCON in Autonomous mode, can't enable FRL\n");
+ drm_dbg_kms(aux->drm_dev, "%s: PCON in Autonomous mode, can't enable FRL\n",
+ aux->name);
return -EINVAL;
}
buf |= DP_PCON_ENABLE_HDMI_LINK;
@@ -2850,7 +2878,8 @@ void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux,
num_error = 0;
}
- DRM_ERROR("More than %d errors since the last read for lane %d", num_error, i);
+ drm_err(aux->drm_dev, "%s: More than %d errors since the last read for lane %d",
+ aux->name, num_error, i);
}
}
EXPORT_SYMBOL(drm_dp_pcon_hdmi_frl_link_error_count);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 159014455fab..54604633e65c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -286,7 +286,8 @@ static void drm_dp_encode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
*len = idx;
}
-static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
+static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_hdr *hdr,
u8 *buf, int buflen, u8 *hdrlen)
{
u8 crc4;
@@ -303,7 +304,7 @@ static bool drm_dp_decode_sideband_msg_hdr(struct drm_dp_sideband_msg_hdr *hdr,
crc4 = drm_dp_msg_header_crc4(buf, (len * 2) - 1);
if ((crc4 & 0xf) != (buf[len - 1] & 0xf)) {
- DRM_DEBUG_KMS("crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
+ drm_dbg_kms(mgr->dev, "crc4 mismatch 0x%x 0x%x\n", crc4, buf[len - 1]);
return false;
}
@@ -789,7 +790,8 @@ static bool drm_dp_sideband_append_payload(struct drm_dp_sideband_msg_rx *msg,
return true;
}
-static bool drm_dp_sideband_parse_link_address(struct drm_dp_sideband_msg_rx *raw,
+static bool drm_dp_sideband_parse_link_address(const struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *repmsg)
{
int idx = 1;
@@ -1014,7 +1016,8 @@ drm_dp_sideband_parse_query_stream_enc_status(
return true;
}
-static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
+static bool drm_dp_sideband_parse_reply(const struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_reply_body *msg)
{
memset(msg, 0, sizeof(*msg));
@@ -1030,7 +1033,7 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
switch (msg->req_type) {
case DP_LINK_ADDRESS:
- return drm_dp_sideband_parse_link_address(raw, msg);
+ return drm_dp_sideband_parse_link_address(mgr, raw, msg);
case DP_QUERY_PAYLOAD:
return drm_dp_sideband_parse_query_payload_ack(raw, msg);
case DP_REMOTE_DPCD_READ:
@@ -1053,14 +1056,16 @@ static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
case DP_QUERY_STREAM_ENC_STATUS:
return drm_dp_sideband_parse_query_stream_enc_status(raw, msg);
default:
- DRM_ERROR("Got unknown reply 0x%02x (%s)\n", msg->req_type,
- drm_dp_mst_req_type_str(msg->req_type));
+ drm_err(mgr->dev, "Got unknown reply 0x%02x (%s)\n",
+ msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
return false;
}
}
-static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideband_msg_rx *raw,
- struct drm_dp_sideband_msg_req_body *msg)
+static bool
+drm_dp_sideband_parse_connection_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
{
int idx = 1;
@@ -1082,12 +1087,14 @@ static bool drm_dp_sideband_parse_connection_status_notify(struct drm_dp_sideban
idx++;
return true;
fail_len:
- DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen);
+ drm_dbg_kms(mgr->dev, "connection status reply parse length fail %d %d\n",
+ idx, raw->curlen);
return false;
}
-static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_msg_rx *raw,
- struct drm_dp_sideband_msg_req_body *msg)
+static bool drm_dp_sideband_parse_resource_status_notify(const struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_rx *raw,
+ struct drm_dp_sideband_msg_req_body *msg)
{
int idx = 1;
@@ -1105,11 +1112,12 @@ static bool drm_dp_sideband_parse_resource_status_notify(struct drm_dp_sideband_
idx++;
return true;
fail_len:
- DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen);
+ drm_dbg_kms(mgr->dev, "resource status reply parse length fail %d %d\n", idx, raw->curlen);
return false;
}
-static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
+static bool drm_dp_sideband_parse_req(const struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_sideband_msg_rx *raw,
struct drm_dp_sideband_msg_req_body *msg)
{
memset(msg, 0, sizeof(*msg));
@@ -1117,12 +1125,12 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
switch (msg->req_type) {
case DP_CONNECTION_STATUS_NOTIFY:
- return drm_dp_sideband_parse_connection_status_notify(raw, msg);
+ return drm_dp_sideband_parse_connection_status_notify(mgr, raw, msg);
case DP_RESOURCE_STATUS_NOTIFY:
- return drm_dp_sideband_parse_resource_status_notify(raw, msg);
+ return drm_dp_sideband_parse_resource_status_notify(mgr, raw, msg);
default:
- DRM_ERROR("Got unknown request 0x%02x (%s)\n", msg->req_type,
- drm_dp_mst_req_type_str(msg->req_type));
+ drm_err(mgr->dev, "Got unknown request 0x%02x (%s)\n",
+ msg->req_type, drm_dp_mst_req_type_str(msg->req_type));
return false;
}
}
@@ -1232,14 +1240,14 @@ static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1);
if (ret > mgr->max_payloads) {
ret = -EINVAL;
- DRM_DEBUG_KMS("out of payload ids %d\n", ret);
+ drm_dbg_kms(mgr->dev, "out of payload ids %d\n", ret);
goto out_unlock;
}
vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1);
if (vcpi_ret > mgr->max_payloads) {
ret = -EINVAL;
- DRM_DEBUG_KMS("out of vcpi ids %d\n", ret);
+ drm_dbg_kms(mgr->dev, "out of vcpi ids %d\n", ret);
goto out_unlock;
}
@@ -1261,7 +1269,7 @@ static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr,
return;
mutex_lock(&mgr->payload_lock);
- DRM_DEBUG_KMS("putting payload %d\n", vcpi);
+ drm_dbg_kms(mgr->dev, "putting payload %d\n", vcpi);
clear_bit(vcpi - 1, &mgr->vcpi_mask);
for (i = 0; i < mgr->max_payloads; i++) {
@@ -1331,7 +1339,8 @@ static int drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb,
goto out;
}
} else {
- DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno);
+ drm_dbg_kms(mgr->dev, "timedout msg send %p %d %d\n",
+ txmsg, txmsg->state, txmsg->seqno);
/* dump some state */
ret = -EIO;
@@ -1485,7 +1494,7 @@ static void
drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
kref_get(&mstb->malloc_kref);
- DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
+ drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref));
}
/**
@@ -1502,7 +1511,7 @@ drm_dp_mst_get_mstb_malloc(struct drm_dp_mst_branch *mstb)
static void
drm_dp_mst_put_mstb_malloc(struct drm_dp_mst_branch *mstb)
{
- DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
+ drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->malloc_kref) - 1);
kref_put(&mstb->malloc_kref, drm_dp_free_mst_branch_device);
}
@@ -1536,7 +1545,7 @@ void
drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port)
{
kref_get(&port->malloc_kref);
- DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref));
+ drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref));
}
EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
@@ -1553,7 +1562,7 @@ EXPORT_SYMBOL(drm_dp_mst_get_port_malloc);
void
drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port)
{
- DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
+ drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->malloc_kref) - 1);
kref_put(&port->malloc_kref, drm_dp_free_mst_port);
}
EXPORT_SYMBOL(drm_dp_mst_put_port_malloc);
@@ -1778,8 +1787,7 @@ drm_dp_mst_topology_try_get_mstb(struct drm_dp_mst_branch *mstb)
topology_ref_history_lock(mstb->mgr);
ret = kref_get_unless_zero(&mstb->topology_kref);
if (ret) {
- DRM_DEBUG("mstb %p (%d)\n",
- mstb, kref_read(&mstb->topology_kref));
+ drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
}
@@ -1809,7 +1817,7 @@ static void drm_dp_mst_topology_get_mstb(struct drm_dp_mst_branch *mstb)
save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_GET);
WARN_ON(kref_read(&mstb->topology_kref) == 0);
kref_get(&mstb->topology_kref);
- DRM_DEBUG("mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
+ drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref));
topology_ref_history_unlock(mstb->mgr);
}
@@ -1831,8 +1839,7 @@ drm_dp_mst_topology_put_mstb(struct drm_dp_mst_branch *mstb)
{
topology_ref_history_lock(mstb->mgr);
- DRM_DEBUG("mstb %p (%d)\n",
- mstb, kref_read(&mstb->topology_kref) - 1);
+ drm_dbg(mstb->mgr->dev, "mstb %p (%d)\n", mstb, kref_read(&mstb->topology_kref) - 1);
save_mstb_topology_ref(mstb, DRM_DP_MST_TOPOLOGY_REF_PUT);
topology_ref_history_unlock(mstb->mgr);
@@ -1895,8 +1902,7 @@ drm_dp_mst_topology_try_get_port(struct drm_dp_mst_port *port)
topology_ref_history_lock(port->mgr);
ret = kref_get_unless_zero(&port->topology_kref);
if (ret) {
- DRM_DEBUG("port %p (%d)\n",
- port, kref_read(&port->topology_kref));
+ drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
}
@@ -1923,7 +1929,7 @@ static void drm_dp_mst_topology_get_port(struct drm_dp_mst_port *port)
WARN_ON(kref_read(&port->topology_kref) == 0);
kref_get(&port->topology_kref);
- DRM_DEBUG("port %p (%d)\n", port, kref_read(&port->topology_kref));
+ drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref));
save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_GET);
topology_ref_history_unlock(port->mgr);
@@ -1944,8 +1950,7 @@ static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port)
{
topology_ref_history_lock(port->mgr);
- DRM_DEBUG("port %p (%d)\n",
- port, kref_read(&port->topology_kref) - 1);
+ drm_dbg(port->mgr->dev, "port %p (%d)\n", port, kref_read(&port->topology_kref) - 1);
save_port_topology_ref(port, DRM_DP_MST_TOPOLOGY_REF_PUT);
topology_ref_history_unlock(port->mgr);
@@ -2130,8 +2135,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
mstb = drm_dp_add_mst_branch_device(lct, rad);
if (!mstb) {
ret = -ENOMEM;
- DRM_ERROR("Failed to create MSTB for port %p",
- port);
+ drm_err(mgr->dev, "Failed to create MSTB for port %p", port);
goto out;
}
@@ -2261,8 +2265,8 @@ static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
int drm_dp_mst_connector_late_register(struct drm_connector *connector,
struct drm_dp_mst_port *port)
{
- DRM_DEBUG_KMS("registering %s remote bus for %s\n",
- port->aux.name, connector->kdev->kobj.name);
+ drm_dbg_kms(port->mgr->dev, "registering %s remote bus for %s\n",
+ port->aux.name, connector->kdev->kobj.name);
port->aux.dev = connector->kdev;
return drm_dp_aux_register_devnode(&port->aux);
@@ -2281,8 +2285,8 @@ EXPORT_SYMBOL(drm_dp_mst_connector_late_register);
void drm_dp_mst_connector_early_unregister(struct drm_connector *connector,
struct drm_dp_mst_port *port)
{
- DRM_DEBUG_KMS("unregistering %s remote bus for %s\n",
- port->aux.name, connector->kdev->kobj.name);
+ drm_dbg_kms(port->mgr->dev, "unregistering %s remote bus for %s\n",
+ port->aux.name, connector->kdev->kobj.name);
drm_dp_aux_unregister_devnode(&port->aux);
}
EXPORT_SYMBOL(drm_dp_mst_connector_early_unregister);
@@ -2312,7 +2316,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
return;
error:
- DRM_ERROR("Failed to create connector for port %p: %d\n", port, ret);
+ drm_err(mgr->dev, "Failed to create connector for port %p: %d\n", port, ret);
}
/*
@@ -2350,6 +2354,7 @@ drm_dp_mst_add_port(struct drm_device *dev,
port->aux.is_remote = true;
/* initialize the MST downstream port's AUX crc work queue */
+ port->aux.drm_dev = dev;
drm_dp_remote_aux_init(&port->aux);
/*
@@ -2451,8 +2456,7 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
if (ret == 1) {
send_link_addr = true;
} else if (ret < 0) {
- DRM_ERROR("Failed to change PDT on port %p: %d\n",
- port, ret);
+ drm_err(dev, "Failed to change PDT on port %p: %d\n", port, ret);
goto fail;
}
@@ -2547,8 +2551,7 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
if (ret == 1) {
dowork = true;
} else if (ret < 0) {
- DRM_ERROR("Failed to change PDT for port %p: %d\n",
- port, ret);
+ drm_err(mgr->dev, "Failed to change PDT for port %p: %d\n", port, ret);
dowork = false;
}
@@ -2607,7 +2610,9 @@ static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_
if (port->port_num == port_num) {
mstb = port->mstb;
if (!mstb) {
- DRM_ERROR("failed to lookup MSTB with lct %d, rad %02x\n", lct, rad[0]);
+ drm_err(mgr->dev,
+ "failed to lookup MSTB with lct %d, rad %02x\n",
+ lct, rad[0]);
goto out;
}
@@ -2743,7 +2748,7 @@ static void drm_dp_mst_link_probe_work(struct work_struct *work)
* things work again.
*/
if (clear_payload_id_table) {
- DRM_DEBUG_KMS("Clearing payload ID table\n");
+ drm_dbg_kms(dev, "Clearing payload ID table\n");
drm_dp_send_clear_payload_id_table(mgr, mstb);
}
@@ -2805,7 +2810,7 @@ retry:
retries++;
goto retry;
}
- DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
+ drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret);
return -EIO;
}
@@ -2918,7 +2923,7 @@ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_sideband_msg_tx, next);
ret = process_single_tx_qlock(mgr, txmsg, false);
if (ret < 0) {
- DRM_DEBUG_KMS("failed to send msg in q %d\n", ret);
+ drm_dbg_kms(mgr->dev, "failed to send msg in q %d\n", ret);
list_del(&txmsg->next);
txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT;
wake_up_all(&mgr->tx_waitq);
@@ -2943,24 +2948,26 @@ static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
}
static void
-drm_dp_dump_link_address(struct drm_dp_link_address_ack_reply *reply)
+drm_dp_dump_link_address(const struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_link_address_ack_reply *reply)
{
struct drm_dp_link_addr_reply_port *port_reply;
int i;
for (i = 0; i < reply->nports; i++) {
port_reply = &reply->ports[i];
- DRM_DEBUG_KMS("port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
- i,
- port_reply->input_port,
- port_reply->peer_device_type,
- port_reply->port_number,
- port_reply->dpcd_revision,
- port_reply->mcs,
- port_reply->ddps,
- port_reply->legacy_device_plug_status,
- port_reply->num_sdp_streams,
- port_reply->num_sdp_stream_sinks);
+ drm_dbg_kms(mgr->dev,
+ "port %d: input %d, pdt: %d, pn: %d, dpcd_rev: %02x, mcs: %d, ddps: %d, ldps %d, sdp %d/%d\n",
+ i,
+ port_reply->input_port,
+ port_reply->peer_device_type,
+ port_reply->port_number,
+ port_reply->dpcd_revision,
+ port_reply->mcs,
+ port_reply->ddps,
+ port_reply->legacy_device_plug_status,
+ port_reply->num_sdp_streams,
+ port_reply->num_sdp_stream_sinks);
}
}
@@ -2986,26 +2993,25 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
/* FIXME: Actually do some real error handling here */
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret <= 0) {
- DRM_ERROR("Sending link address failed with %d\n", ret);
+ drm_err(mgr->dev, "Sending link address failed with %d\n", ret);
goto out;
}
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
- DRM_ERROR("link address NAK received\n");
+ drm_err(mgr->dev, "link address NAK received\n");
ret = -EIO;
goto out;
}
reply = &txmsg->reply.u.link_addr;
- DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
- drm_dp_dump_link_address(reply);
+ drm_dbg_kms(mgr->dev, "link address reply: %d\n", reply->nports);
+ drm_dp_dump_link_address(mgr, reply);
ret = drm_dp_check_mstb_guid(mstb, reply->guid);
if (ret) {
char buf[64];
drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
- DRM_ERROR("GUID check on %s failed: %d\n",
- buf, ret);
+ drm_err(mgr->dev, "GUID check on %s failed: %d\n", buf, ret);
goto out;
}
@@ -3029,8 +3035,8 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
if (port_mask & BIT(port->port_num))
continue;
- DRM_DEBUG_KMS("port %d was not in link address, removing\n",
- port->port_num);
+ drm_dbg_kms(mgr->dev, "port %d was not in link address, removing\n",
+ port->port_num);
list_del(&port->next);
drm_dp_mst_topology_put_port(port);
changed = true;
@@ -3062,7 +3068,7 @@ drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- DRM_DEBUG_KMS("clear payload table id nak received\n");
+ drm_dbg_kms(mgr->dev, "clear payload table id nak received\n");
kfree(txmsg);
}
@@ -3091,15 +3097,15 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
path_res = &txmsg->reply.u.path_resources;
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
- DRM_DEBUG_KMS("enum path resources nak received\n");
+ drm_dbg_kms(mgr->dev, "enum path resources nak received\n");
} else {
if (port->port_num != path_res->port_number)
DRM_ERROR("got incorrect port in response\n");
- DRM_DEBUG_KMS("enum path resources %d: %d %d\n",
- path_res->port_number,
- path_res->full_payload_bw_number,
- path_res->avail_payload_bw_number);
+ drm_dbg_kms(mgr->dev, "enum path resources %d: %d %d\n",
+ path_res->port_number,
+ path_res->full_payload_bw_number,
+ path_res->avail_payload_bw_number);
/*
* If something changed, make sure we send a
@@ -3345,7 +3351,7 @@ static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
int id,
struct drm_dp_payload *payload)
{
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(mgr->dev, "\n");
/* it's okay for these to fail */
if (port) {
drm_dp_payload_send_msg(mgr, port, id, 0);
@@ -3451,7 +3457,7 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
continue;
}
- DRM_DEBUG_KMS("removing payload %d\n", i);
+ drm_dbg_kms(mgr->dev, "removing payload %d\n", i);
for (j = i; j < mgr->max_payloads - 1; j++) {
mgr->payloads[j] = mgr->payloads[j + 1];
mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1];
@@ -3498,7 +3504,7 @@ int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr)
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
- DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state);
+ drm_dbg_kms(mgr->dev, "payload %d %d\n", i, mgr->payloads[i].payload_state);
if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) {
ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]);
} else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) {
@@ -3543,8 +3549,8 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
/* DPCD read should never be NACKed */
if (txmsg->reply.reply_type == 1) {
- DRM_ERROR("mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
- mstb, port->port_num, offset, size);
+ drm_err(mgr->dev, "mstb %p port %d: DPCD read on addr 0x%x for %d bytes NAKed\n",
+ mstb, port->port_num, offset, size);
ret = -EIO;
goto fail_free;
}
@@ -3637,6 +3643,7 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
/**
* drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
+ * @mgr: The &drm_dp_mst_topology_mgr to use
* @link_rate: link rate in 10kbits/s units
* @link_lane_count: lane count
*
@@ -3645,11 +3652,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
* convert the number of PBNs required for a given stream to the number of
* timeslots this stream requires in each MTP.
*/
-int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
+int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int link_lane_count)
{
if (link_rate == 0 || link_lane_count == 0)
- DRM_DEBUG_KMS("invalid link rate/lane count: (%d / %d)\n",
- link_rate, link_lane_count);
+ drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
+ link_rate, link_lane_count);
/* See DP v2.0 2.6.4.2, VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
return link_rate * link_lane_count / 54000;
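A quick worked example of the conversion above, assuming an HBR2 x4 link and an mgr that is already set up (illustrative fragment, not taken from this series):

	/* HBR2: bw code DP_LINK_BW_5_4 maps to 540000, i.e. 5.4 Gbps in 10 kbit/s units */
	int link_rate = drm_dp_bw_code_to_link_rate(DP_LINK_BW_5_4);
	int pbn_div = drm_dp_get_vc_payload_bw(mgr, link_rate, 4);
	/* 540000 * 4 / 54000 = 40, i.e. 40 PBN fit into one timeslot per MTP */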
@@ -3700,18 +3708,24 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* set the device into MST mode */
if (mst_state) {
struct drm_dp_payload reset_pay;
+ int lane_count;
+ int link_rate;
WARN_ON(mgr->mst_primary);
/* get dpcd info */
- ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
- if (ret != DP_RECEIVER_CAP_SIZE) {
- DRM_DEBUG_KMS("failed to read DPCD\n");
+ ret = drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd);
+ if (ret < 0) {
+ drm_dbg_kms(mgr->dev, "%s: failed to read DPCD, ret %d\n",
+ mgr->aux->name, ret);
goto out_unlock;
}
- mgr->pbn_div = drm_dp_get_vc_payload_bw(drm_dp_bw_code_to_link_rate(mgr->dpcd[1]),
- mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+ lane_count = min_t(int, mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, mgr->max_lane_count);
+ link_rate = min_t(int, mgr->dpcd[1], mgr->max_link_rate);
+ mgr->pbn_div = drm_dp_get_vc_payload_bw(mgr,
+ drm_dp_bw_code_to_link_rate(link_rate),
+ lane_count);
if (mgr->pbn_div == 0) {
ret = -EINVAL;
goto out_unlock;
@@ -3840,7 +3854,7 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd,
DP_RECEIVER_CAP_SIZE);
if (ret != DP_RECEIVER_CAP_SIZE) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
@@ -3849,20 +3863,20 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
DP_UP_REQ_EN |
DP_UPSTREAM_IS_SRC);
if (ret < 0) {
- DRM_DEBUG_KMS("mst write failed - undocked during suspend?\n");
+ drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
goto out_fail;
}
/* Some hubs forget their guids after they resume */
ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
if (ret != 16) {
- DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
if (ret) {
- DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
+ drm_dbg_kms(mgr->dev, "check mstb failed - undocked during suspend?\n");
goto out_fail;
}
@@ -3875,7 +3889,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
mutex_unlock(&mgr->lock);
if (sync) {
- DRM_DEBUG_KMS("Waiting for link probe work to finish re-syncing topology...\n");
+ drm_dbg_kms(mgr->dev,
+ "Waiting for link probe work to finish re-syncing topology...\n");
flush_work(&mgr->work);
}
@@ -3908,15 +3923,15 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
len = min(mgr->max_dpcd_transaction_bytes, 16);
ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len);
if (ret != len) {
- DRM_DEBUG_KMS("failed to read DPCD down rep %d %d\n", len, ret);
+ drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret);
return false;
}
- ret = drm_dp_decode_sideband_msg_hdr(&hdr, replyblock, len, &hdrlen);
+ ret = drm_dp_decode_sideband_msg_hdr(mgr, &hdr, replyblock, len, &hdrlen);
if (ret == false) {
print_hex_dump(KERN_DEBUG, "failed hdr", DUMP_PREFIX_NONE, 16,
1, replyblock, len, false);
- DRM_DEBUG_KMS("ERROR: failed header\n");
+ drm_dbg_kms(mgr->dev, "ERROR: failed header\n");
return false;
}
@@ -3924,22 +3939,20 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
/* Caller is responsible for giving back this reference */
*mstb = drm_dp_get_mst_branch_device(mgr, hdr.lct, hdr.rad);
if (!*mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
- hdr.lct);
+ drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr.lct);
return false;
}
}
if (!drm_dp_sideband_msg_set_header(msg, &hdr, hdrlen)) {
- DRM_DEBUG_KMS("sideband msg set header failed %d\n",
- replyblock[0]);
+ drm_dbg_kms(mgr->dev, "sideband msg set header failed %d\n", replyblock[0]);
return false;
}
replylen = min(msg->curchunk_len, (u8)(len - hdrlen));
ret = drm_dp_sideband_append_payload(msg, replyblock + hdrlen, replylen);
if (!ret) {
- DRM_DEBUG_KMS("sideband msg build failed %d\n", replyblock[0]);
+ drm_dbg_kms(mgr->dev, "sideband msg build failed %d\n", replyblock[0]);
return false;
}
@@ -3950,14 +3963,14 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply,
replyblock, len);
if (ret != len) {
- DRM_DEBUG_KMS("failed to read a chunk (len %d, ret %d)\n",
- len, ret);
+ drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n",
+ len, ret);
return false;
}
ret = drm_dp_sideband_append_payload(msg, replyblock, len);
if (!ret) {
- DRM_DEBUG_KMS("failed to build sideband msg\n");
+ drm_dbg_kms(mgr->dev, "failed to build sideband msg\n");
return false;
}
@@ -3991,21 +4004,21 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
struct drm_dp_sideband_msg_hdr *hdr;
hdr = &msg->initial_hdr;
- DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n",
- mstb, hdr->seqno, hdr->lct, hdr->rad[0],
- msg->msg[0]);
+ drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
+ mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
goto out_clear_reply;
}
- drm_dp_sideband_parse_reply(msg, &txmsg->reply);
+ drm_dp_sideband_parse_reply(mgr, msg, &txmsg->reply);
if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
- DRM_DEBUG_KMS("Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
- txmsg->reply.req_type,
- drm_dp_mst_req_type_str(txmsg->reply.req_type),
- txmsg->reply.u.nak.reason,
- drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
- txmsg->reply.u.nak.nak_data);
+ drm_dbg_kms(mgr->dev,
+ "Got NAK reply: req 0x%02x (%s), reason 0x%02x (%s), nak data 0x%02x\n",
+ txmsg->reply.req_type,
+ drm_dp_mst_req_type_str(txmsg->reply.req_type),
+ txmsg->reply.u.nak.reason,
+ drm_dp_mst_nak_reason_str(txmsg->reply.u.nak.reason),
+ txmsg->reply.u.nak.nak_data);
}
memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
@@ -4053,8 +4066,7 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
}
if (!mstb) {
- DRM_DEBUG_KMS("Got MST reply from unknown device %d\n",
- hdr->lct);
+ drm_dbg_kms(mgr->dev, "Got MST reply from unknown device %d\n", hdr->lct);
return false;
}
@@ -4114,12 +4126,12 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
INIT_LIST_HEAD(&up_req->next);
- drm_dp_sideband_parse_req(&mgr->up_req_recv, &up_req->msg);
+ drm_dp_sideband_parse_req(mgr, &mgr->up_req_recv, &up_req->msg);
if (up_req->msg.req_type != DP_CONNECTION_STATUS_NOTIFY &&
up_req->msg.req_type != DP_RESOURCE_STATUS_NOTIFY) {
- DRM_DEBUG_KMS("Received unknown up req type, ignoring: %x\n",
- up_req->msg.req_type);
+ drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
+ up_req->msg.req_type);
kfree(up_req);
goto out;
}
@@ -4131,20 +4143,20 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
- DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
- conn_stat->port_number,
- conn_stat->legacy_device_plug_status,
- conn_stat->displayport_device_plug_status,
- conn_stat->message_capability_status,
- conn_stat->input_port,
- conn_stat->peer_device_type);
+ drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
+ conn_stat->port_number,
+ conn_stat->legacy_device_plug_status,
+ conn_stat->displayport_device_plug_status,
+ conn_stat->message_capability_status,
+ conn_stat->input_port,
+ conn_stat->peer_device_type);
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
- DRM_DEBUG_KMS("Got RSN: pn: %d avail_pbn %d\n",
- res_stat->port_number,
- res_stat->available_pbn);
+ drm_dbg_kms(mgr->dev, "Got RSN: pn: %d avail_pbn %d\n",
+ res_stat->port_number,
+ res_stat->available_pbn);
}
up_req->hdr = mgr->up_req_recv.initial_hdr;
@@ -4384,8 +4396,9 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
* which is an error
*/
if (WARN_ON(!prev_slots)) {
- DRM_ERROR("cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
- port);
+ drm_err(mgr->dev,
+ "cannot allocate and release VCPI on [MST PORT:%p] in the same state\n",
+ port);
return -EINVAL;
}
@@ -4402,12 +4415,12 @@ int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
req_slots = DIV_ROUND_UP(pbn, pbn_div);
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
- port->connector->base.id, port->connector->name,
- port, prev_slots, req_slots);
- DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
- port->connector->base.id, port->connector->name,
- port, prev_bw, pbn);
+ drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] VCPI %d -> %d\n",
+ port->connector->base.id, port->connector->name,
+ port, prev_slots, req_slots);
+ drm_dbg_atomic(mgr->dev, "[CONNECTOR:%d:%s] [MST PORT:%p] PBN %d -> %d\n",
+ port->connector->base.id, port->connector->name,
+ port, prev_bw, pbn);
/* Add the new allocation to the state */
if (!vcpi) {
@@ -4471,12 +4484,12 @@ int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
}
}
if (WARN_ON(!found)) {
- DRM_ERROR("no VCPI for [MST PORT:%p] found in mst state %p\n",
- port, &topology_state->base);
+ drm_err(mgr->dev, "no VCPI for [MST PORT:%p] found in mst state %p\n",
+ port, &topology_state->base);
return -EINVAL;
}
- DRM_DEBUG_ATOMIC("[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] VCPI %d -> 0\n", port, pos->vcpi);
if (pos->vcpi) {
drm_dp_mst_put_port_malloc(port);
pos->vcpi = 0;
@@ -4507,8 +4520,9 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
return false;
if (port->vcpi.vcpi > 0) {
- DRM_DEBUG_KMS("payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
- port->vcpi.vcpi, port->vcpi.pbn, pbn);
+ drm_dbg_kms(mgr->dev,
+ "payload: vcpi %d already allocated for pbn %d - requested pbn %d\n",
+ port->vcpi.vcpi, port->vcpi.pbn, pbn);
if (pbn == port->vcpi.pbn) {
drm_dp_mst_topology_put_port(port);
return true;
@@ -4517,13 +4531,12 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn, slots);
if (ret) {
- DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
- DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+ drm_dbg_kms(mgr->dev, "failed to init vcpi slots=%d max=63 ret=%d\n",
+ DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
drm_dp_mst_topology_put_port(port);
goto out;
}
- DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
- pbn, port->vcpi.num_slots);
+ drm_dbg_kms(mgr->dev, "initing vcpi for pbn=%d slots=%d\n", pbn, port->vcpi.num_slots);
/* Keep port allocated until its payload has been removed */
drm_dp_mst_get_port_malloc(port);
@@ -4605,14 +4618,14 @@ static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
if (ret != 3) {
- DRM_DEBUG_KMS("failed to write payload allocation %d\n", ret);
+ drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
goto fail;
}
retry:
ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
if (ret < 0) {
- DRM_DEBUG_KMS("failed to read payload table status %d\n", ret);
+ drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
goto fail;
}
@@ -4622,7 +4635,8 @@ retry:
usleep_range(10000, 20000);
goto retry;
}
- DRM_DEBUG_KMS("status not set after read payload table status %d\n", status);
+ drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
+ status);
ret = -EINVAL;
goto fail;
}
@@ -4669,16 +4683,15 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
status & DP_PAYLOAD_ACT_HANDLED || status < 0,
200, timeout_ms * USEC_PER_MSEC);
if (ret < 0 && status >= 0) {
- DRM_ERROR("Failed to get ACT after %dms, last status: %02x\n",
- timeout_ms, status);
+ drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
+ timeout_ms, status);
return -EINVAL;
} else if (status < 0) {
/*
* Failure here isn't unexpected - the hub may have
* just been unplugged
*/
- DRM_DEBUG_KMS("Failed to read payload table status: %d\n",
- status);
+ drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
return status;
}
@@ -5118,12 +5131,11 @@ drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
return 0;
if (mstb->port_parent)
- DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
- mstb->port_parent->parent, mstb->port_parent,
- mstb);
+ drm_dbg_atomic(mstb->mgr->dev,
+ "[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
+ mstb->port_parent->parent, mstb->port_parent, mstb);
else
- DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
- mstb);
+ drm_dbg_atomic(mstb->mgr->dev, "[MSTB:%p] Checking bandwidth limits\n", mstb);
list_for_each_entry(port, &mstb->ports, next) {
ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
@@ -5181,14 +5193,14 @@ drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
}
if (pbn_used > port->full_pbn) {
- DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
- port->parent, port, pbn_used,
- port->full_pbn);
+ drm_dbg_atomic(port->mgr->dev,
+ "[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
+ port->parent, port, pbn_used, port->full_pbn);
return -ENOSPC;
}
- DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
- port->parent, port, pbn_used, port->full_pbn);
+ drm_dbg_atomic(port->mgr->dev, "[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
+ port->parent, port, pbn_used, port->full_pbn);
return pbn_used;
}
@@ -5203,31 +5215,31 @@ drm_dp_mst_atomic_check_vcpi_alloc_limit(struct drm_dp_mst_topology_mgr *mgr,
list_for_each_entry(vcpi, &mst_state->vcpis, next) {
/* Releasing VCPI is always OK-even if the port is gone */
if (!vcpi->vcpi) {
- DRM_DEBUG_ATOMIC("[MST PORT:%p] releases all VCPI slots\n",
- vcpi->port);
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] releases all VCPI slots\n",
+ vcpi->port);
continue;
}
- DRM_DEBUG_ATOMIC("[MST PORT:%p] requires %d vcpi slots\n",
- vcpi->port, vcpi->vcpi);
+ drm_dbg_atomic(mgr->dev, "[MST PORT:%p] requires %d vcpi slots\n",
+ vcpi->port, vcpi->vcpi);
avail_slots -= vcpi->vcpi;
if (avail_slots < 0) {
- DRM_DEBUG_ATOMIC("[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
- vcpi->port, mst_state,
- avail_slots + vcpi->vcpi);
+ drm_dbg_atomic(mgr->dev,
+ "[MST PORT:%p] not enough VCPI slots in mst state %p (avail=%d)\n",
+ vcpi->port, mst_state, avail_slots + vcpi->vcpi);
return -ENOSPC;
}
if (++payload_count > mgr->max_payloads) {
- DRM_DEBUG_ATOMIC("[MST MGR:%p] state %p has too many payloads (max=%d)\n",
- mgr, mst_state, mgr->max_payloads);
+ drm_dbg_atomic(mgr->dev,
+ "[MST MGR:%p] state %p has too many payloads (max=%d)\n",
+ mgr, mst_state, mgr->max_payloads);
return -EINVAL;
}
}
- DRM_DEBUG_ATOMIC("[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
- mgr, mst_state, avail_slots,
- 63 - avail_slots);
+ drm_dbg_atomic(mgr->dev, "[MST MGR:%p] mst state %p VCPI avail=%d used=%d\n",
+ mgr, mst_state, avail_slots, 63 - avail_slots);
return 0;
}
@@ -5284,8 +5296,8 @@ int drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state, struct drm
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- DRM_DEBUG_ATOMIC("[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
- mgr, crtc);
+ drm_dbg_atomic(mgr->dev, "[MST MGR:%p] Setting mode_changed flag on CRTC %p\n",
+ mgr, crtc);
crtc_state->mode_changed = true;
}
@@ -5330,21 +5342,24 @@ int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
}
if (!found) {
- DRM_DEBUG_ATOMIC("[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
- port, mst_state);
+ drm_dbg_atomic(state->dev,
+ "[MST PORT:%p] Couldn't find VCPI allocation in mst state %p\n",
+ port, mst_state);
return -EINVAL;
}
if (pos->dsc_enabled == enable) {
- DRM_DEBUG_ATOMIC("[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
- port, enable, pos->vcpi);
+ drm_dbg_atomic(state->dev,
+ "[MST PORT:%p] DSC flag is already set to %d, returning %d VCPI slots\n",
+ port, enable, pos->vcpi);
vcpi = pos->vcpi;
}
if (enable) {
vcpi = drm_dp_atomic_find_vcpi_slots(state, port->mgr, port, pbn, pbn_div);
- DRM_DEBUG_ATOMIC("[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
- port, vcpi);
+ drm_dbg_atomic(state->dev,
+ "[MST PORT:%p] Enabling DSC flag, reallocating %d VCPI slots on the port\n",
+ port, vcpi);
if (vcpi < 0)
return -EINVAL;
}
@@ -5438,14 +5453,17 @@ EXPORT_SYMBOL(drm_atomic_get_mst_topology_state);
* @aux: DP helper aux channel to talk to this device
* @max_dpcd_transaction_bytes: hw specific DPCD transaction limit
* @max_payloads: maximum number of payloads this GPU can source
+ * @max_lane_count: maximum number of lanes this GPU supports
+ * @max_link_rate: maximum link rate this GPU supports, units as in DPCD
* @conn_base_id: the connector object ID the MST device is connected to.
*
* Return 0 for success, or negative error code on failure
*/
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct drm_device *dev, struct drm_dp_aux *aux,
- int max_dpcd_transaction_bytes,
- int max_payloads, int conn_base_id)
+ int max_dpcd_transaction_bytes, int max_payloads,
+ u8 max_lane_count, u8 max_link_rate,
+ int conn_base_id)
{
struct drm_dp_mst_topology_state *mst_state;
@@ -5480,6 +5498,8 @@ int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
mgr->aux = aux;
mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes;
mgr->max_payloads = max_payloads;
+ mgr->max_lane_count = max_lane_count;
+ mgr->max_link_rate = max_link_rate;
mgr->conn_base_id = conn_base_id;
if (max_payloads + 1 > sizeof(mgr->payload_mask) * 8 ||
max_payloads + 1 > sizeof(mgr->vcpi_mask) * 8)
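A hedged sketch of a call site for the extended initializer; foo_dp and the limit values are illustrative and not taken from any driver in this series:

	#include <drm/drm_dp_helper.h>
	#include <drm/drm_dp_mst_helper.h>

	struct foo_dp {
		struct drm_dp_aux aux;
		struct drm_dp_mst_topology_mgr mst_mgr;
	};

	static int foo_dp_mst_init(struct foo_dp *dp, struct drm_device *drm,
				   struct drm_connector *connector)
	{
		return drm_dp_mst_topology_mgr_init(&dp->mst_mgr, drm, &dp->aux,
						    16,	/* max DPCD transaction bytes */
						    4,	/* max payloads the HW can source */
						    4,	/* max lane count */
						    DP_LINK_BW_5_4, /* max link rate, DPCD bw code */
						    connector->base.id);
	}

drm_dp_mst_topology_mgr_set_mst() then clamps whatever the sink's DPCD advertises against these source-side limits, via the min_t() calls shown earlier in this patch.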
@@ -5691,7 +5711,7 @@ static int drm_dp_mst_i2c_xfer(struct i2c_adapter *adapter,
} else if (remote_i2c_write_ok(msgs, num)) {
ret = drm_dp_mst_i2c_write(mstb, port, msgs, num);
} else {
- DRM_DEBUG_KMS("Unsupported I2C transaction for MST device\n");
+ drm_dbg_kms(mgr->dev, "Unsupported I2C transaction for MST device\n");
ret = -EIO;
}
@@ -5886,14 +5906,13 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
- u8 downstreamport;
+ u8 dpcd_ext[DP_RECEIVER_CAP_SIZE];
- if (drm_dp_dpcd_read(&port->aux, DP_DOWNSTREAMPORT_PRESENT,
- &downstreamport, 1) < 0)
+ if (drm_dp_read_dpcd_caps(port->mgr->aux, dpcd_ext) < 0)
return NULL;
- if ((downstreamport & DP_DWN_STRM_PORT_PRESENT) &&
- ((downstreamport & DP_DWN_STRM_PORT_TYPE_MASK)
+ if ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT) &&
+ ((dpcd_ext[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_TYPE_MASK)
!= DP_DWN_STRM_PORT_TYPE_ANALOG))
return port->mgr->aux;
}
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index c2f78dee9f2d..3d8d68a98b95 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -941,9 +941,7 @@ void drm_dev_unregister(struct drm_device *dev)
if (dev->driver->unload)
dev->driver->unload(dev);
- if (dev->agp)
- drm_pci_agp_destroy(dev);
-
+ drm_legacy_pci_agp_destroy(dev);
drm_legacy_rmmaps(dev);
remove_compat_control_link(dev);
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 7efbccffc2ea..d4f0bac6f8f8 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -774,19 +774,7 @@ void drm_event_cancel_free(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_event_cancel_free);
-/**
- * drm_send_event_helper - send DRM event to file descriptor
- * @dev: DRM device
- * @e: DRM event to deliver
- * @timestamp: timestamp to set for the fence event in kernel's CLOCK_MONOTONIC
- * time domain
- *
- * This helper function sends the event @e, initialized with
- * drm_event_reserve_init(), to its associated userspace DRM file.
- * The timestamp variant of dma_fence_signal is used when the caller
- * sends a valid timestamp.
- */
-void drm_send_event_helper(struct drm_device *dev,
+static void drm_send_event_helper(struct drm_device *dev,
struct drm_pending_event *e, ktime_t timestamp)
{
assert_spin_locked(&dev->event_lock);
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index c043ca364c86..0e885cd34107 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -52,6 +52,7 @@ EXPORT_SYMBOL(drm_fb_memcpy);
/**
* drm_fb_memcpy_dstclip - Copy clip buffer
* @dst: Destination buffer (iomem)
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
* @vaddr: Source buffer
* @fb: DRM framebuffer
* @clip: Clip rectangle area to copy
@@ -59,12 +60,12 @@ EXPORT_SYMBOL(drm_fb_memcpy);
* This function applies clipping on dst, i.e. the destination is a
* full (iomem) framebuffer but only the clip rect content is copied over.
*/
-void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
- struct drm_framebuffer *fb,
+void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip)
{
unsigned int cpp = fb->format->cpp[0];
- unsigned int offset = clip_offset(clip, fb->pitches[0], cpp);
+ unsigned int offset = clip_offset(clip, dst_pitch, cpp);
size_t len = (clip->x2 - clip->x1) * cpp;
unsigned int y, lines = clip->y2 - clip->y1;
@@ -73,7 +74,7 @@ void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
for (y = 0; y < lines; y++) {
memcpy_toio(dst, vaddr, len);
vaddr += fb->pitches[0];
- dst += fb->pitches[0];
+ dst += dst_pitch;
}
}
EXPORT_SYMBOL(drm_fb_memcpy_dstclip);
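For instance, the new dst_pitch argument lets a driver flush a shadow buffer into display memory whose scanline stride differs from the framebuffer's own pitch; a hypothetical helper, not part of this patch:

	#include <drm/drm_format_helper.h>

	static void foo_flush_damage(void __iomem *vram, unsigned int vram_pitch,
				     void *shadow, struct drm_framebuffer *fb,
				     struct drm_rect *damage)
	{
		/* src advances by fb->pitches[0] per line, dst by vram_pitch */
		drm_fb_memcpy_dstclip(vram, vram_pitch, shadow, fb, damage);
	}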
@@ -343,3 +344,90 @@ void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray8);
+/**
+ * drm_fb_blit_rect_dstclip - Copy parts of a framebuffer to display memory
+ * @dst: The display memory to copy to
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * @dst_format: FOURCC code of the display's color format
+ * @vmap: The framebuffer memory to copy from
+ * @fb: The framebuffer to copy from
+ * @clip: Clip rectangle area to copy
+ *
+ * This function copies parts of a framebuffer to display memory. If the
+ * formats of the display and the framebuffer do not match, the blit function
+ * will attempt to convert between them.
+ *
+ * Use drm_fb_blit_dstclip() to copy the full framebuffer.
+ *
+ * Returns:
+ * 0 on success, or
+ * -EINVAL if the color-format conversion failed, or
+ * a negative error code otherwise.
+ */
+int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ uint32_t dst_format, void *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *clip)
+{
+ uint32_t fb_format = fb->format->format;
+
+ /* treat alpha channel like filler bits */
+ if (fb_format == DRM_FORMAT_ARGB8888)
+ fb_format = DRM_FORMAT_XRGB8888;
+ if (dst_format == DRM_FORMAT_ARGB8888)
+ dst_format = DRM_FORMAT_XRGB8888;
+
+ if (dst_format == fb_format) {
+ drm_fb_memcpy_dstclip(dst, dst_pitch, vmap, fb, clip);
+ return 0;
+
+ } else if (dst_format == DRM_FORMAT_RGB565) {
+ if (fb_format == DRM_FORMAT_XRGB8888) {
+ drm_fb_xrgb8888_to_rgb565_dstclip(dst, dst_pitch,
+ vmap, fb, clip,
+ false);
+ return 0;
+ }
+ } else if (dst_format == DRM_FORMAT_RGB888) {
+ if (fb_format == DRM_FORMAT_XRGB8888) {
+ drm_fb_xrgb8888_to_rgb888_dstclip(dst, dst_pitch,
+ vmap, fb, clip);
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+EXPORT_SYMBOL(drm_fb_blit_rect_dstclip);
+
+/**
+ * drm_fb_blit_dstclip - Copy framebuffer to display memory
+ * @dst: The display memory to copy to
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * @dst_format: FOURCC code of the display's color format
+ * @vmap: The framebuffer memory to copy from
+ * @fb: The framebuffer to copy from
+ *
+ * This function copies a full framebuffer to display memory. If the formats
+ * of the display and the framebuffer do not match, the copy function will
+ * attempt to convert between them.
+ *
+ * See drm_fb_blit_rect_dstclip() for more information.
+ *
+ * Returns:
+ * 0 on success, or a negative error code otherwise.
+ */
+int drm_fb_blit_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ uint32_t dst_format, void *vmap,
+ struct drm_framebuffer *fb)
+{
+ struct drm_rect fullscreen = {
+ .x1 = 0,
+ .x2 = fb->width,
+ .y1 = 0,
+ .y2 = fb->height,
+ };
+ return drm_fb_blit_rect_dstclip(dst, dst_pitch, dst_format, vmap, fb,
+ &fullscreen);
+}
+EXPORT_SYMBOL(drm_fb_blit_dstclip);
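A hedged usage sketch for the two new blit helpers, e.g. for a driver that scans out XRGB8888 shadow buffers to an RGB565 display; the foo_* names are illustrative:

	#include <drm/drm_format_helper.h>
	#include <drm/drm_fourcc.h>

	static int foo_blit_damage(void __iomem *vram, unsigned int vram_pitch,
				   struct drm_framebuffer *fb, void *vmap,
				   struct drm_rect *damage)
	{
		/* Converts XRGB8888 -> RGB565 if needed; -EINVAL if unsupported. */
		return drm_fb_blit_rect_dstclip(vram, vram_pitch, DRM_FORMAT_RGB565,
						vmap, fb, damage);
	}

	/* Full-screen variant, e.g. after a modeset. */
	static int foo_blit_full(void __iomem *vram, unsigned int vram_pitch,
				 struct drm_framebuffer *fb, void *vmap)
	{
		return drm_fb_blit_dstclip(vram, vram_pitch, DRM_FORMAT_RGB565,
					   vmap, fb);
	}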
diff --git a/drivers/gpu/drm/drm_gem_ttm_helper.c b/drivers/gpu/drm/drm_gem_ttm_helper.c
index de28720757af..b14bed8be771 100644
--- a/drivers/gpu/drm/drm_gem_ttm_helper.c
+++ b/drivers/gpu/drm/drm_gem_ttm_helper.c
@@ -114,5 +114,38 @@ int drm_gem_ttm_mmap(struct drm_gem_object *gem,
}
EXPORT_SYMBOL(drm_gem_ttm_mmap);
+/**
+ * drm_gem_ttm_dumb_map_offset() - Implements &struct drm_driver.dumb_map_offset
+ * @file: DRM file pointer.
+ * @dev: DRM device.
+ * @handle: GEM handle
+ * @offset: Returns the mapping's memory offset on success
+ *
+ * Provides an implementation of &struct drm_driver.dumb_map_offset for
+ * TTM-based GEM drivers. TTM allocates the offset internally and
+ * drm_gem_ttm_dumb_map_offset() returns it for dumb-buffer implementations.
+ *
+ * See &struct drm_driver.dumb_map_offset.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise.
+ */
+int drm_gem_ttm_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset)
+{
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(file, handle);
+ if (!gem)
+ return -ENOENT;
+
+ *offset = drm_vma_node_offset_addr(&gem->vma_node);
+
+ drm_gem_object_put(gem);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_gem_ttm_dumb_map_offset);
+
MODULE_DESCRIPTION("DRM gem ttm helpers");
MODULE_LICENSE("GPL");
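For drivers that previously relied on drm_gem_vram_driver_dumb_mmap_offset() (removed later in this patch), adoption of the new helper might look like this hedged sketch; the foo_* names are illustrative:

	#include <drm/drm_drv.h>
	#include <drm/drm_gem.h>
	#include <drm/drm_gem_ttm_helper.h>
	#include <drm/drm_gem_vram_helper.h>

	DEFINE_DRM_GEM_FOPS(foo_fops);

	static const struct drm_driver foo_driver = {
		.driver_features	= DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
		.fops			= &foo_fops,
		.name			= "foo",
		.desc			= "Example VRAM-helper driver",
		.date			= "20210101",
		.major			= 1,
		.minor			= 0,
		/* dumb buffers still come from the VRAM helpers ... */
		.dumb_create		= drm_gem_vram_driver_dumb_create,
		/* ... but the mmap offset now comes from the generic TTM helper */
		.dumb_map_offset	= drm_gem_ttm_dumb_map_offset,
	};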
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index 2b7c3a07956d..797200315854 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -245,22 +245,6 @@ void drm_gem_vram_put(struct drm_gem_vram_object *gbo)
}
EXPORT_SYMBOL(drm_gem_vram_put);
-/**
- * drm_gem_vram_mmap_offset() - Returns a GEM VRAM object's mmap offset
- * @gbo: the GEM VRAM object
- *
- * See drm_vma_node_offset_addr() for more information.
- *
- * Returns:
- * The buffer object's offset for userspace mappings on success, or
- * 0 if no offset is allocated.
- */
-u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo)
-{
- return drm_vma_node_offset_addr(&gbo->bo.base.vma_node);
-}
-EXPORT_SYMBOL(drm_gem_vram_mmap_offset);
-
static u64 drm_gem_vram_pg_offset(struct drm_gem_vram_object *gbo)
{
/* Keep TTM behavior for now, remove when drivers are audited */
@@ -638,38 +622,6 @@ int drm_gem_vram_driver_dumb_create(struct drm_file *file,
}
EXPORT_SYMBOL(drm_gem_vram_driver_dumb_create);
-/**
- * drm_gem_vram_driver_dumb_mmap_offset() - \
- Implements &struct drm_driver.dumb_mmap_offset
- * @file: DRM file pointer.
- * @dev: DRM device.
- * @handle: GEM handle
- * @offset: Returns the mapping's memory offset on success
- *
- * Returns:
- * 0 on success, or
- * a negative errno code otherwise.
- */
-int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset)
-{
- struct drm_gem_object *gem;
- struct drm_gem_vram_object *gbo;
-
- gem = drm_gem_object_lookup(file, handle);
- if (!gem)
- return -ENOENT;
-
- gbo = drm_gem_vram_of_gem(gem);
- *offset = drm_gem_vram_mmap_offset(gbo);
-
- drm_gem_object_put(gem);
-
- return 0;
-}
-EXPORT_SYMBOL(drm_gem_vram_driver_dumb_mmap_offset);
-
/*
* Helpers for struct drm_plane_helper_funcs
*/
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 1265de2b9d90..1dcb5797a3bb 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -56,7 +56,6 @@ void drm_lastclose(struct drm_device *dev);
/* drm_pci.c */
int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-void drm_pci_agp_destroy(struct drm_device *dev);
int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
#else
@@ -67,10 +66,6 @@ static inline int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
return -EINVAL;
}
-static inline void drm_pci_agp_destroy(struct drm_device *dev)
-{
-}
-
static inline int drm_pci_set_busid(struct drm_device *dev,
struct drm_master *master)
{
diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
index 33390f02f5eb..d29907955ff7 100644
--- a/drivers/gpu/drm/drm_ioc32.c
+++ b/drivers/gpu/drm/drm_ioc32.c
@@ -31,7 +31,6 @@
#include <linux/ratelimit.h>
#include <linux/export.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_file.h>
#include <drm/drm_print.h>
@@ -619,6 +618,7 @@ static int compat_drm_dma(struct file *file, unsigned int cmd,
}
#endif
+#if IS_ENABLED(CONFIG_DRM_LEGACY)
#if IS_ENABLED(CONFIG_AGP)
typedef struct drm_agp_mode32 {
u32 mode; /**< AGP mode */
@@ -633,7 +633,7 @@ static int compat_drm_agp_enable(struct file *file, unsigned int cmd,
if (get_user(mode.mode, &argp->mode))
return -EFAULT;
- return drm_ioctl_kernel(file, drm_agp_enable_ioctl, &mode,
+ return drm_ioctl_kernel(file, drm_legacy_agp_enable_ioctl, &mode,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
@@ -659,7 +659,7 @@ static int compat_drm_agp_info(struct file *file, unsigned int cmd,
struct drm_agp_info info;
int err;
- err = drm_ioctl_kernel(file, drm_agp_info_ioctl, &info, DRM_AUTH);
+ err = drm_ioctl_kernel(file, drm_legacy_agp_info_ioctl, &info, DRM_AUTH);
if (err)
return err;
@@ -698,7 +698,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
request.size = req32.size;
request.type = req32.type;
- err = drm_ioctl_kernel(file, drm_agp_alloc_ioctl, &request,
+ err = drm_ioctl_kernel(file, drm_legacy_agp_alloc_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
if (err)
return err;
@@ -706,7 +706,7 @@ static int compat_drm_agp_alloc(struct file *file, unsigned int cmd,
req32.handle = request.handle;
req32.physical = request.physical;
if (copy_to_user(argp, &req32, sizeof(req32))) {
- drm_ioctl_kernel(file, drm_agp_free_ioctl, &request,
+ drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
return -EFAULT;
}
@@ -723,7 +723,7 @@ static int compat_drm_agp_free(struct file *file, unsigned int cmd,
if (get_user(request.handle, &argp->handle))
return -EFAULT;
- return drm_ioctl_kernel(file, drm_agp_free_ioctl, &request,
+ return drm_ioctl_kernel(file, drm_legacy_agp_free_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
@@ -744,7 +744,7 @@ static int compat_drm_agp_bind(struct file *file, unsigned int cmd,
request.handle = req32.handle;
request.offset = req32.offset;
- return drm_ioctl_kernel(file, drm_agp_bind_ioctl, &request,
+ return drm_ioctl_kernel(file, drm_legacy_agp_bind_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
@@ -757,12 +757,11 @@ static int compat_drm_agp_unbind(struct file *file, unsigned int cmd,
if (get_user(request.handle, &argp->handle))
return -EFAULT;
- return drm_ioctl_kernel(file, drm_agp_unbind_ioctl, &request,
+ return drm_ioctl_kernel(file, drm_legacy_agp_unbind_ioctl, &request,
DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}
#endif /* CONFIG_AGP */
-#if IS_ENABLED(CONFIG_DRM_LEGACY)
typedef struct drm_scatter_gather32 {
u32 size; /**< In bytes -- will round to page boundary */
u32 handle; /**< Used for mapping / unmapping */
@@ -935,7 +934,6 @@ static struct {
DRM_IOCTL32_DEF(DRM_IOCTL_GET_SAREA_CTX, compat_drm_getsareactx),
DRM_IOCTL32_DEF(DRM_IOCTL_RES_CTX, compat_drm_resctx),
DRM_IOCTL32_DEF(DRM_IOCTL_DMA, compat_drm_dma),
-#endif
#if IS_ENABLED(CONFIG_AGP)
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_ENABLE, compat_drm_agp_enable),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_INFO, compat_drm_agp_info),
@@ -944,6 +942,7 @@ static struct {
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_BIND, compat_drm_agp_bind),
DRM_IOCTL32_DEF(DRM_IOCTL_AGP_UNBIND, compat_drm_agp_unbind),
#endif
+#endif
#if IS_ENABLED(CONFIG_DRM_LEGACY)
DRM_IOCTL32_DEF(DRM_IOCTL_SG_ALLOC, compat_drm_sg_alloc),
DRM_IOCTL32_DEF(DRM_IOCTL_SG_FREE, compat_drm_sg_free),
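For context, the compat handlers reworked above all follow the same shape: copy the 32-bit user layout into the native struct and dispatch through drm_ioctl_kernel(). A minimal sketch of that pattern, with hypothetical drm_foo32_t/struct drm_foo types and a hypothetical drm_foo_ioctl handler (not part of this patch):

typedef struct drm_foo32 {
	u32 handle;	/* 32-bit userspace layout */
	u32 size;
} drm_foo32_t;

static int compat_drm_foo(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	drm_foo32_t __user *argp = (void __user *)arg;
	struct drm_foo request;

	/* Translate the 32-bit userspace layout into the native struct. */
	if (get_user(request.handle, &argp->handle) ||
	    get_user(request.size, &argp->size))
		return -EFAULT;

	/* Reuse the native ioctl implementation for the real work. */
	return drm_ioctl_kernel(file, drm_foo_ioctl, &request,
				DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY);
}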
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index d273d1a8603a..b0856c139693 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -33,7 +33,6 @@
#include <linux/pci.h>
#include <linux/uaccess.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_auth.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
@@ -627,14 +626,21 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_CONTROL, drm_legacy_irq_control, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#if IS_ENABLED(CONFIG_AGP)
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_agp_acquire_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_agp_release_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_agp_enable_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_agp_info_ioctl, DRM_AUTH),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_agp_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_agp_free_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_agp_bind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
- DRM_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_agp_unbind_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ACQUIRE, drm_legacy_agp_acquire_ioctl,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_RELEASE, drm_legacy_agp_release_ioctl,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ENABLE, drm_legacy_agp_enable_ioctl,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_INFO, drm_legacy_agp_info_ioctl, DRM_AUTH),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_ALLOC, drm_legacy_agp_alloc_ioctl,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_FREE, drm_legacy_agp_free_ioctl,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_BIND, drm_legacy_agp_bind_ioctl,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+ DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_AGP_UNBIND, drm_legacy_agp_unbind_ioctl,
+ DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
#endif
DRM_LEGACY_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_legacy_sg_alloc, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
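For reference, DRM_LEGACY_IOCTL_DEF is roughly a thin wrapper around DRM_IOCTL_DEF that routes the ioctl to an invalid-op stub when CONFIG_DRM_LEGACY is disabled; something along these lines (a sketch, not copied from the tree):

#if IS_ENABLED(CONFIG_DRM_LEGACY)
#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) \
	DRM_IOCTL_DEF(ioctl, _func, _flags)
#else
/* legacy ioctls simply fail on kernels built without CONFIG_DRM_LEGACY */
#define DRM_LEGACY_IOCTL_DEF(ioctl, _func, _flags) \
	DRM_IOCTL_DEF(ioctl, drm_invalid_op, _flags)
#endif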
diff --git a/drivers/gpu/drm/drm_legacy.h b/drivers/gpu/drm/drm_legacy.h
index f71358f9eac9..7080d2538421 100644
--- a/drivers/gpu/drm/drm_legacy.h
+++ b/drivers/gpu/drm/drm_legacy.h
@@ -148,6 +148,30 @@ struct drm_agp_mem {
struct list_head head;
};
+/* drm_agpsupport.c */
+#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
+void drm_legacy_agp_clear(struct drm_device *dev);
+
+int drm_legacy_agp_acquire_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_legacy_agp_release_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_legacy_agp_enable_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_legacy_agp_info_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_legacy_agp_alloc_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_legacy_agp_free_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_legacy_agp_unbind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_legacy_agp_bind_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+#else
+static inline void drm_legacy_agp_clear(struct drm_device *dev) {}
+#endif
+
/* drm_lock.c */
#if IS_ENABLED(CONFIG_DRM_LEGACY)
int drm_legacy_lock(struct drm_device *d, void *v, struct drm_file *f);
@@ -211,4 +235,10 @@ void drm_master_legacy_init(struct drm_master *master);
static inline void drm_master_legacy_init(struct drm_master *master) {}
#endif
+#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_PCI)
+void drm_legacy_pci_agp_destroy(struct drm_device *dev);
+#else
+static inline void drm_legacy_pci_agp_destroy(struct drm_device *dev) {}
+#endif
+
#endif /* __DRM_LEGACY_H__ */
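The header above relies on the usual config-stub idiom: declare the real function when the option is enabled and provide an empty static inline otherwise, so callers never need #ifdefs. A generic sketch of the idiom with a hypothetical CONFIG_FOO/foo_teardown pair:

#if IS_ENABLED(CONFIG_FOO)
void foo_teardown(struct drm_device *dev);
#else
static inline void foo_teardown(struct drm_device *dev)
{
	/* compiles away when CONFIG_FOO is disabled */
}
#endif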
diff --git a/drivers/gpu/drm/drm_legacy_misc.c b/drivers/gpu/drm/drm_legacy_misc.c
index 8f54e6a78b6f..83db43b7a25e 100644
--- a/drivers/gpu/drm/drm_legacy_misc.c
+++ b/drivers/gpu/drm/drm_legacy_misc.c
@@ -33,7 +33,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c
index e4f20a2eb6e7..d2e1dccd8113 100644
--- a/drivers/gpu/drm/drm_memory.c
+++ b/drivers/gpu/drm/drm_memory.c
@@ -38,7 +38,6 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_cache.h>
#include <drm/drm_device.h>
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 73e4de3c7f49..ae53ea624c73 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1176,16 +1176,11 @@ enum drm_mode_status
drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
struct drm_connector *connector)
{
- u8 vic = drm_match_cea_mode(mode);
- enum drm_mode_status status = MODE_OK;
- struct drm_hdmi_info *hdmi = &connector->display_info.hdmi;
-
- if (test_bit(vic, hdmi->y420_vdb_modes)) {
- if (!connector->ycbcr_420_allowed)
- status = MODE_NO_420;
- }
+ if (!connector->ycbcr_420_allowed &&
+ drm_mode_is_420_only(&connector->display_info, mode))
+ return MODE_NO_420;
- return status;
+ return MODE_OK;
}
EXPORT_SYMBOL(drm_mode_validate_ycbcr420);
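The simplified helper above now only rejects modes that the sink supports exclusively in YCbCr 4:2:0 when the connector does not allow 4:2:0 output. A hedged sketch of how a connector's .mode_valid hook might use it (the foo_connector_mode_valid name is illustrative):

static enum drm_mode_status
foo_connector_mode_valid(struct drm_connector *connector,
			 struct drm_display_mode *mode)
{
	enum drm_mode_status status;

	/* Reject 4:2:0-only modes if this connector cannot output 4:2:0. */
	status = drm_mode_validate_ycbcr420(mode, connector);
	if (status != MODE_OK)
		return status;

	/* ... driver-specific clock/size checks would go here ... */
	return MODE_OK;
}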
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 2294a1580d35..38c3cb72e7e6 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -30,7 +30,6 @@
#include <linux/slab.h>
#include <drm/drm.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_drv.h>
#include <drm/drm_print.h>
@@ -41,64 +40,6 @@
/* List of devices hanging off drivers with stealth attach. */
static LIST_HEAD(legacy_dev_list);
static DEFINE_MUTEX(legacy_dev_list_lock);
-
-/**
- * drm_pci_alloc - Allocate a PCI consistent memory block, for DMA.
- * @dev: DRM device
- * @size: size of block to allocate
- * @align: alignment of block
- *
- * FIXME: This is a needless abstraction of the Linux dma-api and should be
- * removed.
- *
- * Return: A handle to the allocated memory block on success or NULL on
- * failure.
- */
-drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
-{
- drm_dma_handle_t *dmah;
-
- /* pci_alloc_consistent only guarantees alignment to the smallest
- * PAGE_SIZE order which is greater than or equal to the requested size.
- * Return NULL here for now to make sure nobody tries for larger alignment
- */
- if (align > size)
- return NULL;
-
- dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
- if (!dmah)
- return NULL;
-
- dmah->size = size;
- dmah->vaddr = dma_alloc_coherent(dev->dev, size,
- &dmah->busaddr,
- GFP_KERNEL);
-
- if (dmah->vaddr == NULL) {
- kfree(dmah);
- return NULL;
- }
-
- return dmah;
-}
-EXPORT_SYMBOL(drm_pci_alloc);
-
-/**
- * drm_pci_free - Free a PCI consistent memory block
- * @dev: DRM device
- * @dmah: handle to memory block
- *
- * FIXME: This is a needless abstraction of the Linux dma-api and should be
- * removed.
- */
-void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
- dma_free_coherent(dev->dev, dmah->size, dmah->vaddr,
- dmah->busaddr);
- kfree(dmah);
-}
-
-EXPORT_SYMBOL(drm_pci_free);
#endif
static int drm_get_pci_domain(struct drm_device *dev)
@@ -177,7 +118,9 @@ int drm_legacy_irq_by_busid(struct drm_device *dev, void *data,
return drm_pci_irq_by_busid(dev, p);
}
-void drm_pci_agp_destroy(struct drm_device *dev)
+#ifdef CONFIG_DRM_LEGACY
+
+void drm_legacy_pci_agp_destroy(struct drm_device *dev)
{
if (dev->agp) {
arch_phys_wc_del(dev->agp->agp_mtrr);
@@ -187,13 +130,11 @@ void drm_pci_agp_destroy(struct drm_device *dev)
}
}
-#ifdef CONFIG_DRM_LEGACY
-
-static void drm_pci_agp_init(struct drm_device *dev)
+static void drm_legacy_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
if (pci_find_capability(to_pci_dev(dev->dev), PCI_CAP_ID_AGP))
- dev->agp = drm_agp_init(dev);
+ dev->agp = drm_legacy_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
dev->agp->agp_info.aper_base,
@@ -203,9 +144,9 @@ static void drm_pci_agp_init(struct drm_device *dev)
}
}
-static int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- const struct drm_driver *driver)
+static int drm_legacy_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ const struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -220,7 +161,6 @@ static int drm_get_pci_dev(struct pci_dev *pdev,
if (ret)
goto err_free;
- dev->pdev = pdev;
#ifdef __alpha__
dev->hose = pdev->sysdata;
#endif
@@ -228,7 +168,7 @@ static int drm_get_pci_dev(struct pci_dev *pdev,
if (drm_core_check_feature(dev, DRIVER_MODESET))
pci_set_drvdata(pdev, dev);
- drm_pci_agp_init(dev);
+ drm_legacy_pci_agp_init(dev);
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
@@ -243,7 +183,7 @@ static int drm_get_pci_dev(struct pci_dev *pdev,
return 0;
err_agp:
- drm_pci_agp_destroy(dev);
+ drm_legacy_pci_agp_destroy(dev);
pci_disable_device(pdev);
err_free:
drm_dev_put(dev);
@@ -290,7 +230,7 @@ int drm_legacy_pci_init(const struct drm_driver *driver,
/* stealth mode requires a manual probe */
pci_dev_get(pdev);
- drm_get_pci_dev(pdev, pid, driver);
+ drm_legacy_get_pci_dev(pdev, pid, driver);
}
}
return 0;
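With drm_pci_alloc()/drm_pci_free() removed above, callers are expected to use the DMA API directly. A minimal sketch of the equivalent direct calls, assuming a struct drm_device *dev and with error handling trimmed:

	void *vaddr;
	dma_addr_t busaddr;

	/* was: dmah = drm_pci_alloc(dev, size, align); */
	vaddr = dma_alloc_coherent(dev->dev, size, &busaddr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* ... use vaddr / busaddr ... */

	/* was: drm_pci_free(dev, dmah); */
	dma_free_coherent(dev->dev, size, vaddr, busaddr);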
diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c
index 0dd43882fe7c..b373958ecb30 100644
--- a/drivers/gpu/drm/drm_plane.c
+++ b/drivers/gpu/drm/drm_plane.c
@@ -128,6 +128,13 @@
* pairs supported by this plane. The blob is a struct
* drm_format_modifier_blob. Without this property the plane doesn't
* support buffers with modifiers. Userspace cannot change this property.
+ *
+ * Note that userspace can check the &DRM_CAP_ADDFB2_MODIFIERS driver
+ * capability for general modifier support. If this flag is set then every
+ * plane will have the IN_FORMATS property, even when it only supports
+ * DRM_FORMAT_MOD_LINEAR. Before Linux kernel release v5.1 there were
+ * various bugs in this area, with inconsistencies between the capability
+ * flag and the per-plane properties.
*/
static unsigned int drm_num_planes(struct drm_device *dev)
@@ -277,8 +284,14 @@ static int __drm_universal_plane_init(struct drm_device *dev,
format_modifier_count++;
}
- if (format_modifier_count)
+ /* autoset the cap and check for consistency across all planes */
+ if (format_modifier_count) {
+ drm_WARN_ON(dev, !config->allow_fb_modifiers &&
+ !list_empty(&config->plane_list));
config->allow_fb_modifiers = true;
+ } else {
+ drm_WARN_ON(dev, config->allow_fb_modifiers);
+ }
plane->modifier_count = format_modifier_count;
plane->modifiers = kmalloc_array(format_modifier_count,
@@ -360,6 +373,9 @@ static int __drm_universal_plane_init(struct drm_device *dev,
* drm_universal_plane_init() to let the DRM managed resource infrastructure
* take care of cleanup and deallocation.
*
+ * Drivers supporting modifiers must set @format_modifiers on all their planes,
+ * even those that only support DRM_FORMAT_MOD_LINEAR.
+ *
* Returns:
* Zero on success, error code on failure.
*/
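The requirement documented above (every plane advertises an explicit modifier list, even if it is only linear) typically looks like the msm/mdp4 change later in this series. A hedged sketch for a hypothetical driver plane, where foo_plane_funcs and foo_formats come from the driver:

static const uint64_t foo_plane_modifiers[] = {
	DRM_FORMAT_MOD_LINEAR,
	DRM_FORMAT_MOD_INVALID,	/* terminator */
};

	ret = drm_universal_plane_init(dev, plane, possible_crtcs,
				       &foo_plane_funcs,
				       foo_formats, ARRAY_SIZE(foo_formats),
				       foo_plane_modifiers,
				       DRM_PLANE_TYPE_PRIMARY, NULL);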
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 9b3b989d7cad..e957d4851dc0 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -45,8 +45,6 @@
#endif
#include <linux/mem_encrypt.h>
-
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 64370b634cca..79fa3649185c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -177,7 +177,5 @@ void exynos_drm_mode_config_init(struct drm_device *dev)
dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
dev->mode_config.helper_private = &exynos_drm_mode_config_helpers;
- dev->mode_config.allow_fb_modifiers = true;
-
dev->mode_config.normalize_zpos = true;
}
diff --git a/drivers/gpu/drm/gma500/backlight.c b/drivers/gpu/drm/gma500/backlight.c
index 35600d070cb5..9e90258541a4 100644
--- a/drivers/gpu/drm/gma500/backlight.c
+++ b/drivers/gpu/drm/gma500/backlight.c
@@ -42,7 +42,7 @@ void gma_backlight_disable(struct drm_device *dev)
dev_priv->backlight_device->props.brightness = 0;
do_gma_backlight_set(dev);
}
-#endif
+#endif
}
void gma_backlight_set(struct drm_device *dev, int v)
@@ -54,7 +54,7 @@ void gma_backlight_set(struct drm_device *dev, int v)
dev_priv->backlight_device->props.brightness = v;
do_gma_backlight_set(dev);
}
-#endif
+#endif
}
int gma_backlight_init(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/cdv_intel_dp.c b/drivers/gpu/drm/gma500/cdv_intel_dp.c
index 6d3ada39ff86..595b765ecc71 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_dp.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_dp.c
@@ -245,7 +245,7 @@ i2c_dp_aux_add_bus(struct i2c_adapter *adapter)
if (W && !in_dbg_master()) msleep(W); \
} \
ret__; \
-})
+})
#define wait_for(COND, MS) _wait_for(COND, MS, 1)
@@ -386,7 +386,7 @@ static void cdv_intel_edp_panel_vdd_on(struct gma_encoder *intel_encoder)
if (intel_dp->panel_on) {
DRM_DEBUG_KMS("Skip VDD on because of panel on\n");
return;
- }
+ }
DRM_DEBUG_KMS("\n");
pp = REG_READ(PP_CONTROL);
@@ -433,7 +433,7 @@ static bool cdv_intel_edp_panel_on(struct gma_encoder *intel_encoder)
DRM_DEBUG_KMS("Error in Powering up eDP panel, status %x\n", REG_READ(PP_STATUS));
intel_dp->panel_on = false;
} else
- intel_dp->panel_on = true;
+ intel_dp->panel_on = true;
msleep(intel_dp->panel_power_up_delay);
return false;
@@ -449,7 +449,7 @@ static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder)
pp = REG_READ(PP_CONTROL);
- if ((pp & POWER_TARGET_ON) == 0)
+ if ((pp & POWER_TARGET_ON) == 0)
return;
intel_dp->panel_on = false;
@@ -464,7 +464,7 @@ static void cdv_intel_edp_panel_off (struct gma_encoder *intel_encoder)
DRM_DEBUG_KMS("PP_STATUS %x\n", REG_READ(PP_STATUS));
if (wait_for((REG_READ(PP_STATUS) & idle_off_mask) == 0, 1000)) {
- DRM_DEBUG_KMS("Error in turning off Panel\n");
+ DRM_DEBUG_KMS("Error in turning off Panel\n");
}
msleep(intel_dp->panel_power_cycle_delay);
@@ -535,7 +535,7 @@ cdv_intel_dp_mode_valid(struct drm_connector *connector,
if (cdv_intel_dp_link_required(mode->clock, 24)
> cdv_intel_dp_max_data_rate(max_link_clock, max_lanes))
return MODE_CLOCK_HIGH;
-
+
}
if (mode->clock < 10000)
return MODE_CLOCK_LOW;
@@ -606,7 +606,7 @@ cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
for (i = 0; i < send_bytes; i += 4)
REG_WRITE(ch_data + i,
pack_aux(send + i, send_bytes - i));
-
+
/* Send the command and wait for it to complete */
REG_WRITE(ch_ctl,
DP_AUX_CH_CTL_SEND_BUSY |
@@ -623,7 +623,7 @@ cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
break;
udelay(100);
}
-
+
/* Clear done status and any errors */
REG_WRITE(ch_ctl,
status |
@@ -659,7 +659,7 @@ cdv_intel_dp_aux_ch(struct gma_encoder *encoder,
DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
if (recv_bytes > recv_size)
recv_bytes = recv_size;
-
+
for (i = 0; i < recv_bytes; i += 4)
unpack_aux(REG_READ(ch_data + i),
recv + i, recv_bytes - i);
@@ -870,7 +870,7 @@ cdv_intel_dp_i2c_init(struct gma_connector *connector,
ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
if (is_edp(encoder))
cdv_intel_edp_panel_vdd_off(encoder);
-
+
return ret;
}
@@ -1291,13 +1291,13 @@ cdv_intel_get_adjust_train(struct gma_encoder *encoder)
if (this_p > p)
p = this_p;
}
-
+
if (v >= CDV_DP_VOLTAGE_MAX)
v = CDV_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
if (p == DP_TRAIN_PRE_EMPHASIS_MASK)
p |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
-
+
for (lane = 0; lane < 4; lane++)
intel_dp->train_set[lane] = v | p;
}
@@ -1358,7 +1358,6 @@ cdv_intel_dp_set_link_train(struct gma_encoder *encoder,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
{
-
struct drm_device *dev = encoder->base.dev;
int ret;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1384,7 +1383,6 @@ static bool
cdv_intel_dplink_set_level(struct gma_encoder *encoder,
uint8_t dp_train_pat)
{
-
int ret;
struct cdv_intel_dp *intel_dp = encoder->dev_priv;
@@ -1462,7 +1460,7 @@ cdv_intel_dp_set_vswing_premph(struct gma_encoder *encoder, uint8_t signal_level
/* ;gfx_dpio_set_reg(0x8124, 0x00004000) */
index = 2 * premph + 1;
cdv_sb_write(dev, ddi_reg->PreEmph2, dp_vswing_premph_table[index]);
- return;
+ return;
}
@@ -1481,8 +1479,8 @@ cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
DP |= DP_PORT_EN;
DP &= ~DP_LINK_TRAIN_MASK;
-
- reg = DP;
+
+ reg = DP;
reg |= DP_LINK_TRAIN_PAT_1;
/* Enable output, wait for it to become active */
REG_WRITE(intel_dp->output_reg, reg);
@@ -1556,7 +1554,7 @@ cdv_intel_dp_start_link_train(struct gma_encoder *encoder)
if (!clock_recovery) {
DRM_DEBUG_KMS("failure in DP patter 1 training, train set %x\n", intel_dp->train_set[0]);
}
-
+
intel_dp->DP = DP;
}
@@ -1747,7 +1745,7 @@ static int cdv_intel_dp_get_modes(struct drm_connector *connector)
if (is_edp(intel_encoder)) {
struct drm_device *dev = connector->dev;
struct drm_psb_private *dev_priv = dev->dev_private;
-
+
cdv_intel_edp_panel_vdd_off(intel_encoder);
if (ret) {
if (edp && !intel_dp->panel_fixed_mode) {
@@ -1942,11 +1940,11 @@ static void cdv_disable_intel_clock_gating(struct drm_device *dev)
DPCUNIT_CLOCK_GATE_DISABLE |
DPLSUNIT_CLOCK_GATE_DISABLE |
DPOUNIT_CLOCK_GATE_DISABLE |
- DPIOUNIT_CLOCK_GATE_DISABLE);
+ DPIOUNIT_CLOCK_GATE_DISABLE);
REG_WRITE(DSPCLK_GATE_D, reg_value);
- udelay(500);
+ udelay(500);
}
void
@@ -1990,7 +1988,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
gma_encoder->dev_priv=intel_dp;
intel_dp->encoder = gma_encoder;
intel_dp->output_reg = output_reg;
-
+
drm_encoder_helper_add(encoder, &cdv_intel_dp_helper_funcs);
drm_connector_helper_add(connector, &cdv_intel_dp_connector_helper_funcs);
@@ -2027,7 +2025,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
pp_on = REG_READ(PP_CONTROL);
pp_on &= ~PANEL_UNLOCK_MASK;
pp_on |= PANEL_UNLOCK_REGS;
-
+
REG_WRITE(PP_CONTROL, pp_on);
pwm_ctrl = REG_READ(BLC_PWM_CTL2);
@@ -2037,7 +2035,7 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
pp_on = REG_READ(PP_ON_DELAYS);
pp_off = REG_READ(PP_OFF_DELAYS);
pp_div = REG_READ(PP_DIVISOR);
-
+
/* Pull timing values out of registers */
cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >>
PANEL_POWER_UP_DELAY_SHIFT;
@@ -2085,9 +2083,9 @@ cdv_intel_dp_init(struct drm_device *dev, struct psb_intel_mode_device *mode_dev
goto err_connector;
} else {
DRM_DEBUG_KMS("DPCD: Rev=%x LN_Rate=%x LN_CNT=%x LN_DOWNSP=%x\n",
- intel_dp->dpcd[0], intel_dp->dpcd[1],
+ intel_dp->dpcd[0], intel_dp->dpcd[1],
intel_dp->dpcd[2], intel_dp->dpcd[3]);
-
+
}
/* The CDV reference driver moves pnale backlight setup into the displays that
have a backlight: this is a good idea and one we should probably adopt, however
diff --git a/drivers/gpu/drm/gma500/cdv_intel_lvds.c b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
index 5bff7d9e3aa6..8a2219fcf9b4 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_lvds.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_lvds.c
@@ -21,7 +21,7 @@
#include "psb_intel_drv.h"
#include "psb_intel_reg.h"
-/**
+/*
* LVDS I2C backlight control macros
*/
#define BRIGHTNESS_MAX_LEVEL 100
diff --git a/drivers/gpu/drm/gma500/intel_gmbus.c b/drivers/gpu/drm/gma500/intel_gmbus.c
index eb0924473a21..c17cbafa468a 100644
--- a/drivers/gpu/drm/gma500/intel_gmbus.c
+++ b/drivers/gpu/drm/gma500/intel_gmbus.c
@@ -379,7 +379,7 @@ static const struct i2c_algorithm gmbus_algorithm = {
};
/**
- * intel_gmbus_setup - instantiate all Intel i2c GMBuses
+ * gma_intel_setup_gmbus() - instantiate all Intel i2c GMBuses
* @dev: DRM device
*/
int gma_intel_setup_gmbus(struct drm_device *dev)
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 49afa577d442..d6e7c2c2c947 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -646,7 +646,7 @@ extern u32 psb_get_vblank_counter(struct drm_crtc *crtc);
extern int psbfb_probed(struct drm_device *dev);
extern int psbfb_remove(struct drm_device *dev,
struct drm_framebuffer *fb);
-/* accel_2d.c */
+/* psb_drv.c */
extern void psb_spank(struct drm_psb_private *dev_priv);
/* psb_reset.c */
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index de2f2d2dbc60..b65105585578 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -86,7 +86,7 @@ static inline u8 gud_from_fourcc(u32 fourcc)
return GUD_PIXEL_FORMAT_XRGB8888;
case DRM_FORMAT_ARGB8888:
return GUD_PIXEL_FORMAT_ARGB8888;
- };
+ }
return 0;
}
@@ -104,7 +104,7 @@ static inline u32 gud_to_fourcc(u8 format)
return DRM_FORMAT_XRGB8888;
case GUD_PIXEL_FORMAT_ARGB8888:
return DRM_FORMAT_ARGB8888;
- };
+ }
return 0;
}
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index abd6250d5a14..f4bc5386574a 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -14,6 +14,7 @@
#include <linux/module.h>
#include <linux/pci.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_gem_framebuffer_helper.h>
@@ -60,7 +61,7 @@ static const struct drm_driver hibmc_driver = {
.minor = 0,
.debugfs_init = drm_vram_mm_debugfs_init,
.dumb_create = hibmc_dumb_create,
- .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
.gem_prime_mmap = drm_gem_prime_mmap,
.irq_handler = hibmc_drm_interrupt,
};
@@ -313,8 +314,7 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
struct drm_device *dev;
int ret;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
- "hibmcdrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "hibmcdrmfb");
if (ret)
return ret;
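Several drivers in this series switch from drm_fb_helper_remove_conflicting_pci_framebuffers() to the new aperture helper (and gain the <drm/drm_aperture.h> include); the call sites all follow the same probe-time pattern. A minimal sketch, assuming a hypothetical foo_pci_probe():

static int foo_pci_probe(struct pci_dev *pdev,
			 const struct pci_device_id *ent)
{
	int ret;

	/*
	 * Kick out firmware framebuffers (efifb, vesafb, ...) that overlap
	 * the device's apertures before taking over the hardware.
	 */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "foodrmfb");
	if (ret)
		return ret;

	/* ... regular device setup continues here ... */
	return 0;
}

Platform drivers without a fixed aperture (see the meson and msm changes further below) call drm_aperture_remove_framebuffers(false, name) instead, since their firmware framebuffer can sit anywhere in RAM.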
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index 88250860f8e4..d78c82af367c 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -34,7 +34,6 @@
#include <linux/mman.h>
#include <linux/pci.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -220,7 +219,7 @@ static int i810_dma_cleanup(struct drm_device *dev)
if (dev_priv->ring.virtual_start)
drm_legacy_ioremapfree(&dev_priv->ring.map, dev);
if (dev_priv->hw_status_page) {
- dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+ dma_free_coherent(dev->dev, PAGE_SIZE,
dev_priv->hw_status_page,
dev_priv->dma_status_page);
}
@@ -398,7 +397,7 @@ static int i810_dma_initialize(struct drm_device *dev,
/* Program Hardware Status Page */
dev_priv->hw_status_page =
- dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+ dma_alloc_coherent(dev->dev, PAGE_SIZE,
&dev_priv->dma_status_page, GFP_KERNEL);
if (!dev_priv->hw_status_page) {
dev->dev_private = (void *)dev_priv;
@@ -1197,7 +1196,9 @@ static int i810_flip_bufs(struct drm_device *dev, void *data,
int i810_driver_load(struct drm_device *dev, unsigned long flags)
{
- dev->agp = drm_agp_init(dev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ dev->agp = drm_legacy_agp_init(dev);
if (dev->agp) {
dev->agp->agp_mtrr = arch_phys_wc_add(
dev->agp->agp_info.aper_base,
@@ -1209,7 +1210,7 @@ int i810_driver_load(struct drm_device *dev, unsigned long flags)
if (!dev->agp)
return -EINVAL;
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index 4fa389fce8cb..084da7a76b1c 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -109,16 +109,6 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return -EINVAL;
}
-static bool blob_equal(const struct drm_property_blob *a,
- const struct drm_property_blob *b)
-{
- if (a && b)
- return a->length == b->length &&
- !memcmp(a->data, b->data, a->length);
-
- return !a == !b;
-}
-
int intel_digital_connector_atomic_check(struct drm_connector *conn,
struct drm_atomic_state *state)
{
@@ -149,8 +139,7 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
new_conn_state->base.picture_aspect_ratio != old_conn_state->base.picture_aspect_ratio ||
new_conn_state->base.content_type != old_conn_state->base.content_type ||
new_conn_state->base.scaling_mode != old_conn_state->base.scaling_mode ||
- !blob_equal(new_conn_state->base.hdr_output_metadata,
- old_conn_state->base.hdr_output_metadata))
+ !drm_connector_atomic_hdr_metadata_equal(old_state, new_state))
crtc_state->mode_changed = true;
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index d5ceb7bdc14b..9bed1ccecea0 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -282,14 +282,12 @@ void
intel_attach_hdmi_colorspace_property(struct drm_connector *connector)
{
if (!drm_mode_create_hdmi_colorspace_property(connector))
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+ drm_connector_attach_colorspace_property(connector);
}
void
intel_attach_dp_colorspace_property(struct drm_connector *connector)
{
if (!drm_mode_create_dp_colorspace_property(connector))
- drm_object_attach_property(&connector->base,
- connector->colorspace_property, 0);
+ drm_connector_attach_colorspace_property(connector);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 64e9107d70f7..9b9b538b0cb6 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -11705,8 +11705,6 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
mode_config->preferred_depth = 24;
mode_config->prefer_shadow = 1;
- mode_config->allow_fb_modifiers = true;
-
mode_config->funcs = &intel_mode_funcs;
mode_config->async_page_flip = has_async_flips(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index 7e83bc2cc34a..c4b446d6a042 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -682,6 +682,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
+ intel_dp->aux.drm_dev = &dev_priv->drm;
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 02a003fd48fb..59efa9be3015 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -513,7 +513,7 @@ static void intel_dp_link_training_clock_recovery_delay(struct intel_dp *intel_d
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX)
- drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
+ drm_dp_link_train_clock_recovery_delay(&intel_dp->aux, intel_dp->dpcd);
else
drm_dp_lttpr_link_train_clock_recovery_delay();
}
@@ -665,11 +665,11 @@ intel_dp_link_training_channel_equalization_delay(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
if (dp_phy == DP_PHY_DPRX) {
- drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(&intel_dp->aux, intel_dp->dpcd);
} else {
const u8 *phy_caps = intel_dp_lttpr_phy_caps(intel_dp, dp_phy);
- drm_dp_lttpr_link_train_channel_eq_delay(phy_caps);
+ drm_dp_lttpr_link_train_channel_eq_delay(&intel_dp->aux, phy_caps);
}
}
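The training-delay helpers now take the AUX channel so the delay and any debug output can be tied to a specific device. A hedged sketch of a generic clock-recovery loop using the updated signatures; aux, dpcd and lane_count are placeholders supplied by the driver:

static bool foo_clock_recovery(struct drm_dp_aux *aux,
			       const u8 dpcd[DP_RECEIVER_CAP_SIZE],
			       int lane_count)
{
	u8 link_status[DP_LINK_STATUS_SIZE];
	int tries;

	for (tries = 0; tries < 5; tries++) {
		/* Delay now depends on the specific AUX/DPCD combination. */
		drm_dp_link_train_clock_recovery_delay(aux, dpcd);

		if (drm_dp_dpcd_read_link_status(aux, link_status) < 0)
			return false;

		if (drm_dp_clock_recovery_ok(link_status, lane_count))
			return true;

		/* ... adjust voltage swing / pre-emphasis and retry ... */
	}

	return false;
}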
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index 2daa3f67791e..f608c0cb98f4 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -70,7 +70,8 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
slots = drm_dp_atomic_find_vcpi_slots(state, &intel_dp->mst_mgr,
connector->port,
crtc_state->pbn,
- drm_dp_get_vc_payload_bw(crtc_state->port_clock,
+ drm_dp_get_vc_payload_bw(&intel_dp->mst_mgr,
+ crtc_state->port_clock,
crtc_state->lane_count));
if (slots == -EDEADLK)
return slots;
@@ -941,6 +942,8 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
struct intel_dp *intel_dp = &dig_port->dp;
enum port port = dig_port->base.port;
int ret;
+ int max_source_rate =
+ intel_dp->source_rates[intel_dp->num_source_rates - 1];
if (!HAS_DP_MST(i915) || intel_dp_is_edp(intel_dp))
return 0;
@@ -956,7 +959,10 @@ intel_dp_mst_encoder_init(struct intel_digital_port *dig_port, int conn_base_id)
/* create encoders */
intel_dp_create_fake_mst_encoders(dig_port);
ret = drm_dp_mst_topology_mgr_init(&intel_dp->mst_mgr, &i915->drm,
- &intel_dp->aux, 16, 3, conn_base_id);
+ &intel_dp->aux, 16, 3,
+ (u8)dig_port->max_lanes,
+ drm_dp_link_rate_to_bw_code(max_source_rate),
+ conn_base_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index d69f0a6dc26d..3c767bcc47b1 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -1251,8 +1251,7 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
enable ? "Enabling" : "Disabling");
- drm_dp_dual_mode_set_tmds_output(hdmi->dp_dual_mode.type,
- adapter, enable);
+ drm_dp_dual_mode_set_tmds_output(&dev_priv->drm, hdmi->dp_dual_mode.type, adapter, enable);
}
static int intel_hdmi_hdcp_read(struct intel_digital_port *dig_port,
@@ -2223,7 +2222,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
enum port port = hdmi_to_dig_port(hdmi)->base.port;
struct i2c_adapter *adapter =
intel_gmbus_get_adapter(dev_priv, hdmi->ddc_bus);
- enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(adapter);
+ enum drm_dp_dual_mode_type type = drm_dp_dual_mode_detect(&dev_priv->drm, adapter);
/*
* Type 1 DVI adaptors are not required to implement any
@@ -2256,7 +2255,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid)
hdmi->dp_dual_mode.type = type;
hdmi->dp_dual_mode.max_tmds_clock =
- drm_dp_dual_mode_max_tmds_clock(type, adapter);
+ drm_dp_dual_mode_max_tmds_clock(&dev_priv->drm, type, adapter);
drm_dbg_kms(&dev_priv->drm,
"DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
@@ -2460,8 +2459,7 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
drm_connector_attach_content_type_property(connector);
if (DISPLAY_VER(dev_priv) >= 10)
- drm_object_attach_property(&connector->base,
- connector->dev->mode_config.hdr_output_metadata_property, 0);
+ drm_connector_attach_hdr_output_metadata_property(connector);
if (!HAS_GMCH(dev_priv))
drm_connector_attach_max_bpc_property(connector, 8, 12);
diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c
index e4ff533e3a69..ec0048024746 100644
--- a/drivers/gpu/drm/i915/display/intel_lspcon.c
+++ b/drivers/gpu/drm/i915/display/intel_lspcon.c
@@ -139,10 +139,11 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon)
static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon)
{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
enum drm_lspcon_mode current_mode;
- struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+ struct i2c_adapter *adapter = &intel_dp->aux.ddc;
- if (drm_lspcon_get_mode(adapter, &current_mode)) {
+ if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode)) {
DRM_DEBUG_KMS("Error reading LSPCON mode\n");
return DRM_LSPCON_MODE_INVALID;
}
@@ -175,11 +176,12 @@ out:
static int lspcon_change_mode(struct intel_lspcon *lspcon,
enum drm_lspcon_mode mode)
{
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
int err;
enum drm_lspcon_mode current_mode;
- struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+ struct i2c_adapter *adapter = &intel_dp->aux.ddc;
- err = drm_lspcon_get_mode(adapter, &current_mode);
+ err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, &current_mode);
if (err) {
DRM_ERROR("Error reading LSPCON mode\n");
return err;
@@ -190,7 +192,7 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon,
return 0;
}
- err = drm_lspcon_set_mode(adapter, mode);
+ err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, adapter, mode);
if (err < 0) {
DRM_ERROR("LSPCON mode change failed\n");
return err;
@@ -221,7 +223,8 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
{
int retry;
enum drm_dp_dual_mode_type adaptor_type;
- struct i2c_adapter *adapter = &lspcon_to_intel_dp(lspcon)->aux.ddc;
+ struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon);
+ struct i2c_adapter *adapter = &intel_dp->aux.ddc;
enum drm_lspcon_mode expected_mode;
expected_mode = lspcon_wake_native_aux_ch(lspcon) ?
@@ -232,7 +235,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon)
if (retry)
usleep_range(500, 1000);
- adaptor_type = drm_dp_dual_mode_detect(adapter);
+ adaptor_type = drm_dp_dual_mode_detect(intel_dp->aux.drm_dev, adapter);
if (adaptor_type == DRM_DP_DUAL_MODE_LSPCON)
break;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_region_lmem.c b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
index be6f2c8f5184..73fceb0c25fc 100644
--- a/drivers/gpu/drm/i915/gt/intel_region_lmem.c
+++ b/drivers/gpu/drm/i915/gt/intel_region_lmem.c
@@ -177,7 +177,7 @@ static struct intel_memory_region *setup_lmem(struct intel_gt *gt)
{
struct drm_i915_private *i915 = gt->i915;
struct intel_uncore *uncore = gt->uncore;
- struct pci_dev *pdev = i915->drm.pdev;
+ struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
struct intel_memory_region *mem;
resource_size_t io_start;
resource_size_t lmem_size;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index c2329bc44f55..6d28eff6c493 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -39,6 +39,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/vt.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
@@ -553,7 +554,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (ret)
goto err_perf;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "inteldrmfb");
if (ret)
goto err_ggtt;
@@ -757,7 +758,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(i915))
return i915;
- i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
/* Device parameters start as a copy of module parameters. */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.h b/drivers/gpu/drm/i915/intel_runtime_pm.h
index 1e4ddd11c12b..183ea2b187fe 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.h
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.h
@@ -49,7 +49,7 @@ enum i915_drm_suspend_mode {
*/
struct intel_runtime_pm {
atomic_t wakeref_count;
- struct device *kdev; /* points to i915->drm.pdev->dev */
+ struct device *kdev; /* points to i915->drm.dev */
bool available;
bool suspended;
bool irqs_enabled;
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 0188f877cab2..2a07a008de2e 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -146,7 +146,6 @@ struct drm_i915_private *mock_gem_device(void)
}
pci_set_drvdata(pdev, i915);
- i915->drm.pdev = pdev;
dev_pm_domain_set(&pdev->dev, &pm_domain);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/gpu/drm/imx/dcss/dcss-kms.c b/drivers/gpu/drm/imx/dcss/dcss-kms.c
index b549ce5e7607..37ae68a7fba5 100644
--- a/drivers/gpu/drm/imx/dcss/dcss-kms.c
+++ b/drivers/gpu/drm/imx/dcss/dcss-kms.c
@@ -52,7 +52,6 @@ static void dcss_kms_mode_config_init(struct dcss_kms_dev *kms)
config->min_height = 1;
config->max_width = 4096;
config->max_height = 4096;
- config->allow_fb_modifiers = true;
config->normalize_zpos = true;
config->funcs = &dcss_drm_mode_config_funcs;
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index e6a88c8cbd69..877d45eeb78e 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -210,7 +210,6 @@ static int imx_drm_bind(struct device *dev)
drm->mode_config.max_height = 4096;
drm->mode_config.funcs = &imx_drm_mode_config_funcs;
drm->mode_config.helper_private = &imx_drm_mode_config_helpers;
- drm->mode_config.allow_fb_modifiers = true;
drm->mode_config.normalize_zpos = true;
ret = drmm_mode_config_init(drm);
diff --git a/drivers/gpu/drm/kmb/kmb_dsi.c b/drivers/gpu/drm/kmb/kmb_dsi.c
index 4b5d82af84b3..231041b269f5 100644
--- a/drivers/gpu/drm/kmb/kmb_dsi.c
+++ b/drivers/gpu/drm/kmb/kmb_dsi.c
@@ -281,7 +281,7 @@ static u32 mipi_get_datatype_params(u32 data_type, u32 data_mode,
default:
DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode);
return -EINVAL;
- };
+ }
break;
case DSI_LP_DT_PPS_YCBCR422_16B:
data_type_param.size_constraint_pixels = 2;
@@ -301,7 +301,7 @@ static u32 mipi_get_datatype_params(u32 data_type, u32 data_mode,
default:
DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode);
return -EINVAL;
- };
+ }
break;
case DSI_LP_DT_LPPS_YCBCR422_20B:
case DSI_LP_DT_PPS_YCBCR422_24B:
@@ -318,7 +318,7 @@ static u32 mipi_get_datatype_params(u32 data_type, u32 data_mode,
default:
DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode);
return -EINVAL;
- };
+ }
break;
case DSI_LP_DT_PPS_RGB565_16B:
data_type_param.size_constraint_pixels = 1;
@@ -337,7 +337,7 @@ static u32 mipi_get_datatype_params(u32 data_type, u32 data_mode,
default:
DRM_ERROR("DSI: Invalid data_mode %d\n", data_mode);
return -EINVAL;
- };
+ }
break;
case DSI_LP_DT_PPS_RGB666_18B:
data_type_param.size_constraint_pixels = 4;
@@ -361,7 +361,7 @@ static u32 mipi_get_datatype_params(u32 data_type, u32 data_mode,
default:
DRM_ERROR("DSI: Invalid data_type %d\n", data_type);
return -EINVAL;
- };
+ }
*params = data_type_param;
return 0;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 453d8b4c5763..66de3f4f7222 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -15,6 +15,7 @@
#include <linux/platform_device.h>
#include <linux/soc/amlogic/meson-canvas.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@@ -156,23 +157,6 @@ static void meson_vpu_init(struct meson_drm *priv)
writel_relaxed(value, priv->io_base + _REG(VPU_WRARB_MODE_L2C1));
}
-static void meson_remove_framebuffers(void)
-{
- struct apertures_struct *ap;
-
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- /* The framebuffer can be located anywhere in RAM */
- ap->ranges[0].base = 0;
- ap->ranges[0].size = ~0;
-
- drm_fb_helper_remove_conflicting_framebuffers(ap, "meson-drm-fb",
- false);
- kfree(ap);
-}
-
struct meson_drm_soc_attr {
struct meson_drm_soc_limits limits;
const struct soc_device_attribute *attrs;
@@ -297,8 +281,13 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
}
}
- /* Remove early framebuffers (ie. simplefb) */
- meson_remove_framebuffers();
+ /*
+ * Remove early framebuffers (i.e. simplefb). The framebuffer can be
+ * located anywhere in RAM.
+ */
+ ret = drm_aperture_remove_framebuffers(false, "meson-drm-fb");
+ if (ret)
+ goto free_drm;
ret = drmm_mode_config_init(drm);
if (ret)
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index aad75a22dc33..2ed87cfdd735 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -1103,6 +1103,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->phy_data = meson_dw_hdmi;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
dw_plat_data->ycbcr_420_allowed = true;
+ dw_plat_data->disable_cec = true;
if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
diff --git a/drivers/gpu/drm/mga/mga_dma.c b/drivers/gpu/drm/mga/mga_dma.c
index 1cb7d120d18f..403efc1f1a7c 100644
--- a/drivers/gpu/drm/mga/mga_dma.c
+++ b/drivers/gpu/drm/mga/mga_dma.c
@@ -389,6 +389,7 @@ int mga_freelist_put(struct drm_device *dev, struct drm_buf *buf)
int mga_driver_load(struct drm_device *dev, unsigned long flags)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
drm_mga_private_t *dev_priv;
int ret;
@@ -400,9 +401,9 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
* device is 0x0021 (HB6 Universal PCI-PCI bridge), we reject the
* device.
*/
- if ((dev->pdev->device == 0x0525) && dev->pdev->bus->self
- && (dev->pdev->bus->self->vendor == 0x3388)
- && (dev->pdev->bus->self->device == 0x0021)
+ if ((pdev->device == 0x0525) && pdev->bus->self
+ && (pdev->bus->self->vendor == 0x3388)
+ && (pdev->bus->self->device == 0x0021)
&& dev->agp) {
/* FIXME: This should be quirked in the pci core, but oh well
* the hw probably stopped existing. */
@@ -419,10 +420,10 @@ int mga_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->usec_timeout = MGA_DEFAULT_USEC_TIMEOUT;
dev_priv->chipset = flags;
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
- dev_priv->mmio_base = pci_resource_start(dev->pdev, 1);
- dev_priv->mmio_size = pci_resource_len(dev->pdev, 1);
+ dev_priv->mmio_base = pci_resource_start(pdev, 1);
+ dev_priv->mmio_size = pci_resource_len(pdev, 1);
ret = drm_vblank_init(dev, 1);
@@ -468,20 +469,20 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
struct drm_agp_binding bind_req;
/* Acquire AGP. */
- err = drm_agp_acquire(dev);
+ err = drm_legacy_agp_acquire(dev);
if (err) {
DRM_ERROR("Unable to acquire AGP: %d\n", err);
return err;
}
- err = drm_agp_info(dev, &info);
+ err = drm_legacy_agp_info(dev, &info);
if (err) {
DRM_ERROR("Unable to get AGP info: %d\n", err);
return err;
}
mode.mode = (info.mode & ~0x07) | dma_bs->agp_mode;
- err = drm_agp_enable(dev, mode);
+ err = drm_legacy_agp_enable(dev, mode);
if (err) {
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
return err;
@@ -501,7 +502,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
/* Allocate and bind AGP memory. */
agp_req.size = agp_size;
agp_req.type = 0;
- err = drm_agp_alloc(dev, &agp_req);
+ err = drm_legacy_agp_alloc(dev, &agp_req);
if (err) {
dev_priv->agp_size = 0;
DRM_ERROR("Unable to allocate %uMB AGP memory\n",
@@ -514,7 +515,7 @@ static int mga_do_agp_dma_bootstrap(struct drm_device *dev,
bind_req.handle = agp_req.handle;
bind_req.offset = 0;
- err = drm_agp_bind(dev, &bind_req);
+ err = drm_legacy_agp_bind(dev, &bind_req);
if (err) {
DRM_ERROR("Unable to bind AGP memory: %d\n", err);
return err;
@@ -971,10 +972,10 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
struct drm_agp_buffer free_req;
unbind_req.handle = dev_priv->agp_handle;
- drm_agp_unbind(dev, &unbind_req);
+ drm_legacy_agp_unbind(dev, &unbind_req);
free_req.handle = dev_priv->agp_handle;
- drm_agp_free(dev, &free_req);
+ drm_legacy_agp_free(dev, &free_req);
dev_priv->agp_textures = NULL;
dev_priv->agp_size = 0;
@@ -982,7 +983,7 @@ static int mga_do_cleanup_dma(struct drm_device *dev, int full_cleanup)
}
if ((dev->agp != NULL) && dev->agp->acquired)
- err = drm_agp_release(dev);
+ err = drm_legacy_agp_release(dev);
#endif
}
diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
index 66df51607896..84395d81ab9b 100644
--- a/drivers/gpu/drm/mga/mga_drv.h
+++ b/drivers/gpu/drm/mga/mga_drv.h
@@ -35,7 +35,6 @@
#include <linux/pci.h>
#include <linux/slab.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
diff --git a/drivers/gpu/drm/mga/mga_state.c b/drivers/gpu/drm/mga/mga_state.c
index 0dec4062e5a2..5b7247b58451 100644
--- a/drivers/gpu/drm/mga/mga_state.c
+++ b/drivers/gpu/drm/mga/mga_state.c
@@ -1005,6 +1005,7 @@ int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
drm_mga_private_t *dev_priv = dev->dev_private;
drm_mga_getparam_t *param = data;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int value;
if (!dev_priv) {
@@ -1016,7 +1017,7 @@ int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
switch (param->param) {
case MGA_PARAM_IRQ_NR:
- value = dev->pdev->irq;
+ value = pdev->irq;
break;
case MGA_PARAM_CARD_TYPE:
value = dev_priv->chipset;
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 4e4c105f9a50..a701d9563257 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -11,6 +11,7 @@
#include <linux/pci.h>
#include <linux/vmalloc.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -341,7 +342,9 @@ mgag200_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct drm_device *dev;
int ret;
- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "mgag200drmfb");
+ if (ret)
+ return ret;
ret = pcim_enable_device(pdev);
if (ret)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index cece3e57fb27..9d576240faed 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1554,7 +1554,7 @@ mgag200_handle_damage(struct mga_device *mdev, struct drm_framebuffer *fb,
{
void *vmap = map->vaddr; /* TODO: Use mapping abstraction properly */
- drm_fb_memcpy_dstclip(mdev->vram, vmap, fb, clip);
+ drm_fb_memcpy_dstclip(mdev->vram, fb->pitches[0], vmap, fb, clip);
/* Always scanout image at VRAM offset 0 */
mgag200_set_startadd(mdev, (u32)0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index 88e9cc38c13b..93bc3575bf53 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1020,11 +1020,6 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->catalog->caps->max_mixer_width * 2;
dev->mode_config.max_height = 4096;
- /*
- * Support format modifiers for compression etc.
- */
- dev->mode_config.allow_fb_modifiers = true;
-
dev->max_vblank_count = 0xffffffff;
/* Disable vblank irqs aggressively for power-saving */
dev->vblank_disable_immediate = true;
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 3d729270bde1..4a5b518288b0 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -88,8 +88,6 @@ static int mdp4_hw_init(struct msm_kms *kms)
if (mdp4_kms->rev > 1)
mdp4_write(mdp4_kms, REG_MDP4_RESET_STATUS, 1);
- dev->mode_config.allow_fb_modifiers = true;
-
out:
pm_runtime_put_sync(dev->dev);
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
index 9aecca919f24..49bdabea8ed5 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_plane.c
@@ -349,6 +349,12 @@ enum mdp4_pipe mdp4_plane_pipe(struct drm_plane *plane)
return mdp4_plane->pipe;
}
+static const uint64_t supported_format_modifiers[] = {
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
/* initialize plane */
struct drm_plane *mdp4_plane_init(struct drm_device *dev,
enum mdp4_pipe pipe_id, bool private_plane)
@@ -377,7 +383,7 @@ struct drm_plane *mdp4_plane_init(struct drm_device *dev,
type = private_plane ? DRM_PLANE_TYPE_PRIMARY : DRM_PLANE_TYPE_OVERLAY;
ret = drm_universal_plane_init(dev, plane, 0xff, &mdp4_plane_funcs,
mdp4_plane->formats, mdp4_plane->nformats,
- NULL, type, NULL);
+ supported_format_modifiers, type, NULL);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 1390f3547fde..2cebd17a7289 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -1103,7 +1103,7 @@ static int dp_ctrl_link_train_1(struct dp_ctrl_private *ctrl,
tries = 0;
old_v_level = ctrl->link->phy_params.v_level;
for (tries = 0; tries < maximum_retries; tries++) {
- drm_dp_link_train_clock_recovery_delay(ctrl->panel->dpcd);
+ drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd);
ret = dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
@@ -1184,7 +1184,7 @@ static int dp_ctrl_link_lane_down_shift(struct dp_ctrl_private *ctrl)
static void dp_ctrl_clear_training_pattern(struct dp_ctrl_private *ctrl)
{
dp_ctrl_train_pattern_set(ctrl, DP_TRAINING_PATTERN_DISABLE);
- drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+ drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
}
static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
@@ -1215,7 +1215,7 @@ static int dp_ctrl_link_train_2(struct dp_ctrl_private *ctrl,
dp_ctrl_train_pattern_set(ctrl, pattern | DP_RECOVERED_CLOCK_OUT_EN);
for (tries = 0; tries <= maximum_retries; tries++) {
- drm_dp_link_train_channel_eq_delay(ctrl->panel->dpcd);
+ drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd);
ret = dp_ctrl_read_link_status(ctrl, link_status);
if (ret)
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
index eb34243dad53..8590f2ce274d 100644
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ b/drivers/gpu/drm/msm/edp/edp.h
@@ -46,8 +46,7 @@ void edp_bridge_destroy(struct drm_bridge *bridge);
struct drm_connector *msm_edp_connector_init(struct msm_edp *edp);
/* AUX */
-void *msm_edp_aux_init(struct device *dev, void __iomem *regbase,
- struct drm_dp_aux **drm_aux);
+void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux);
void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux);
irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr);
void msm_edp_aux_ctrl(struct edp_aux *aux, int enable);
diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c
index df10a0196d94..e3d85c622cfb 100644
--- a/drivers/gpu/drm/msm/edp/edp_aux.c
+++ b/drivers/gpu/drm/msm/edp/edp_aux.c
@@ -184,9 +184,9 @@ unlock_exit:
return ret;
}
-void *msm_edp_aux_init(struct device *dev, void __iomem *regbase,
- struct drm_dp_aux **drm_aux)
+void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux)
{
+ struct device *dev = &edp->pdev->dev;
struct edp_aux *aux = NULL;
int ret;
@@ -201,6 +201,7 @@ void *msm_edp_aux_init(struct device *dev, void __iomem *regbase,
aux->drm_aux.name = "msm_edp_aux";
aux->drm_aux.dev = dev;
+ aux->drm_aux.drm_dev = edp->dev;
aux->drm_aux.transfer = edp_aux_transfer;
ret = drm_dp_aux_register(&aux->drm_aux);
if (ret) {
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
index 0d9657cc70db..4fb397ee7c84 100644
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ b/drivers/gpu/drm/msm/edp/edp_ctrl.c
@@ -608,7 +608,7 @@ static int edp_start_link_train_1(struct edp_ctrl *ctrl)
tries = 0;
old_v_level = ctrl->v_level;
while (1) {
- drm_dp_link_train_clock_recovery_delay(ctrl->dpcd);
+ drm_dp_link_train_clock_recovery_delay(ctrl->drm_aux, ctrl->dpcd);
rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
if (rlen < DP_LINK_STATUS_SIZE) {
@@ -665,7 +665,7 @@ static int edp_start_link_train_2(struct edp_ctrl *ctrl)
return ret;
while (1) {
- drm_dp_link_train_channel_eq_delay(ctrl->dpcd);
+ drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
if (rlen < DP_LINK_STATUS_SIZE) {
@@ -743,7 +743,7 @@ static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
ret = edp_train_pattern_set_write(ctrl, 0);
- drm_dp_link_train_channel_eq_delay(ctrl->dpcd);
+ drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
return ret;
}
@@ -1153,7 +1153,7 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
}
/* Init aux and phy */
- ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux);
+ ctrl->aux = msm_edp_aux_init(edp, ctrl->base, &ctrl->drm_aux);
if (!ctrl->aux || !ctrl->drm_aux) {
pr_err("%s:failed to init aux\n", __func__);
return -ENOMEM;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 678dba1725a6..227404077e39 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -4,6 +4,7 @@
* Author: Rob Clark <robdclark@gmail.com>
*/
+#include <drm/drm_aperture.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
@@ -168,7 +169,9 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
}
/* the fw fb could be anywhere in memory */
- drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);
+ ret = drm_aperture_remove_framebuffers(false, "msm");
+ if (ret)
+ goto fini;
ret = drm_fb_helper_initial_config(helper, 32);
if (ret)
diff --git a/drivers/gpu/drm/mxsfb/Kconfig b/drivers/gpu/drm/mxsfb/Kconfig
index 0143d539f8f8..ee22cd25d3e3 100644
--- a/drivers/gpu/drm/mxsfb/Kconfig
+++ b/drivers/gpu/drm/mxsfb/Kconfig
@@ -10,7 +10,6 @@ config DRM_MXSFB
depends on COMMON_CLK
select DRM_MXS
select DRM_KMS_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_CMA_HELPER
select DRM_PANEL
select DRM_PANEL_BRIDGE
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 1c9c0cdf85db..c46d0374b6e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1617,7 +1617,8 @@ nv50_mstm_new(struct nouveau_encoder *outp, struct drm_dp_aux *aux, int aux_max,
mstm->mgr.cbs = &nv50_mstm;
ret = drm_dp_mst_topology_mgr_init(&mstm->mgr, dev, aux, aux_max,
- max_payloads, conn_base_id);
+ (u8)max_payloads, outp->dcb->dpconf.link_nr,
+ (u8)outp->dcb->dpconf.link_bw, conn_base_id);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 3e09df0472ce..7a2624c0ba4c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -302,7 +302,6 @@ nouveau_bo_init(struct nouveau_bo *nvbo, u64 size, int align, u32 domain,
int type = sg ? ttm_bo_type_sg : ttm_bo_type_device;
int ret;
- nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, domain, 0);
INIT_LIST_HEAD(&nvbo->io_reserve_lru);
@@ -364,12 +363,12 @@ static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
{
struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
- u32 vram_pages = drm->client.device.info.ram_size >> PAGE_SHIFT;
+ u64 vram_size = drm->client.device.info.ram_size;
unsigned i, fpfn, lpfn;
if (drm->client.device.info.family == NV_DEVICE_INFO_V0_CELSIUS &&
nvbo->mode && (domain & NOUVEAU_GEM_DOMAIN_VRAM) &&
- nvbo->bo.mem.num_pages < vram_pages / 4) {
+ nvbo->bo.base.size < vram_size / 4) {
/*
* Make sure that the color and depth buffers are handled
* by independent memory controller units. Up to a 9x
@@ -377,11 +376,11 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t domain)
* at the same time.
*/
if (nvbo->zeta) {
- fpfn = vram_pages / 2;
+ fpfn = (vram_size / 2) >> PAGE_SHIFT;
lpfn = ~0;
} else {
fpfn = 0;
- lpfn = vram_pages / 2;
+ lpfn = (vram_size / 2) >> PAGE_SHIFT;
}
for (i = 0; i < nvbo->placement.num_placement; ++i) {
nvbo->placements[i].fpfn = fpfn;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 61e6d7412505..7f38788a6c2b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -401,7 +401,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
drm_connector_cleanup(connector);
if (nv_connector->aux.transfer) {
drm_dp_cec_unregister_connector(&nv_connector->aux);
- drm_dp_aux_unregister(&nv_connector->aux);
kfree(nv_connector->aux.name);
}
kfree(connector);
@@ -905,13 +904,29 @@ nouveau_connector_late_register(struct drm_connector *connector)
int ret;
ret = nouveau_backlight_init(connector);
+ if (ret)
+ return ret;
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ ret = drm_dp_aux_register(&nouveau_connector(connector)->aux);
+ if (ret)
+ goto backlight_fini;
+ }
+
+ return 0;
+backlight_fini:
+ nouveau_backlight_fini(connector);
return ret;
}
static void
nouveau_connector_early_unregister(struct drm_connector *connector)
{
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP ||
+ connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort)
+ drm_dp_aux_unregister(&nouveau_connector(connector)->aux);
+
nouveau_backlight_fini(connector);
}
@@ -1339,18 +1354,19 @@ nouveau_connector_create(struct drm_device *dev,
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
nv_connector->aux.dev = connector->kdev;
+ nv_connector->aux.drm_dev = dev;
nv_connector->aux.transfer = nouveau_connector_aux_xfer;
snprintf(aux_name, sizeof(aux_name), "sor-%04x-%04x",
dcbe->hasht, dcbe->hashm);
nv_connector->aux.name = kstrdup(aux_name, GFP_KERNEL);
- ret = drm_dp_aux_register(&nv_connector->aux);
+ drm_dp_aux_init(&nv_connector->aux);
if (ret) {
- NV_ERROR(drm, "failed to register aux channel\n");
+ NV_ERROR(drm, "Failed to init AUX adapter for sor-%04x-%04x: %d\n",
+ dcbe->hasht, dcbe->hashm, ret);
kfree(nv_connector);
return ERR_PTR(ret);
}
- funcs = &nouveau_connector_funcs;
- break;
+ fallthrough;
default:
funcs = &nouveau_connector_funcs;
break;
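
The nouveau change above moves DP AUX registration out of connector creation and into the connector's .late_register/.early_unregister hooks, so the AUX device only appears in sysfs/i2c once the connector itself is registered. A stripped-down sketch of that pattern with invented names (struct example_connector and friends are not nouveau's types):

#include <drm/drm_connector.h>
#include <drm/drm_dp_helper.h>

struct example_connector {
        struct drm_connector base;
        struct drm_dp_aux aux;
};

static int example_connector_late_register(struct drm_connector *connector)
{
        struct example_connector *ec =
                container_of(connector, struct example_connector, base);

        /* aux.drm_dev, aux.transfer etc. were filled in and
         * drm_dp_aux_init() called when the connector was created. */
        return drm_dp_aux_register(&ec->aux);
}

static void example_connector_early_unregister(struct drm_connector *connector)
{
        struct example_connector *ec =
                container_of(connector, struct example_connector, base);

        drm_dp_aux_unregister(&ec->aux);
}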
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dac02c7be54d..929de41c281f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -697,7 +697,6 @@ nouveau_display_create(struct drm_device *dev)
dev->mode_config.preferred_depth = 24;
dev->mode_config.prefer_shadow = 1;
- dev->mode_config.allow_fb_modifiers = true;
if (drm->client.device.info.chipset < 0x11)
dev->mode_config.async_page_flip = false;
@@ -838,21 +837,3 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
drm_gem_object_put(&bo->bo.base);
return ret;
}
-
-int
-nouveau_display_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *poffset)
-{
- struct drm_gem_object *gem;
-
- gem = drm_gem_object_lookup(file_priv, handle);
- if (gem) {
- struct nouveau_bo *bo = nouveau_gem_object(gem);
- *poffset = drm_vma_node_offset_addr(&bo->bo.base.vma_node);
- drm_gem_object_put(gem);
- return 0;
- }
-
- return -ENOENT;
-}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 616c43427059..2ab2ddb1eadf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -58,8 +58,6 @@ bool nouveau_display_scanoutpos(struct drm_crtc *crtc,
int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
-int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
- u32 handle, u64 *offset);
void nouveau_hdmi_mode_set(struct drm_encoder *, struct drm_display_mode *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 885815ea917f..3204fc0a90d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -30,7 +30,9 @@
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_vblank.h>
@@ -736,7 +738,7 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
nvkm_device_del(&device);
/* Remove conflicting drivers (vesafb, efifb etc). */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "nouveaufb");
if (ret)
return ret;
@@ -1212,7 +1214,7 @@ driver_stub = {
.gem_prime_import_sg_table = nouveau_gem_prime_import_sg_table,
.dumb_create = nouveau_display_dumb_create,
- .dumb_map_offset = nouveau_display_dumb_map_offset,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
.name = DRIVER_NAME,
.desc = DRIVER_DESC,
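
Several drivers in this series drop their hand-rolled dumb_map_offset implementations in favour of the generic TTM helper, which looks up the GEM object and returns its VMA-manager offset. A hedged sketch of the wiring; everything except drm_gem_ttm_dumb_map_offset() is a placeholder.

#include <drm/drm_drv.h>
#include <drm/drm_gem_ttm_helper.h>

static int example_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        return -ENOSYS; /* driver-specific TTM BO creation would live here */
}

static const struct drm_driver example_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
        .dumb_create     = example_dumb_create,
        /* generic: look up the GEM object, return its VMA-manager offset */
        .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
        .name            = "example",
        .desc            = "example driver",
        .date            = "20210101",
        .major           = 1,
        .minor           = 0,
};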
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 4fc0fa696461..93ac78bda750 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -379,10 +379,10 @@ nouveau_fbcon_create(struct drm_fb_helper *helper,
FBINFO_HWACCEL_IMAGEBLIT;
info->fbops = &nouveau_fbcon_sw_ops;
info->fix.smem_start = nvbo->bo.mem.bus.offset;
- info->fix.smem_len = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->fix.smem_len = nvbo->bo.base.size;
info->screen_base = nvbo_kmap_obj_iovirtual(nvbo);
- info->screen_size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ info->screen_size = nvbo->bo.base.size;
drm_fb_helper_fill_info(info, &fbcon->helper, sizes);
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index c88cbb85f101..a70e82413fa7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -253,7 +253,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
rep->offset = vma->addr;
}
- rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+ rep->size = nvbo->bo.base.size;
rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.base.vma_node);
rep->tile_mode = nvbo->mode;
rep->tile_flags = nvbo->contig ? 0 : NOUVEAU_GEM_TILE_NONCONTIG;
@@ -638,7 +638,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
if (unlikely(r->reloc_bo_offset + 4 >
- nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+ nvbo->bo.base.size)) {
NV_PRINTK(err, cli, "reloc outside of bo\n");
ret = -EINVAL;
break;
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index 4894913936e9..ef87d92cdf49 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -80,6 +80,7 @@ config DRM_PANEL_SIMPLE
tristate "support for simple panels"
depends on OF
depends on BACKLIGHT_CLASS_DEVICE
+ depends on PM
select VIDEOMODE_HELPERS
help
DRM panel driver for dumb panels that need at most a regulator and
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index be312b5c04dd..9be050ab372f 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -27,6 +27,7 @@
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <video/display_timing.h>
@@ -175,6 +176,8 @@ struct panel_simple {
bool enabled;
bool no_hpd;
+ bool prepared;
+
ktime_t prepared_time;
ktime_t unprepared_time;
@@ -186,6 +189,8 @@ struct panel_simple {
struct gpio_desc *enable_gpio;
struct gpio_desc *hpd_gpio;
+ struct edid *edid;
+
struct drm_display_mode override_mode;
enum drm_panel_orientation orientation;
@@ -334,25 +339,39 @@ static int panel_simple_disable(struct drm_panel *panel)
return 0;
}
+static int panel_simple_suspend(struct device *dev)
+{
+ struct panel_simple *p = dev_get_drvdata(dev);
+
+ gpiod_set_value_cansleep(p->enable_gpio, 0);
+ regulator_disable(p->supply);
+ p->unprepared_time = ktime_get();
+
+ kfree(p->edid);
+ p->edid = NULL;
+
+ return 0;
+}
+
static int panel_simple_unprepare(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
+ int ret;
- if (p->prepared_time == 0)
+ /* Unpreparing when already unprepared is a no-op */
+ if (!p->prepared)
return 0;
- gpiod_set_value_cansleep(p->enable_gpio, 0);
-
- regulator_disable(p->supply);
-
- p->prepared_time = 0;
- p->unprepared_time = ktime_get();
+ pm_runtime_mark_last_busy(panel->dev);
+ ret = pm_runtime_put_autosuspend(panel->dev);
+ if (ret < 0)
+ return ret;
+ p->prepared = false;
return 0;
}
-static int panel_simple_get_hpd_gpio(struct device *dev,
- struct panel_simple *p, bool from_probe)
+static int panel_simple_get_hpd_gpio(struct device *dev, struct panel_simple *p)
{
int err;
@@ -360,38 +379,28 @@ static int panel_simple_get_hpd_gpio(struct device *dev,
if (IS_ERR(p->hpd_gpio)) {
err = PTR_ERR(p->hpd_gpio);
- /*
- * If we're called from probe we won't consider '-EPROBE_DEFER'
- * to be an error--we'll leave the error code in "hpd_gpio".
- * When we try to use it we'll try again. This allows for
- * circular dependencies where the component providing the
- * hpd gpio needs the panel to init before probing.
- */
- if (err != -EPROBE_DEFER || !from_probe) {
+ if (err != -EPROBE_DEFER)
dev_err(dev, "failed to get 'hpd' GPIO: %d\n", err);
- return err;
- }
+
+ return err;
}
return 0;
}
-static int panel_simple_prepare_once(struct drm_panel *panel)
+static int panel_simple_prepare_once(struct panel_simple *p)
{
- struct panel_simple *p = to_panel_simple(panel);
+ struct device *dev = p->base.dev;
unsigned int delay;
int err;
int hpd_asserted;
unsigned long hpd_wait_us;
- if (p->prepared_time != 0)
- return 0;
-
panel_simple_wait(p->unprepared_time, p->desc->delay.unprepare);
err = regulator_enable(p->supply);
if (err < 0) {
- dev_err(panel->dev, "failed to enable supply: %d\n", err);
+ dev_err(dev, "failed to enable supply: %d\n", err);
return err;
}
@@ -404,12 +413,6 @@ static int panel_simple_prepare_once(struct drm_panel *panel)
msleep(delay);
if (p->hpd_gpio) {
- if (IS_ERR(p->hpd_gpio)) {
- err = panel_simple_get_hpd_gpio(panel->dev, p, false);
- if (err)
- goto error;
- }
-
if (p->desc->delay.hpd_absent_delay)
hpd_wait_us = p->desc->delay.hpd_absent_delay * 1000UL;
else
@@ -423,7 +426,7 @@ static int panel_simple_prepare_once(struct drm_panel *panel)
if (err) {
if (err != -ETIMEDOUT)
- dev_err(panel->dev,
+ dev_err(dev,
"error waiting for hpd GPIO: %d\n", err);
goto error;
}
@@ -447,25 +450,46 @@ error:
*/
#define MAX_PANEL_PREPARE_TRIES 5
-static int panel_simple_prepare(struct drm_panel *panel)
+static int panel_simple_resume(struct device *dev)
{
+ struct panel_simple *p = dev_get_drvdata(dev);
int ret;
int try;
for (try = 0; try < MAX_PANEL_PREPARE_TRIES; try++) {
- ret = panel_simple_prepare_once(panel);
+ ret = panel_simple_prepare_once(p);
if (ret != -ETIMEDOUT)
break;
}
if (ret == -ETIMEDOUT)
- dev_err(panel->dev, "Prepare timeout after %d tries\n", try);
+ dev_err(dev, "Prepare timeout after %d tries\n", try);
else if (try)
- dev_warn(panel->dev, "Prepare needed %d retries\n", try);
+ dev_warn(dev, "Prepare needed %d retries\n", try);
return ret;
}
+static int panel_simple_prepare(struct drm_panel *panel)
+{
+ struct panel_simple *p = to_panel_simple(panel);
+ int ret;
+
+ /* Preparing when already prepared is a no-op */
+ if (p->prepared)
+ return 0;
+
+ ret = pm_runtime_get_sync(panel->dev);
+ if (ret < 0) {
+ pm_runtime_put_autosuspend(panel->dev);
+ return ret;
+ }
+
+ p->prepared = true;
+
+ return 0;
+}
+
static int panel_simple_enable(struct drm_panel *panel)
{
struct panel_simple *p = to_panel_simple(panel);
@@ -491,13 +515,16 @@ static int panel_simple_get_modes(struct drm_panel *panel,
/* probe EDID if a DDC bus is available */
if (p->ddc) {
- struct edid *edid = drm_get_edid(connector, p->ddc);
+ pm_runtime_get_sync(panel->dev);
- drm_connector_update_edid_property(connector, edid);
- if (edid) {
- num += drm_add_edid_modes(connector, edid);
- kfree(edid);
- }
+ if (!p->edid)
+ p->edid = drm_get_edid(connector, p->ddc);
+
+ if (p->edid)
+ num += drm_add_edid_modes(connector, p->edid);
+
+ pm_runtime_mark_last_busy(panel->dev);
+ pm_runtime_put_autosuspend(panel->dev);
}
/* add hard-coded panel modes */
@@ -649,7 +676,7 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
panel->no_hpd = of_property_read_bool(dev->of_node, "no-hpd");
if (!panel->no_hpd) {
- err = panel_simple_get_hpd_gpio(dev, panel, true);
+ err = panel_simple_get_hpd_gpio(dev, panel);
if (err)
return err;
}
@@ -748,18 +775,30 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
break;
}
+ dev_set_drvdata(dev, panel);
+
+ /*
+ * We use runtime PM for prepare / unprepare since those power the panel
+ * on and off and those can be very slow operations. This is important
+ * to optimize powering the panel on briefly to read the EDID before
+ * fully enabling the panel.
+ */
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev, 1000);
+ pm_runtime_use_autosuspend(dev);
+
drm_panel_init(&panel->base, dev, &panel_simple_funcs, connector_type);
err = drm_panel_of_backlight(&panel->base);
if (err)
- goto free_ddc;
+ goto disable_pm_runtime;
drm_panel_add(&panel->base);
- dev_set_drvdata(dev, panel);
-
return 0;
+disable_pm_runtime:
+ pm_runtime_disable(dev);
free_ddc:
if (panel->ddc)
put_device(&panel->ddc->dev);
@@ -775,6 +814,7 @@ static int panel_simple_remove(struct device *dev)
drm_panel_disable(&panel->base);
drm_panel_unprepare(&panel->base);
+ pm_runtime_disable(dev);
if (panel->ddc)
put_device(&panel->ddc->dev);
@@ -4603,10 +4643,17 @@ static void panel_simple_platform_shutdown(struct platform_device *pdev)
panel_simple_shutdown(&pdev->dev);
}
+static const struct dev_pm_ops panel_simple_pm_ops = {
+ SET_RUNTIME_PM_OPS(panel_simple_suspend, panel_simple_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+};
+
static struct platform_driver panel_simple_platform_driver = {
.driver = {
.name = "panel-simple",
.of_match_table = platform_of_match,
+ .pm = &panel_simple_pm_ops,
},
.probe = panel_simple_platform_probe,
.remove = panel_simple_platform_remove,
@@ -4901,6 +4948,7 @@ static struct mipi_dsi_driver panel_simple_dsi_driver = {
.driver = {
.name = "panel-simple-dsi",
.of_match_table = dsi_of_match,
+ .pm = &panel_simple_pm_ops,
},
.probe = panel_simple_dsi_probe,
.remove = panel_simple_dsi_remove,
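
The panel-simple rework above maps drm_panel prepare/unprepare onto runtime PM with autosuspend, so a quick EDID read shortly before enable no longer cycles panel power. A minimal sketch of that mapping, assuming a device whose runtime suspend/resume callbacks do the actual power sequencing (names are illustrative):

#include <linux/pm_runtime.h>

static int example_panel_prepare(struct device *dev)
{
        int ret;

        /* Powers the panel up via the driver's runtime-resume callback. */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                pm_runtime_put_autosuspend(dev);
                return ret;
        }

        return 0;
}

static void example_panel_unprepare(struct device *dev)
{
        /* Power-off is deferred by the autosuspend delay, so an EDID read
         * followed shortly by enable does not cycle panel power. */
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
}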
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index a7637e79cb42..9e0a1e836011 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -677,10 +677,8 @@ static void qxl_primary_atomic_disable(struct drm_plane *plane,
if (bo->shadow)
bo = bo->shadow;
- if (bo->is_primary) {
+ if (bo->is_primary)
qxl_io_destroy_primary(qdev);
- bo->is_primary = false;
- }
}
}
@@ -803,6 +801,7 @@ static void qxl_prepare_shadow(struct qxl_device *qdev, struct qxl_bo *user_bo,
qdev->dumb_shadow_bo->surf.width != surf.width ||
qdev->dumb_shadow_bo->surf.height != surf.height) {
if (qdev->dumb_shadow_bo) {
+ qxl_bo_unpin(qdev->dumb_shadow_bo);
drm_gem_object_put
(&qdev->dumb_shadow_bo->tbo.base);
qdev->dumb_shadow_bo = NULL;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 6754f578fed2..854e6c5a563f 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -28,14 +28,18 @@
*/
#include "qxl_drv.h"
+
#include <linux/console.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/vgaarb.h>
#include <drm/drm.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_prime.h>
#include <drm/drm_probe_helper.h>
@@ -91,7 +95,7 @@ qxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
return ret;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "qxl");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "qxl");
if (ret)
goto disable_pci;
@@ -270,7 +274,7 @@ static struct drm_driver qxl_driver = {
.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
.dumb_create = qxl_mode_dumb_create,
- .dumb_map_offset = qxl_mode_dumb_mmap,
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset,
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = qxl_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h
index 6dd57cfb2e7c..20a0f3ab84ad 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.h
+++ b/drivers/gpu/drm/qxl/qxl_drv.h
@@ -330,9 +330,6 @@ void qxl_bo_force_delete(struct qxl_device *qdev);
int qxl_mode_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int qxl_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p);
/* qxl ttm */
int qxl_ttm_init(struct qxl_device *qdev);
diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c
index 48a58ba1db96..a635d9fdf8ac 100644
--- a/drivers/gpu/drm/qxl/qxl_dumb.c
+++ b/drivers/gpu/drm/qxl/qxl_dumb.c
@@ -69,20 +69,3 @@ int qxl_mode_dumb_create(struct drm_file *file_priv,
args->handle = handle;
return 0;
}
-
-int qxl_mode_dumb_mmap(struct drm_file *file_priv,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- struct drm_gem_object *gobj;
- struct qxl_bo *qobj;
-
- BUG_ON(!offset_p);
- gobj = drm_gem_object_lookup(file_priv, handle);
- if (gobj == NULL)
- return -ENOENT;
- qobj = gem_to_qxl_bo(gobj);
- *offset_p = qxl_bo_mmap_offset(qobj);
- drm_gem_object_put(gobj);
- return 0;
-}
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index b6075f452b9e..38aabcbe2238 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -67,8 +67,8 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data,
struct qxl_device *qdev = to_qxl(dev);
struct drm_qxl_map *qxl_map = data;
- return qxl_mode_dumb_mmap(file_priv, &qdev->ddev, qxl_map->handle,
- &qxl_map->offset);
+ return drm_gem_ttm_dumb_map_offset(file_priv, &qdev->ddev, qxl_map->handle,
+ &qxl_map->offset);
}
struct qxl_reloc_info {
diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h
index ee9c29de4d3d..cee4b52b75dd 100644
--- a/drivers/gpu/drm/qxl/qxl_object.h
+++ b/drivers/gpu/drm/qxl/qxl_object.h
@@ -53,11 +53,6 @@ static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
return bo->tbo.base.size;
}
-static inline u64 qxl_bo_mmap_offset(struct qxl_bo *bo)
-{
- return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
-}
-
extern int qxl_bo_create(struct qxl_device *qdev,
unsigned long size,
bool kernel, bool pinned, u32 domain,
diff --git a/drivers/gpu/drm/r128/ati_pcigart.c b/drivers/gpu/drm/r128/ati_pcigart.c
index 1234ec60c0af..5d73043446e3 100644
--- a/drivers/gpu/drm/r128/ati_pcigart.c
+++ b/drivers/gpu/drm/r128/ati_pcigart.c
@@ -45,24 +45,39 @@
static int drm_ati_alloc_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
- gart_info->table_handle = drm_pci_alloc(dev, gart_info->table_size,
- PAGE_SIZE);
- if (gart_info->table_handle == NULL)
+ drm_dma_handle_t *dmah = kmalloc(sizeof(drm_dma_handle_t), GFP_KERNEL);
+
+ if (!dmah)
+ return -ENOMEM;
+
+ dmah->size = gart_info->table_size;
+ dmah->vaddr = dma_alloc_coherent(dev->dev,
+ dmah->size,
+ &dmah->busaddr,
+ GFP_KERNEL);
+
+ if (!dmah->vaddr) {
+ kfree(dmah);
return -ENOMEM;
+ }
+ gart_info->table_handle = dmah;
return 0;
}
static void drm_ati_free_pcigart_table(struct drm_device *dev,
struct drm_ati_pcigart_info *gart_info)
{
- drm_pci_free(dev, gart_info->table_handle);
+ drm_dma_handle_t *dmah = gart_info->table_handle;
+
+ dma_free_coherent(dev->dev, dmah->size, dmah->vaddr, dmah->busaddr);
gart_info->table_handle = NULL;
}
int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info *gart_info)
{
struct drm_sg_mem *entry = dev->sg;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long pages;
int i;
int max_pages;
@@ -82,8 +97,7 @@ int drm_ati_pcigart_cleanup(struct drm_device *dev, struct drm_ati_pcigart_info
for (i = 0; i < pages; i++) {
if (!entry->busaddr[i])
break;
- pci_unmap_page(dev->pdev, entry->busaddr[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ pci_unmap_page(pdev, entry->busaddr[i], PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
}
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN)
@@ -102,6 +116,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
{
struct drm_local_map *map = &gart_info->mapping;
struct drm_sg_mem *entry = dev->sg;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
void *address = NULL;
unsigned long pages;
u32 *pci_gart = NULL, page_base, gart_idx;
@@ -117,7 +132,7 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
if (gart_info->gart_table_location == DRM_ATI_GART_MAIN) {
DRM_DEBUG("PCI: no table in VRAM: using normal RAM\n");
- if (pci_set_dma_mask(dev->pdev, gart_info->table_mask)) {
+ if (pci_set_dma_mask(pdev, gart_info->table_mask)) {
DRM_ERROR("fail to set dma mask to 0x%Lx\n",
(unsigned long long)gart_info->table_mask);
ret = -EFAULT;
@@ -156,9 +171,9 @@ int drm_ati_pcigart_init(struct drm_device *dev, struct drm_ati_pcigart_info *ga
gart_idx = 0;
for (i = 0; i < pages; i++) {
/* we need to support large memory configurations */
- entry->busaddr[i] = pci_map_page(dev->pdev, entry->pagelist[i],
+ entry->busaddr[i] = pci_map_page(pdev, entry->pagelist[i],
0, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, entry->busaddr[i])) {
+ if (pci_dma_mapping_error(pdev, entry->busaddr[i])) {
DRM_ERROR("unable to map PCIGART pages!\n");
drm_ati_pcigart_cleanup(dev, gart_info);
address = NULL;
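
The r128 GART change open-codes what the legacy drm_pci_alloc()/drm_pci_free() wrappers used to do: a coherent DMA allocation tracked by a small handle. Reduced to its core, and with invented function names, the pattern is:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

static void *example_alloc_gart_table(struct device *dev, size_t size,
                                      dma_addr_t *busaddr)
{
        /* What drm_pci_alloc() used to do under the hood. */
        return dma_alloc_coherent(dev, size, busaddr, GFP_KERNEL);
}

static void example_free_gart_table(struct device *dev, size_t size,
                                    void *vaddr, dma_addr_t busaddr)
{
        dma_free_coherent(dev, size, vaddr, busaddr);
}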
diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
index 138af32480d4..2a2933c16308 100644
--- a/drivers/gpu/drm/r128/r128_cce.c
+++ b/drivers/gpu/drm/r128/r128_cce.c
@@ -37,10 +37,10 @@
#include <linux/slab.h>
#include <linux/uaccess.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_irq.h>
+#include <drm/drm_legacy.h>
#include <drm/drm_print.h>
#include <drm/r128_drm.h>
diff --git a/drivers/gpu/drm/r128/r128_drv.c b/drivers/gpu/drm/r128/r128_drv.c
index b7a5f162ebae..e35a3a1449bd 100644
--- a/drivers/gpu/drm/r128/r128_drv.c
+++ b/drivers/gpu/drm/r128/r128_drv.c
@@ -85,7 +85,9 @@ static struct drm_driver driver = {
int r128_driver_load(struct drm_device *dev, unsigned long flags)
{
- pci_set_master(dev->pdev);
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ pci_set_master(pdev);
return drm_vblank_init(dev, 1);
}
diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
index 9d74c9d914cb..ac13fc2a0214 100644
--- a/drivers/gpu/drm/r128/r128_state.c
+++ b/drivers/gpu/drm/r128/r128_state.c
@@ -1582,6 +1582,7 @@ int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv
{
drm_r128_private_t *dev_priv = dev->dev_private;
drm_r128_getparam_t *param = data;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int value;
DEV_INIT_TEST_WITH_RETURN(dev_priv);
@@ -1590,7 +1591,7 @@ int r128_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv
switch (param->param) {
case R128_PARAM_IRQ_NR:
- value = dev->pdev->irq;
+ value = pdev->irq;
break;
default:
return -EINVAL;
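
The r128, savage and sis hunks all follow the same conversion: struct drm_device::pdev is being phased out, so PCI-specific code derives the pci_dev from the generic struct device embedded in the DRM device. The idiom in isolation (example_get_irq is a made-up name):

#include <linux/pci.h>

#include <drm/drm_device.h>

static unsigned int example_get_irq(struct drm_device *dev)
{
        struct pci_dev *pdev = to_pci_dev(dev->dev);

        return pdev->irq;
}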
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 15b00a347560..4c1e551d9714 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -232,6 +232,7 @@ void radeon_dp_aux_init(struct radeon_connector *radeon_connector)
radeon_connector->ddc_bus->rec.hpd = radeon_connector->hpd.hpd;
radeon_connector->ddc_bus->aux.dev = radeon_connector->base.kdev;
+ radeon_connector->ddc_bus->aux.drm_dev = radeon_connector->base.dev;
if (ASIC_IS_DCE5(rdev)) {
if (radeon_auxch)
radeon_connector->ddc_bus->aux.transfer = radeon_dp_aux_transfer_native;
@@ -679,7 +680,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
voltage = 0xff;
while (1) {
- drm_dp_link_train_clock_recovery_delay(dp_info->dpcd);
+ drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
dp_info->link_status) <= 0) {
@@ -742,7 +743,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
dp_info->tries = 0;
channel_eq = false;
while (1) {
- drm_dp_link_train_channel_eq_delay(dp_info->dpcd);
+ drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd);
if (drm_dp_dpcd_read_link_status(dp_info->aux,
dp_info->link_status) <= 0) {
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 56ed5634cebe..65301d6acf13 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -60,6 +60,7 @@
* are considered as fatal)
*/
+#include <linux/agp_backend.h>
#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/list.h>
@@ -1110,6 +1111,46 @@ typedef int (*radeon_packet0_check_t)(struct radeon_cs_parser *p,
/*
* AGP
*/
+
+struct radeon_agp_mode {
+ unsigned long mode; /**< AGP mode */
+};
+
+struct radeon_agp_info {
+ int agp_version_major;
+ int agp_version_minor;
+ unsigned long mode;
+ unsigned long aperture_base; /* physical address */
+ unsigned long aperture_size; /* bytes */
+ unsigned long memory_allowed; /* bytes */
+ unsigned long memory_used;
+
+ /* PCI information */
+ unsigned short id_vendor;
+ unsigned short id_device;
+};
+
+struct radeon_agp_head {
+ struct agp_kern_info agp_info;
+ struct list_head memory;
+ unsigned long mode;
+ struct agp_bridge_data *bridge;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+};
+
+#if IS_ENABLED(CONFIG_AGP)
+struct radeon_agp_head *radeon_agp_head_init(struct drm_device *dev);
+#else
+static inline struct radeon_agp_head *radeon_agp_head_init(struct drm_device *dev)
+{
+ return NULL;
+}
+#endif
int radeon_agp_init(struct radeon_device *rdev);
void radeon_agp_resume(struct radeon_device *rdev);
void radeon_agp_suspend(struct radeon_device *rdev);
@@ -2303,6 +2344,7 @@ struct radeon_device {
#ifdef __alpha__
struct pci_controller *hose;
#endif
+ struct radeon_agp_head *agp;
struct rw_semaphore exclusive_lock;
/* ASIC */
union radeon_asic_config config;
diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c
index 0aca7bdf54c7..d124600b5f58 100644
--- a/drivers/gpu/drm/radeon/radeon_agp.c
+++ b/drivers/gpu/drm/radeon/radeon_agp.c
@@ -27,7 +27,6 @@
#include <linux/pci.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/radeon_drm.h>
@@ -127,38 +126,127 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
PCI_VENDOR_ID_SONY, 0x8175, 1},
{ 0, 0, 0, 0, 0, 0, 0 },
};
+
+struct radeon_agp_head *radeon_agp_head_init(struct drm_device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+ struct radeon_agp_head *head = NULL;
+
+ head = kzalloc(sizeof(*head), GFP_KERNEL);
+ if (!head)
+ return NULL;
+ head->bridge = agp_find_bridge(pdev);
+ if (!head->bridge) {
+ head->bridge = agp_backend_acquire(pdev);
+ if (!head->bridge) {
+ kfree(head);
+ return NULL;
+ }
+ agp_copy_info(head->bridge, &head->agp_info);
+ agp_backend_release(head->bridge);
+ } else {
+ agp_copy_info(head->bridge, &head->agp_info);
+ }
+ if (head->agp_info.chipset == NOT_SUPPORTED) {
+ kfree(head);
+ return NULL;
+ }
+ INIT_LIST_HEAD(&head->memory);
+ head->cant_use_aperture = head->agp_info.cant_use_aperture;
+ head->page_mask = head->agp_info.page_mask;
+ head->base = head->agp_info.aper_base;
+
+ return head;
+}
+
+static int radeon_agp_head_acquire(struct radeon_device *rdev)
+{
+ struct drm_device *dev = rdev->ddev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
+
+ if (!rdev->agp)
+ return -ENODEV;
+ if (rdev->agp->acquired)
+ return -EBUSY;
+ rdev->agp->bridge = agp_backend_acquire(pdev);
+ if (!rdev->agp->bridge)
+ return -ENODEV;
+ rdev->agp->acquired = 1;
+ return 0;
+}
+
+static int radeon_agp_head_release(struct radeon_device *rdev)
+{
+ if (!rdev->agp || !rdev->agp->acquired)
+ return -EINVAL;
+ agp_backend_release(rdev->agp->bridge);
+ rdev->agp->acquired = 0;
+ return 0;
+}
+
+static int radeon_agp_head_enable(struct radeon_device *rdev, struct radeon_agp_mode mode)
+{
+ if (!rdev->agp || !rdev->agp->acquired)
+ return -EINVAL;
+
+ rdev->agp->mode = mode.mode;
+ agp_enable(rdev->agp->bridge, mode.mode);
+ rdev->agp->enabled = 1;
+ return 0;
+}
+
+static int radeon_agp_head_info(struct radeon_device *rdev, struct radeon_agp_info *info)
+{
+ struct agp_kern_info *kern;
+
+ if (!rdev->agp || !rdev->agp->acquired)
+ return -EINVAL;
+
+ kern = &rdev->agp->agp_info;
+ info->agp_version_major = kern->version.major;
+ info->agp_version_minor = kern->version.minor;
+ info->mode = kern->mode;
+ info->aperture_base = kern->aper_base;
+ info->aperture_size = kern->aper_size * 1024 * 1024;
+ info->memory_allowed = kern->max_memory << PAGE_SHIFT;
+ info->memory_used = kern->current_memory << PAGE_SHIFT;
+ info->id_vendor = kern->device->vendor;
+ info->id_device = kern->device->device;
+
+ return 0;
+}
#endif
int radeon_agp_init(struct radeon_device *rdev)
{
#if IS_ENABLED(CONFIG_AGP)
struct radeon_agpmode_quirk *p = radeon_agpmode_quirk_list;
- struct drm_agp_mode mode;
- struct drm_agp_info info;
+ struct radeon_agp_mode mode;
+ struct radeon_agp_info info;
uint32_t agp_status;
int default_mode;
bool is_v3;
int ret;
/* Acquire AGP. */
- ret = drm_agp_acquire(rdev->ddev);
+ ret = radeon_agp_head_acquire(rdev);
if (ret) {
DRM_ERROR("Unable to acquire AGP: %d\n", ret);
return ret;
}
- ret = drm_agp_info(rdev->ddev, &info);
+ ret = radeon_agp_head_info(rdev, &info);
if (ret) {
- drm_agp_release(rdev->ddev);
+ radeon_agp_head_release(rdev);
DRM_ERROR("Unable to get AGP info: %d\n", ret);
return ret;
}
- if (rdev->ddev->agp->agp_info.aper_size < 32) {
- drm_agp_release(rdev->ddev);
+ if (rdev->agp->agp_info.aper_size < 32) {
+ radeon_agp_head_release(rdev);
dev_warn(rdev->dev, "AGP aperture too small (%zuM) "
"need at least 32M, disabling AGP\n",
- rdev->ddev->agp->agp_info.aper_size);
+ rdev->agp->agp_info.aper_size);
return -EINVAL;
}
@@ -239,15 +327,15 @@ int radeon_agp_init(struct radeon_device *rdev)
}
mode.mode &= ~RADEON_AGP_FW_MODE; /* disable fw */
- ret = drm_agp_enable(rdev->ddev, mode);
+ ret = radeon_agp_head_enable(rdev, mode);
if (ret) {
DRM_ERROR("Unable to enable AGP (mode = 0x%lx)\n", mode.mode);
- drm_agp_release(rdev->ddev);
+ radeon_agp_head_release(rdev);
return ret;
}
- rdev->mc.agp_base = rdev->ddev->agp->agp_info.aper_base;
- rdev->mc.gtt_size = rdev->ddev->agp->agp_info.aper_size << 20;
+ rdev->mc.agp_base = rdev->agp->agp_info.aper_base;
+ rdev->mc.gtt_size = rdev->agp->agp_info.aper_size << 20;
rdev->mc.gtt_start = rdev->mc.agp_base;
rdev->mc.gtt_end = rdev->mc.gtt_start + rdev->mc.gtt_size - 1;
dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
@@ -278,8 +366,8 @@ void radeon_agp_resume(struct radeon_device *rdev)
void radeon_agp_fini(struct radeon_device *rdev)
{
#if IS_ENABLED(CONFIG_AGP)
- if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
- drm_agp_release(rdev->ddev);
+ if (rdev->agp && rdev->agp->acquired) {
+ radeon_agp_head_release(rdev);
}
#endif
}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 59cf1d288465..13072c2a6502 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -629,13 +629,20 @@ int
radeon_dp_mst_init(struct radeon_connector *radeon_connector)
{
struct drm_device *dev = radeon_connector->base.dev;
+ int max_link_rate;
if (!radeon_connector->ddc_bus->has_aux)
return 0;
+ if (radeon_connector_is_dp12_capable(&radeon_connector->base))
+ max_link_rate = 0x14;
+ else
+ max_link_rate = 0x0a;
+
radeon_connector->mst_mgr.cbs = &mst_cbs;
return drm_dp_mst_topology_mgr_init(&radeon_connector->mst_mgr, dev,
&radeon_connector->ddc_bus->aux, 16, 6,
+ 4, (u8)max_link_rate,
radeon_connector->base.base.id);
}
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index efeb115ae70e..31d3dd0e5258 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -38,7 +38,7 @@
#include <linux/mmu_notifier.h>
#include <linux/pci.h>
-#include <drm/drm_agpsupport.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@@ -330,7 +330,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return -EPROBE_DEFER;
/* Get rid of things like offb */
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "radeondrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "radeondrmfb");
if (ret)
return ret;
@@ -344,15 +344,6 @@ static int radeon_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
- if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
- dev->agp = drm_agp_init(dev);
- if (dev->agp) {
- dev->agp->agp_mtrr = arch_phys_wc_add(
- dev->agp->agp_info.aper_base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024);
- }
-
ret = drm_dev_register(dev, ent->driver_data);
if (ret)
goto err_agp;
@@ -360,9 +351,6 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return 0;
err_agp:
- if (dev->agp)
- arch_phys_wc_del(dev->agp->agp_mtrr);
- kfree(dev->agp);
pci_disable_device(pdev);
err_free:
drm_dev_put(dev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 58876bb4ef2a..0473583dcdac 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,7 +32,6 @@
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -80,10 +79,10 @@ void radeon_driver_unload_kms(struct drm_device *dev)
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
- if (dev->agp)
- arch_phys_wc_del(dev->agp->agp_mtrr);
- kfree(dev->agp);
- dev->agp = NULL;
+ if (rdev->agp)
+ arch_phys_wc_del(rdev->agp->agp_mtrr);
+ kfree(rdev->agp);
+ rdev->agp = NULL;
done_free:
kfree(rdev);
@@ -119,6 +118,15 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
rdev->hose = pdev->sysdata;
#endif
+ if (pci_find_capability(pdev, PCI_CAP_ID_AGP))
+ rdev->agp = radeon_agp_head_init(rdev->ddev);
+ if (rdev->agp) {
+ rdev->agp->agp_mtrr = arch_phys_wc_add(
+ rdev->agp->agp_info.aper_base,
+ rdev->agp->agp_info.aper_size *
+ 1024 * 1024);
+ }
+
/* update BUS flag */
if (pci_find_capability(pdev, PCI_CAP_ID_AGP)) {
flags |= RADEON_IS_AGP;
diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h
index 9896d8231fe5..fd4116bdde0f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.h
+++ b/drivers/gpu/drm/radeon/radeon_object.h
@@ -119,7 +119,7 @@ static inline unsigned radeon_bo_ngpu_pages(struct radeon_bo *bo)
static inline unsigned radeon_bo_gpu_page_alignment(struct radeon_bo *bo)
{
- return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
+ return (bo->tbo.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
}
/**
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 380b3007fd0b..3361d11769a2 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -38,7 +38,6 @@
#include <linux/swap.h>
#include <linux/swiotlb.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_prime.h>
@@ -291,7 +290,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resourc
/* RADEON_IS_AGP is set only if AGP is active */
mem->bus.offset = (mem->start << PAGE_SHIFT) +
rdev->mc.agp_base;
- mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture;
+ mem->bus.is_iomem = !rdev->agp->cant_use_aperture;
mem->bus.caching = ttm_write_combined;
}
#endif
@@ -513,8 +512,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo,
struct radeon_device *rdev = radeon_get_rdev(bo->bdev);
if (rdev->flags & RADEON_IS_AGP) {
- return ttm_agp_tt_create(bo, rdev->ddev->agp->bridge,
- page_flags);
+ return ttm_agp_tt_create(bo, rdev->agp->bridge, page_flags);
}
#endif
rbo = container_of(bo, struct radeon_bo, tbo);
diff --git a/drivers/gpu/drm/savage/savage_bci.c b/drivers/gpu/drm/savage/savage_bci.c
index 606e5b807a6e..e33385dfe3ed 100644
--- a/drivers/gpu/drm/savage/savage_bci.c
+++ b/drivers/gpu/drm/savage/savage_bci.c
@@ -547,6 +547,7 @@ static void savage_fake_dma_flush(drm_savage_private_t * dev_priv)
int savage_driver_load(struct drm_device *dev, unsigned long chipset)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
drm_savage_private_t *dev_priv;
dev_priv = kzalloc(sizeof(drm_savage_private_t), GFP_KERNEL);
@@ -557,7 +558,7 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->chipset = (enum savage_family)chipset;
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
return 0;
}
@@ -572,16 +573,17 @@ int savage_driver_load(struct drm_device *dev, unsigned long chipset)
int savage_driver_firstopen(struct drm_device *dev)
{
drm_savage_private_t *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long mmio_base, fb_base, fb_size, aperture_base;
int ret = 0;
if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
- fb_base = pci_resource_start(dev->pdev, 0);
+ fb_base = pci_resource_start(pdev, 0);
fb_size = SAVAGE_FB_SIZE_S3;
mmio_base = fb_base + SAVAGE_FB_SIZE_S3;
aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
/* this should always be true */
- if (pci_resource_len(dev->pdev, 0) == 0x08000000) {
+ if (pci_resource_len(pdev, 0) == 0x08000000) {
/* Don't make MMIO write-cobining! We need 3
* MTRRs. */
dev_priv->mtrr_handles[0] =
@@ -595,16 +597,16 @@ int savage_driver_firstopen(struct drm_device *dev)
} else {
DRM_ERROR("strange pci_resource_len %08llx\n",
(unsigned long long)
- pci_resource_len(dev->pdev, 0));
+ pci_resource_len(pdev, 0));
}
} else if (dev_priv->chipset != S3_SUPERSAVAGE &&
dev_priv->chipset != S3_SAVAGE2000) {
- mmio_base = pci_resource_start(dev->pdev, 0);
- fb_base = pci_resource_start(dev->pdev, 1);
+ mmio_base = pci_resource_start(pdev, 0);
+ fb_base = pci_resource_start(pdev, 1);
fb_size = SAVAGE_FB_SIZE_S4;
aperture_base = fb_base + SAVAGE_APERTURE_OFFSET;
/* this should always be true */
- if (pci_resource_len(dev->pdev, 1) == 0x08000000) {
+ if (pci_resource_len(pdev, 1) == 0x08000000) {
/* Can use one MTRR to cover both fb and
* aperture. */
dev_priv->mtrr_handles[0] =
@@ -613,13 +615,13 @@ int savage_driver_firstopen(struct drm_device *dev)
} else {
DRM_ERROR("strange pci_resource_len %08llx\n",
(unsigned long long)
- pci_resource_len(dev->pdev, 1));
+ pci_resource_len(pdev, 1));
}
} else {
- mmio_base = pci_resource_start(dev->pdev, 0);
- fb_base = pci_resource_start(dev->pdev, 1);
- fb_size = pci_resource_len(dev->pdev, 1);
- aperture_base = pci_resource_start(dev->pdev, 2);
+ mmio_base = pci_resource_start(pdev, 0);
+ fb_base = pci_resource_start(pdev, 1);
+ fb_size = pci_resource_len(pdev, 1);
+ aperture_base = pci_resource_start(pdev, 2);
/* Automatic MTRR setup will do the right thing. */
}
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index f0790e9471d1..0249c7450188 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -192,7 +192,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
EXPORT_SYMBOL(drm_sched_entity_flush);
/**
- * drm_sched_entity_kill_jobs - helper for drm_sched_entity_kill_jobs
+ * drm_sched_entity_kill_jobs_cb - helper for drm_sched_entity_kill_jobs
*
* @f: signaled fence
* @cb: our callback structure
@@ -250,7 +250,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity)
}
/**
- * drm_sched_entity_cleanup - Destroy a context entity
+ * drm_sched_entity_fini - Destroy a context entity
*
* @entity: scheduler entity
*
@@ -295,7 +295,7 @@ void drm_sched_entity_fini(struct drm_sched_entity *entity)
EXPORT_SYMBOL(drm_sched_entity_fini);
/**
- * drm_sched_entity_fini - Destroy a context entity
+ * drm_sched_entity_destroy - Destroy a context entity
*
* @entity: scheduler entity
*
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 92d8de24d0a1..f4f474944169 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -671,7 +671,7 @@ drm_sched_select_entity(struct drm_gpu_scheduler *sched)
static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
- struct drm_sched_job *job;
+ struct drm_sched_job *job, *next;
/*
* Don't destroy jobs while the timeout worker is running OR thread
@@ -690,6 +690,13 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
/* remove job from pending_list */
list_del_init(&job->list);
+ /* make the scheduled timestamp more accurate */
+ next = list_first_entry_or_null(&sched->pending_list,
+ typeof(*next), list);
+ if (next)
+ next->s_fence->scheduled.timestamp =
+ job->s_fence->finished.timestamp;
+
} else {
job = NULL;
/* queue timeout for next job */
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 2c54b33abb54..e35e719cf315 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -41,9 +41,10 @@ static struct pci_device_id pciidlist[] = {
static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
drm_sis_private_t *dev_priv;
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
if (dev_priv == NULL)
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index 65c3c79ad1d5..e99771b947b6 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -1326,8 +1326,6 @@ int ltdc_load(struct drm_device *ddev)
goto err;
}
- ddev->mode_config.allow_fb_modifiers = true;
-
ret = ltdc_crtc_init(ddev, crtc);
if (ret) {
DRM_ERROR("Failed to init crtc\n");
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 91502937f26d..af335f58bdfc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -13,6 +13,7 @@
#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
@@ -99,7 +100,9 @@ static int sun4i_drv_bind(struct device *dev)
drm->irq_enabled = true;
/* Remove early framebuffers (ie. simplefb) */
- drm_fb_helper_remove_conflicting_framebuffers(NULL, "sun4i-drm-fb", false);
+ ret = drm_aperture_remove_framebuffers(false, "sun4i-drm-fb");
+ if (ret)
+ goto cleanup_mode_config;
sun4i_framebuffer_init(drm);
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index f9120dc24682..074563ca586c 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -999,6 +999,11 @@ static const struct drm_plane_helper_funcs tegra_cursor_plane_helper_funcs = {
.atomic_disable = tegra_cursor_atomic_disable,
};
+static const uint64_t linear_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
struct tegra_dc *dc)
{
@@ -1032,7 +1037,7 @@ static struct drm_plane *tegra_dc_cursor_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
- num_formats, NULL,
+ num_formats, linear_modifiers,
DRM_PLANE_TYPE_CURSOR, NULL);
if (err < 0) {
kfree(plane);
@@ -1151,7 +1156,8 @@ static struct drm_plane *tegra_dc_overlay_plane_create(struct drm_device *drm,
err = drm_universal_plane_init(drm, &plane->base, possible_crtcs,
&tegra_plane_funcs, formats,
- num_formats, NULL, type, NULL);
+ num_formats, linear_modifiers,
+ type, NULL);
if (err < 0) {
kfree(plane);
return ERR_PTR(err);
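
The tegra and stm hunks stop setting mode_config.allow_fb_modifiers by hand and instead advertise an explicit, DRM_FORMAT_MOD_INVALID-terminated modifier list on each plane; if I read the core right, drm_universal_plane_init() flips allow_fb_modifiers itself when a plane supplies such a list. The minimal table a linear-only plane would pass as the format_modifiers argument:

#include <linux/types.h>

#include <drm/drm_fourcc.h>

static const u64 example_linear_modifiers[] = {
        DRM_FORMAT_MOD_LINEAR,
        DRM_FORMAT_MOD_INVALID  /* terminator expected by the core */
};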
diff --git a/drivers/gpu/drm/tegra/dpaux.c b/drivers/gpu/drm/tegra/dpaux.c
index ea56c6ec25e4..7d7cc90b6fc9 100644
--- a/drivers/gpu/drm/tegra/dpaux.c
+++ b/drivers/gpu/drm/tegra/dpaux.c
@@ -719,6 +719,7 @@ int drm_dp_aux_attach(struct drm_dp_aux *aux, struct tegra_output *output)
unsigned long timeout;
int err;
+ aux->drm_dev = output->connector.dev;
err = drm_dp_aux_register(aux);
if (err < 0)
return err;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 0c350b0daab4..f96c237b2242 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_debugfs.h>
@@ -1124,8 +1125,6 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm->mode_config.max_width = 0;
drm->mode_config.max_height = 0;
- drm->mode_config.allow_fb_modifiers = true;
-
drm->mode_config.normalize_zpos = true;
drm->mode_config.funcs = &tegra_drm_mode_config_funcs;
@@ -1205,8 +1204,7 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
- err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb",
- false);
+ err = drm_aperture_remove_framebuffers(false, "tegradrmfb");
if (err < 0)
goto hub;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index 9bbaa1a69050..d46f95d9196d 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -38,6 +38,22 @@ config DRM_GM12U320
This is a KMS driver for projectors which use the GM12U320 chipset
for video transfer over USB2/3, such as the Acer C120 mini projector.
+config DRM_SIMPLEDRM
+ tristate "Simple framebuffer driver"
+ depends on DRM
+ select DRM_GEM_SHMEM_HELPER
+ select DRM_KMS_HELPER
+ help
+ DRM driver for simple platform-provided framebuffers.
+
+ This driver assumes that the display hardware has been initialized
+ by the firmware or bootloader before the kernel boots. Scanout
+ buffer, size, and display format must be provided via device tree,
+ UEFI, VESA, etc.
+
+ On x86 and compatible, you should also select CONFIG_X86_SYSFB to
+ use UEFI and VESA framebuffers.
+
config TINYDRM_HX8357D
tristate "DRM support for HX8357D display panels"
depends on DRM && SPI
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index bef6780bdd6f..9cc847e756da 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -3,6 +3,7 @@
obj-$(CONFIG_DRM_ARCPGU) += arcpgu.o
obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus.o
obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
+obj-$(CONFIG_DRM_SIMPLEDRM) += simpledrm.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
diff --git a/drivers/gpu/drm/tiny/cirrus.c b/drivers/gpu/drm/tiny/cirrus.c
index ad922c3ec681..42611dacde88 100644
--- a/drivers/gpu/drm/tiny/cirrus.c
+++ b/drivers/gpu/drm/tiny/cirrus.c
@@ -24,6 +24,7 @@
#include <video/cirrus.h>
#include <video/vga.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_connector.h>
@@ -323,7 +324,7 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb, const struct dma_buf_
return -ENODEV;
if (cirrus->cpp == fb->format->cpp[0])
- drm_fb_memcpy_dstclip(cirrus->vram,
+ drm_fb_memcpy_dstclip(cirrus->vram, fb->pitches[0],
vmap, fb, rect);
else if (fb->format->cpp[0] == 4 && cirrus->cpp == 2)
@@ -549,7 +550,7 @@ static int cirrus_pci_probe(struct pci_dev *pdev,
struct cirrus_device *cirrus;
int ret;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "cirrusdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/tiny/hx8357d.c b/drivers/gpu/drm/tiny/hx8357d.c
index 3e2c2868a363..da5df93450de 100644
--- a/drivers/gpu/drm/tiny/hx8357d.c
+++ b/drivers/gpu/drm/tiny/hx8357d.c
@@ -234,10 +234,8 @@ static int hx8357d_probe(struct spi_device *spi)
drm = &dbidev->drm;
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
- if (IS_ERR(dc)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
- return PTR_ERR(dc);
- }
+ if (IS_ERR(dc))
+ return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
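
The tiny-DRM probe cleanups above all switch to dev_err_probe(), which prints the message, stays quiet for -EPROBE_DEFER (recording the reason for the deferred-devices debugfs list instead) and returns the error in one step. A self-contained sketch of the idiom with a made-up wrapper:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_dc_gpio(struct device *dev, struct gpio_desc **dc)
{
        *dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
        if (IS_ERR(*dc))
                return dev_err_probe(dev, PTR_ERR(*dc),
                                     "Failed to get GPIO 'dc'\n");

        return 0;
}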
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index 6b87df19eec1..69265d8a3beb 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -379,16 +379,12 @@ static int ili9225_probe(struct spi_device *spi)
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(dbi->reset)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(dbi->reset);
- }
+ if (IS_ERR(dbi->reset))
+ return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
rs = devm_gpiod_get(dev, "rs", GPIOD_OUT_LOW);
- if (IS_ERR(rs)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'rs'\n");
- return PTR_ERR(rs);
- }
+ if (IS_ERR(rs))
+ return dev_err_probe(dev, PTR_ERR(rs), "Failed to get GPIO 'rs'\n");
device_property_read_u32(dev, "rotation", &rotation);
diff --git a/drivers/gpu/drm/tiny/ili9341.c b/drivers/gpu/drm/tiny/ili9341.c
index a97f3f70e4a6..ad9ce7b4f76f 100644
--- a/drivers/gpu/drm/tiny/ili9341.c
+++ b/drivers/gpu/drm/tiny/ili9341.c
@@ -192,16 +192,12 @@ static int ili9341_probe(struct spi_device *spi)
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(dbi->reset)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(dbi->reset);
- }
+ if (IS_ERR(dbi->reset))
+ return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
- if (IS_ERR(dc)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
- return PTR_ERR(dc);
- }
+ if (IS_ERR(dc))
+ return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
index 6422a7f67079..75aa1476c66c 100644
--- a/drivers/gpu/drm/tiny/ili9486.c
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -206,16 +206,12 @@ static int ili9486_probe(struct spi_device *spi)
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(dbi->reset)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(dbi->reset);
- }
+ if (IS_ERR(dbi->reset))
+ return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
- if (IS_ERR(dc)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
- return PTR_ERR(dc);
- }
+ if (IS_ERR(dc))
+ return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
diff --git a/drivers/gpu/drm/tiny/mi0283qt.c b/drivers/gpu/drm/tiny/mi0283qt.c
index dc76fe53aa72..82fd1ad3413f 100644
--- a/drivers/gpu/drm/tiny/mi0283qt.c
+++ b/drivers/gpu/drm/tiny/mi0283qt.c
@@ -196,16 +196,12 @@ static int mi0283qt_probe(struct spi_device *spi)
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(dbi->reset)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(dbi->reset);
- }
+ if (IS_ERR(dbi->reset))
+ return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
dc = devm_gpiod_get_optional(dev, "dc", GPIOD_OUT_LOW);
- if (IS_ERR(dc)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
- return PTR_ERR(dc);
- }
+ if (IS_ERR(dc))
+ return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->regulator = devm_regulator_get(dev, "power");
if (IS_ERR(dbidev->regulator))
diff --git a/drivers/gpu/drm/tiny/simpledrm.c b/drivers/gpu/drm/tiny/simpledrm.c
new file mode 100644
index 000000000000..2bdb477d9326
--- /dev/null
+++ b/drivers/gpu/drm/tiny/simpledrm.c
@@ -0,0 +1,896 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/clk.h>
+#include <linux/of_clk.h>
+#include <linux/platform_data/simplefb.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_aperture.h>
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_device.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_gem_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_gem_shmem_helper.h>
+#include <drm/drm_managed.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
+
+#define DRIVER_NAME "simpledrm"
+#define DRIVER_DESC "DRM driver for simple-framebuffer platform devices"
+#define DRIVER_DATE "20200625"
+#define DRIVER_MAJOR 1
+#define DRIVER_MINOR 0
+
+/*
+ * Assume a monitor resolution of 96 dpi to
+ * get a somewhat reasonable screen size.
+ */
+#define RES_MM(d) \
+ (((d) * 254ul) / (96ul * 10ul))
+
+#define SIMPLEDRM_MODE(hd, vd) \
+ DRM_SIMPLE_MODE(hd, vd, RES_MM(hd), RES_MM(vd))
+
+/*
+ * Helpers for simplefb
+ */
+
+static int
+simplefb_get_validated_int(struct drm_device *dev, const char *name,
+ uint32_t value)
+{
+ if (value > INT_MAX) {
+ drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
+ name, value);
+ return -EINVAL;
+ }
+ return (int)value;
+}
+
+static int
+simplefb_get_validated_int0(struct drm_device *dev, const char *name,
+ uint32_t value)
+{
+ if (!value) {
+ drm_err(dev, "simplefb: invalid framebuffer %s of %u\n",
+ name, value);
+ return -EINVAL;
+ }
+ return simplefb_get_validated_int(dev, name, value);
+}
+
+static const struct drm_format_info *
+simplefb_get_validated_format(struct drm_device *dev, const char *format_name)
+{
+ static const struct simplefb_format formats[] = SIMPLEFB_FORMATS;
+ const struct simplefb_format *fmt = formats;
+ const struct simplefb_format *end = fmt + ARRAY_SIZE(formats);
+
+ if (!format_name) {
+ drm_err(dev, "simplefb: missing framebuffer format\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ while (fmt < end) {
+ if (!strcmp(format_name, fmt->name))
+ return drm_format_info(fmt->fourcc);
+ ++fmt;
+ }
+
+ drm_err(dev, "simplefb: unknown framebuffer format %s\n",
+ format_name);
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int
+simplefb_get_width_pd(struct drm_device *dev,
+ const struct simplefb_platform_data *pd)
+{
+ return simplefb_get_validated_int0(dev, "width", pd->width);
+}
+
+static int
+simplefb_get_height_pd(struct drm_device *dev,
+ const struct simplefb_platform_data *pd)
+{
+ return simplefb_get_validated_int0(dev, "height", pd->height);
+}
+
+static int
+simplefb_get_stride_pd(struct drm_device *dev,
+ const struct simplefb_platform_data *pd)
+{
+ return simplefb_get_validated_int(dev, "stride", pd->stride);
+}
+
+static const struct drm_format_info *
+simplefb_get_format_pd(struct drm_device *dev,
+ const struct simplefb_platform_data *pd)
+{
+ return simplefb_get_validated_format(dev, pd->format);
+}
+
+static int
+simplefb_read_u32_of(struct drm_device *dev, struct device_node *of_node,
+ const char *name, u32 *value)
+{
+ int ret = of_property_read_u32(of_node, name, value);
+
+ if (ret)
+ drm_err(dev, "simplefb: cannot parse framebuffer %s: error %d\n",
+ name, ret);
+ return ret;
+}
+
+static int
+simplefb_read_string_of(struct drm_device *dev, struct device_node *of_node,
+ const char *name, const char **value)
+{
+ int ret = of_property_read_string(of_node, name, value);
+
+ if (ret)
+ drm_err(dev, "simplefb: cannot parse framebuffer %s: error %d\n",
+ name, ret);
+ return ret;
+}
+
+static int
+simplefb_get_width_of(struct drm_device *dev, struct device_node *of_node)
+{
+ u32 width;
+ int ret = simplefb_read_u32_of(dev, of_node, "width", &width);
+
+ if (ret)
+ return ret;
+ return simplefb_get_validated_int0(dev, "width", width);
+}
+
+static int
+simplefb_get_height_of(struct drm_device *dev, struct device_node *of_node)
+{
+ u32 height;
+ int ret = simplefb_read_u32_of(dev, of_node, "height", &height);
+
+ if (ret)
+ return ret;
+ return simplefb_get_validated_int0(dev, "height", height);
+}
+
+static int
+simplefb_get_stride_of(struct drm_device *dev, struct device_node *of_node)
+{
+ u32 stride;
+ int ret = simplefb_read_u32_of(dev, of_node, "stride", &stride);
+
+ if (ret)
+ return ret;
+ return simplefb_get_validated_int(dev, "stride", stride);
+}
+
+static const struct drm_format_info *
+simplefb_get_format_of(struct drm_device *dev, struct device_node *of_node)
+{
+ const char *format;
+ int ret = simplefb_read_string_of(dev, of_node, "format", &format);
+
+ if (ret)
+ return ERR_PTR(ret);
+ return simplefb_get_validated_format(dev, format);
+}
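+
+/*
+ * These helpers parse a firmware-provided node such as the following
+ * sketch (address and values assumed for illustration only):
+ *
+ *   framebuffer@3e402000 {
+ *           compatible = "simple-framebuffer";
+ *           reg = <0x3e402000 0x7e9000>;
+ *           width = <1920>;
+ *           height = <1080>;
+ *           stride = <(1920 * 4)>;
+ *           format = "a8r8g8b8";
+ *   };
+ */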
+
+/*
+ * Simple Framebuffer device
+ */
+
+struct simpledrm_device {
+ struct drm_device dev;
+ struct platform_device *pdev;
+
+ /* clocks */
+#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
+ unsigned int clk_count;
+ struct clk **clks;
+#endif
+ /* regulators */
+#if defined CONFIG_OF && defined CONFIG_REGULATOR
+ unsigned int regulator_count;
+ struct regulator **regulators;
+#endif
+
+ /* simplefb settings */
+ struct drm_display_mode mode;
+ const struct drm_format_info *format;
+ unsigned int pitch;
+
+ /* memory management */
+ struct resource *mem;
+ void __iomem *screen_base;
+
+ /* modesetting */
+ uint32_t formats[8];
+ size_t nformats;
+ struct drm_connector connector;
+ struct drm_simple_display_pipe pipe;
+};
+
+static struct simpledrm_device *simpledrm_device_of_dev(struct drm_device *dev)
+{
+ return container_of(dev, struct simpledrm_device, dev);
+}
+
+/*
+ * Hardware
+ */
+
+#if defined CONFIG_OF && defined CONFIG_COMMON_CLK
+/*
+ * Clock handling code.
+ *
+ * Here we handle the clocks property of our "simple-framebuffer" dt node.
+ * This is necessary so that any clocks needed by the display engine that
+ * the bootloader set up for us (and for which it provided a simplefb dt
+ * node) stay up for the life of the simpledrm driver.
+ *
+ * When the driver unloads, we cleanly disable and then release the clocks.
+ *
+ * We only complain about errors here; no action is taken, as the most
+ * likely error is a mismatch between the bootloader that set up simplefb
+ * and the clock definitions in the device tree. Chances are that there
+ * are no adverse effects, and if there are, a clean teardown of the fb
+ * probe will not help us much either. So just complain and carry on, and
+ * hope that the user actually gets a working fb at the end of things.
+ */
+
+static void simpledrm_device_release_clocks(void *res)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ unsigned int i;
+
+ for (i = 0; i < sdev->clk_count; ++i) {
+ if (sdev->clks[i]) {
+ clk_disable_unprepare(sdev->clks[i]);
+ clk_put(sdev->clks[i]);
+ }
+ }
+}
+
+static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
+{
+ struct drm_device *dev = &sdev->dev;
+ struct platform_device *pdev = sdev->pdev;
+ struct device_node *of_node = pdev->dev.of_node;
+ struct clk *clock;
+ unsigned int i;
+ int ret;
+
+ if (dev_get_platdata(&pdev->dev) || !of_node)
+ return 0;
+
+ sdev->clk_count = of_clk_get_parent_count(of_node);
+ if (!sdev->clk_count)
+ return 0;
+
+ sdev->clks = drmm_kzalloc(dev, sdev->clk_count * sizeof(sdev->clks[0]),
+ GFP_KERNEL);
+ if (!sdev->clks)
+ return -ENOMEM;
+
+ for (i = 0; i < sdev->clk_count; ++i) {
+ clock = of_clk_get(of_node, i);
+ if (IS_ERR(clock)) {
+ ret = PTR_ERR(clock);
+ if (ret == -EPROBE_DEFER)
+ goto err;
+ drm_err(dev, "clock %u not found: %d\n", i, ret);
+ continue;
+ }
+ ret = clk_prepare_enable(clock);
+ if (ret) {
+ drm_err(dev, "failed to enable clock %u: %d\n",
+ i, ret);
+ clk_put(clock);
+ continue;
+ }
+ sdev->clks[i] = clock;
+ }
+
+ return devm_add_action_or_reset(&pdev->dev,
+ simpledrm_device_release_clocks,
+ sdev);
+
+err:
+ while (i) {
+ --i;
+ if (sdev->clks[i]) {
+ clk_disable_unprepare(sdev->clks[i]);
+ clk_put(sdev->clks[i]);
+ }
+ }
+ return ret;
+}
+#else
+static int simpledrm_device_init_clocks(struct simpledrm_device *sdev)
+{
+ return 0;
+}
+#endif
+
+#if defined CONFIG_OF && defined CONFIG_REGULATOR
+
+#define SUPPLY_SUFFIX "-supply"
+
+/*
+ * Regulator handling code.
+ *
+ * Here we handle the vin*-supply properties of our "simple-framebuffer"
+ * dt node. This is necessary so that any regulators needed by the display
+ * hardware that the bootloader set up for us (and for which it provided a
+ * simplefb dt node) stay up for the life of the simpledrm driver.
+ *
+ * When the driver unloads, we cleanly disable and then release the
+ * regulators.
+ *
+ * We only complain about errors here; no action is taken, as the most
+ * likely error is a mismatch between the bootloader that set up simplefb
+ * and the regulator definitions in the device tree. Chances are that
+ * there are no adverse effects, and if there are, a clean teardown of the
+ * fb probe will not help us much either. So just complain and carry on,
+ * and hope that the user actually gets a working fb at the end of things.
+ */
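+
+/*
+ * For example (supply name assumed): a "vin-supply" property in the
+ * firmware node makes the loop below strip the "-supply" suffix and call
+ * regulator_get_optional() with the name "vin", which the regulator core
+ * then resolves back to the vin-supply phandle.
+ */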
+
+static void simpledrm_device_release_regulators(void *res)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(res);
+ unsigned int i;
+
+ for (i = 0; i < sdev->regulator_count; ++i) {
+ if (sdev->regulators[i]) {
+ regulator_disable(sdev->regulators[i]);
+ regulator_put(sdev->regulators[i]);
+ }
+ }
+}
+
+static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
+{
+ struct drm_device *dev = &sdev->dev;
+ struct platform_device *pdev = sdev->pdev;
+ struct device_node *of_node = pdev->dev.of_node;
+ struct property *prop;
+ struct regulator *regulator;
+ const char *p;
+ unsigned int count = 0, i = 0;
+ int ret;
+
+ if (dev_get_platdata(&pdev->dev) || !of_node)
+ return 0;
+
+ /* Count the number of regulator supplies */
+ for_each_property_of_node(of_node, prop) {
+ p = strstr(prop->name, SUPPLY_SUFFIX);
+ if (p && p != prop->name)
+ ++count;
+ }
+
+ if (!count)
+ return 0;
+
+ sdev->regulators = drmm_kzalloc(dev,
+ count * sizeof(sdev->regulators[0]),
+ GFP_KERNEL);
+ if (!sdev->regulators)
+ return -ENOMEM;
+
+ for_each_property_of_node(of_node, prop) {
+ char name[32]; /* 32 is max size of property name */
+ size_t len;
+
+ p = strstr(prop->name, SUPPLY_SUFFIX);
+ if (!p || p == prop->name)
+ continue;
+ len = strlen(prop->name) - strlen(SUPPLY_SUFFIX) + 1;
+ strscpy(name, prop->name, min(sizeof(name), len));
+
+ regulator = regulator_get_optional(&pdev->dev, name);
+ if (IS_ERR(regulator)) {
+ ret = PTR_ERR(regulator);
+ if (ret == -EPROBE_DEFER)
+ goto err;
+ drm_err(dev, "regulator %s not found: %d\n",
+ name, ret);
+ continue;
+ }
+
+ ret = regulator_enable(regulator);
+ if (ret) {
+ drm_err(dev, "failed to enable regulator %u: %d\n",
+ i, ret);
+ regulator_put(regulator);
+ continue;
+ }
+
+ sdev->regulators[i++] = regulator;
+ }
+ sdev->regulator_count = i;
+
+ return devm_add_action_or_reset(&pdev->dev,
+ simpledrm_device_release_regulators,
+ sdev);
+
+err:
+ while (i) {
+ --i;
+ if (sdev->regulators[i]) {
+ regulator_disable(sdev->regulators[i]);
+ regulator_put(sdev->regulators[i]);
+ }
+ }
+ return ret;
+}
+#else
+static int simpledrm_device_init_regulators(struct simpledrm_device *sdev)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Simplefb settings
+ */
+
+static struct drm_display_mode simpledrm_mode(unsigned int width,
+ unsigned int height)
+{
+ struct drm_display_mode mode = { SIMPLEDRM_MODE(width, height) };
+
+ mode.clock = mode.hdisplay * mode.vdisplay * 60 / 1000; /* pixel clock in kHz */
+ drm_mode_set_name(&mode);
+
+ return mode;
+}
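+
+/*
+ * As a rough example (mode assumed): a 1024x768 firmware framebuffer is
+ * advertised as a 60 Hz mode with a nominal pixel clock of about 47 MHz
+ * (1024 * 768 * 60 = 47,185,920 Hz, i.e. 47185 kHz).
+ */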
+
+static int simpledrm_device_init_fb(struct simpledrm_device *sdev)
+{
+ int width, height, stride;
+ const struct drm_format_info *format;
+ struct drm_format_name_buf buf;
+ struct drm_device *dev = &sdev->dev;
+ struct platform_device *pdev = sdev->pdev;
+ const struct simplefb_platform_data *pd = dev_get_platdata(&pdev->dev);
+ struct device_node *of_node = pdev->dev.of_node;
+
+ if (pd) {
+ width = simplefb_get_width_pd(dev, pd);
+ if (width < 0)
+ return width;
+ height = simplefb_get_height_pd(dev, pd);
+ if (height < 0)
+ return height;
+ stride = simplefb_get_stride_pd(dev, pd);
+ if (stride < 0)
+ return stride;
+ format = simplefb_get_format_pd(dev, pd);
+ if (IS_ERR(format))
+ return PTR_ERR(format);
+ } else if (of_node) {
+ width = simplefb_get_width_of(dev, of_node);
+ if (width < 0)
+ return width;
+ height = simplefb_get_height_of(dev, of_node);
+ if (height < 0)
+ return height;
+ stride = simplefb_get_stride_of(dev, of_node);
+ if (stride < 0)
+ return stride;
+ format = simplefb_get_format_of(dev, of_node);
+ if (IS_ERR(format))
+ return PTR_ERR(format);
+ } else {
+ drm_err(dev, "no simplefb configuration found\n");
+ return -ENODEV;
+ }
+
+ sdev->mode = simpledrm_mode(width, height);
+ sdev->format = format;
+ sdev->pitch = stride;
+
+ drm_dbg_kms(dev, "display mode={" DRM_MODE_FMT "}\n",
+ DRM_MODE_ARG(&sdev->mode));
+ drm_dbg_kms(dev,
+ "framebuffer format=\"%s\", size=%dx%d, stride=%d byte\n",
+ drm_get_format_name(format->format, &buf), width,
+ height, stride);
+
+ return 0;
+}
+
+/*
+ * Memory management
+ */
+
+static int simpledrm_device_init_mm(struct simpledrm_device *sdev)
+{
+ struct drm_device *dev = &sdev->dev;
+ struct platform_device *pdev = sdev->pdev;
+ struct resource *mem;
+ void __iomem *screen_base;
+ int ret;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -EINVAL;
+
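+ /*
+ * Register the framebuffer aperture with the DRM core so that a native
+ * driver probing later can detect the conflict and hot-unplug simpledrm
+ * before taking over the same memory range.
+ */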
+ ret = devm_aperture_acquire_from_firmware(dev, mem->start, resource_size(mem));
+ if (ret) {
+ drm_err(dev, "could not acquire memory range [0x%llx:0x%llx]: error %d\n",
+ mem->start, mem->end, ret);
+ return ret;
+ }
+
+ screen_base = devm_ioremap_wc(&pdev->dev, mem->start,
+ resource_size(mem));
+ if (!screen_base)
+ return -ENOMEM;
+
+ sdev->mem = mem;
+ sdev->screen_base = screen_base;
+
+ return 0;
+}
+
+/*
+ * Modesetting
+ */
+
+/*
+ * Support all formats of simplefb and maybe more; in order
+ * of preference. The display's update function will do any
+ * conversion necessary.
+ *
+ * TODO: Add blit helpers for remaining formats and uncomment
+ * constants.
+ */
+static const uint32_t simpledrm_default_formats[] = {
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_RGB565,
+ //DRM_FORMAT_XRGB1555,
+ //DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_RGB888,
+ //DRM_FORMAT_XRGB2101010,
+ //DRM_FORMAT_ARGB2101010,
+};
+
+static const uint64_t simpledrm_format_modifiers[] = {
+ DRM_FORMAT_MOD_LINEAR,
+ DRM_FORMAT_MOD_INVALID
+};
+
+static int simpledrm_connector_helper_get_modes(struct drm_connector *connector)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(connector->dev);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &sdev->mode);
+ if (!mode)
+ return 0;
+
+ if (mode->name[0] == '\0')
+ drm_mode_set_name(mode);
+
+ mode->type |= DRM_MODE_TYPE_PREFERRED;
+ drm_mode_probed_add(connector, mode);
+
+ if (mode->width_mm)
+ connector->display_info.width_mm = mode->width_mm;
+ if (mode->height_mm)
+ connector->display_info.height_mm = mode->height_mm;
+
+ return 1;
+}
+
+static const struct drm_connector_helper_funcs simpledrm_connector_helper_funcs = {
+ .get_modes = simpledrm_connector_helper_get_modes,
+};
+
+static const struct drm_connector_funcs simpledrm_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int
+simpledrm_simple_display_pipe_mode_valid(struct drm_simple_display_pipe *pipe,
+ const struct drm_display_mode *mode)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
+
+ if (mode->hdisplay != sdev->mode.hdisplay &&
+ mode->vdisplay != sdev->mode.vdisplay)
+ return MODE_ONE_SIZE;
+ else if (mode->hdisplay != sdev->mode.hdisplay)
+ return MODE_ONE_WIDTH;
+ else if (mode->vdisplay != sdev->mode.vdisplay)
+ return MODE_ONE_HEIGHT;
+
+ return MODE_OK;
+}
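+
+/*
+ * For example (sizes assumed): with a 1920x1080 firmware framebuffer,
+ * 1280x1080 is rejected as MODE_ONE_WIDTH, 1920x720 as MODE_ONE_HEIGHT,
+ * and 1280x720 as MODE_ONE_SIZE; only 1920x1080 itself is MODE_OK.
+ */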
+
+static void
+simpledrm_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ struct drm_framebuffer *fb = plane_state->fb;
+ void *vmap = shadow_plane_state->map[0].vaddr; /* TODO: Use mapping abstraction properly */
+ struct drm_device *dev = &sdev->dev;
+ int idx;
+
+ if (!fb)
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ drm_fb_blit_dstclip(sdev->screen_base, sdev->pitch,
+ sdev->format->format, vmap, fb);
+ drm_dev_exit(idx);
+}
+
+static void
+simpledrm_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
+ struct drm_device *dev = &sdev->dev;
+ int idx;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ /* Clear screen to black if disabled */
+ memset_io(sdev->screen_base, 0, sdev->pitch * sdev->mode.vdisplay);
+
+ drm_dev_exit(idx);
+}
+
+static void
+simpledrm_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_plane_state)
+{
+ struct simpledrm_device *sdev = simpledrm_device_of_dev(pipe->crtc.dev);
+ struct drm_plane_state *plane_state = pipe->plane.state;
+ struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state);
+ void *vmap = shadow_plane_state->map[0].vaddr; /* TODO: Use mapping abstraction properly */
+ struct drm_framebuffer *fb = plane_state->fb;
+ struct drm_device *dev = &sdev->dev;
+ struct drm_rect clip;
+ int idx;
+
+ if (!fb)
+ return;
+
+ if (!drm_atomic_helper_damage_merged(old_plane_state, plane_state, &clip))
+ return;
+
+ if (!drm_dev_enter(dev, &idx))
+ return;
+
+ drm_fb_blit_rect_dstclip(sdev->screen_base, sdev->pitch,
+ sdev->format->format, vmap, fb, &clip);
+
+ drm_dev_exit(idx);
+}
+
+static const struct drm_simple_display_pipe_funcs
+simpledrm_simple_display_pipe_funcs = {
+ .mode_valid = simpledrm_simple_display_pipe_mode_valid,
+ .enable = simpledrm_simple_display_pipe_enable,
+ .disable = simpledrm_simple_display_pipe_disable,
+ .update = simpledrm_simple_display_pipe_update,
+ DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS,
+};
+
+static const struct drm_mode_config_funcs simpledrm_mode_config_funcs = {
+ .fb_create = drm_gem_fb_create_with_dirty,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static const uint32_t *simpledrm_device_formats(struct simpledrm_device *sdev,
+ size_t *nformats_out)
+{
+ struct drm_device *dev = &sdev->dev;
+ size_t i;
+
+ if (sdev->nformats)
+ goto out; /* don't rebuild list on recurring calls */
+
+ /* native format goes first */
+ sdev->formats[0] = sdev->format->format;
+ sdev->nformats = 1;
+
+ /* default formats go second */
+ for (i = 0; i < ARRAY_SIZE(simpledrm_default_formats); ++i) {
+ if (simpledrm_default_formats[i] == sdev->format->format)
+ continue; /* native format already went first */
+ sdev->formats[sdev->nformats] = simpledrm_default_formats[i];
+ sdev->nformats++;
+ }
+
+ /*
+ * TODO: The simpledrm driver converts framebuffers to the native
+ * format when copying them to device memory. If the native format is
+ * not among the default formats above, the conversion helpers cannot
+ * convert to it. In that case, support *only* the native format and
+ * add a conversion helper ASAP.
+ */
+ if (drm_WARN_ONCE(dev, i != sdev->nformats,
+ "format conversion helpers required for %p4cc",
+ &sdev->format->format)) {
+ sdev->nformats = 1;
+ }
+
+out:
+ *nformats_out = sdev->nformats;
+ return sdev->formats;
+}
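+
+/*
+ * Worked example (native format assumed): for a native RGB565 framebuffer
+ * the exposed list becomes { RGB565, XRGB8888, ARGB8888, RGB888 }, and the
+ * drm_WARN_ONCE() check does not trigger because the native format is one
+ * of the defaults (i == nformats == 4). A native format that is not in the
+ * default list would make nformats exceed i, and the list is then trimmed
+ * back to the native format alone.
+ */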
+
+static int simpledrm_device_init_modeset(struct simpledrm_device *sdev)
+{
+ struct drm_device *dev = &sdev->dev;
+ struct drm_display_mode *mode = &sdev->mode;
+ struct drm_connector *connector = &sdev->connector;
+ struct drm_simple_display_pipe *pipe = &sdev->pipe;
+ const uint32_t *formats;
+ size_t nformats;
+ int ret;
+
+ ret = drmm_mode_config_init(dev);
+ if (ret)
+ return ret;
+
+ dev->mode_config.min_width = mode->hdisplay;
+ dev->mode_config.max_width = mode->hdisplay;
+ dev->mode_config.min_height = mode->vdisplay;
+ dev->mode_config.max_height = mode->vdisplay;
+ dev->mode_config.prefer_shadow_fbdev = true;
+ dev->mode_config.preferred_depth = sdev->format->cpp[0] * 8;
+ dev->mode_config.funcs = &simpledrm_mode_config_funcs;
+
+ ret = drm_connector_init(dev, connector, &simpledrm_connector_funcs,
+ DRM_MODE_CONNECTOR_Unknown);
+ if (ret)
+ return ret;
+ drm_connector_helper_add(connector, &simpledrm_connector_helper_funcs);
+
+ formats = simpledrm_device_formats(sdev, &nformats);
+
+ ret = drm_simple_display_pipe_init(dev, pipe, &simpledrm_simple_display_pipe_funcs,
+ formats, nformats, simpledrm_format_modifiers,
+ connector);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(dev);
+
+ return 0;
+}
+
+/*
+ * Init / Cleanup
+ */
+
+static struct simpledrm_device *
+simpledrm_device_create(struct drm_driver *drv, struct platform_device *pdev)
+{
+ struct simpledrm_device *sdev;
+ int ret;
+
+ sdev = devm_drm_dev_alloc(&pdev->dev, drv, struct simpledrm_device,
+ dev);
+ if (IS_ERR(sdev))
+ return ERR_CAST(sdev);
+ sdev->pdev = pdev;
+ platform_set_drvdata(pdev, sdev);
+
+ ret = simpledrm_device_init_clocks(sdev);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = simpledrm_device_init_regulators(sdev);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = simpledrm_device_init_fb(sdev);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = simpledrm_device_init_mm(sdev);
+ if (ret)
+ return ERR_PTR(ret);
+ ret = simpledrm_device_init_modeset(sdev);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return sdev;
+}
+
+/*
+ * DRM driver
+ */
+
+DEFINE_DRM_GEM_FOPS(simpledrm_fops);
+
+static struct drm_driver simpledrm_driver = {
+ DRM_GEM_SHMEM_DRIVER_OPS,
+ .name = DRIVER_NAME,
+ .desc = DRIVER_DESC,
+ .date = DRIVER_DATE,
+ .major = DRIVER_MAJOR,
+ .minor = DRIVER_MINOR,
+ .driver_features = DRIVER_ATOMIC | DRIVER_GEM | DRIVER_MODESET,
+ .fops = &simpledrm_fops,
+};
+
+/*
+ * Platform driver
+ */
+
+static int simpledrm_probe(struct platform_device *pdev)
+{
+ struct simpledrm_device *sdev;
+ struct drm_device *dev;
+ int ret;
+
+ sdev = simpledrm_device_create(&simpledrm_driver, pdev);
+ if (IS_ERR(sdev))
+ return PTR_ERR(sdev);
+ dev = &sdev->dev;
+
+ ret = drm_dev_register(dev, 0);
+ if (ret)
+ return ret;
+
+ drm_fbdev_generic_setup(dev, 0);
+
+ return 0;
+}
+
+static int simpledrm_remove(struct platform_device *pdev)
+{
+ struct simpledrm_device *sdev = platform_get_drvdata(pdev);
+ struct drm_device *dev = &sdev->dev;
+
+ drm_dev_unplug(dev);
+
+ return 0;
+}
+
+static const struct of_device_id simpledrm_of_match_table[] = {
+ { .compatible = "simple-framebuffer", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, simpledrm_of_match_table);
+
+static struct platform_driver simpledrm_platform_driver = {
+ .driver = {
+ .name = "simple-framebuffer", /* connect to sysfb */
+ .of_match_table = simpledrm_of_match_table,
+ },
+ .probe = simpledrm_probe,
+ .remove = simpledrm_remove,
+};
+
+module_platform_driver(simpledrm_platform_driver);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 7d216fe9267f..05db980cc047 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -323,16 +323,12 @@ static int st7586_probe(struct spi_device *spi)
bufsize = (st7586_mode.vdisplay + 2) / 3 * st7586_mode.hdisplay;
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(dbi->reset)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(dbi->reset);
- }
+ if (IS_ERR(dbi->reset))
+ return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
a0 = devm_gpiod_get(dev, "a0", GPIOD_OUT_LOW);
- if (IS_ERR(a0)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'a0'\n");
- return PTR_ERR(a0);
- }
+ if (IS_ERR(a0))
+ return dev_err_probe(dev, PTR_ERR(a0), "Failed to get GPIO 'a0'\n");
device_property_read_u32(dev, "rotation", &rotation);
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index df8872d62cdd..e8b7815d8cae 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -207,16 +207,12 @@ static int st7735r_probe(struct spi_device *spi)
drm = &dbidev->drm;
dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
- if (IS_ERR(dbi->reset)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
- return PTR_ERR(dbi->reset);
- }
+ if (IS_ERR(dbi->reset))
+ return dev_err_probe(dev, PTR_ERR(dbi->reset), "Failed to get GPIO 'reset'\n");
dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
- if (IS_ERR(dc)) {
- DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
- return PTR_ERR(dc);
- }
+ if (IS_ERR(dc))
+ return dev_err_probe(dev, PTR_ERR(dc), "Failed to get GPIO 'dc'\n");
dbidev->backlight = devm_of_find_backlight(dev);
if (IS_ERR(dbidev->backlight))
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
index 40e5e9da7953..f906b22959cf 100644
--- a/drivers/gpu/drm/ttm/Makefile
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -4,7 +4,7 @@
ttm-y := ttm_tt.o ttm_bo.o ttm_bo_util.o ttm_bo_vm.o ttm_module.o \
ttm_execbuf_util.o ttm_range_manager.o ttm_resource.o ttm_pool.o \
- ttm_device.o
+ ttm_device.o ttm_sys_manager.o
ttm-$(CONFIG_AGP) += ttm_agp_backend.o
obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 0226ae69d3ab..6ddc16f0fe2b 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -32,8 +32,9 @@
#define pr_fmt(fmt) "[TTM] " fmt
-#include <drm/ttm/ttm_bo_driver.h>
-#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_tt.h>
+#include <drm/ttm/ttm_resource.h>
#include <linux/agp_backend.h>
#include <linux/module.h>
#include <linux/slab.h>
@@ -50,7 +51,6 @@ int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
{
struct ttm_agp_backend *agp_be = container_of(ttm, struct ttm_agp_backend, ttm);
struct page *dummy_read_page = ttm_glob.dummy_read_page;
- struct drm_mm_node *node = bo_mem->mm_node;
struct agp_memory *mem;
int ret, cached = ttm->caching == ttm_cached;
unsigned i;
@@ -76,7 +76,7 @@ int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem)
mem->is_flushed = 1;
mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
- ret = agp_bind_memory(mem, node->start);
+ ret = agp_bind_memory(mem, bo_mem->start);
if (ret)
pr_err("AGP Bind memory failed\n");
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cfd0b9292397..ca1b098b6a56 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -274,7 +274,7 @@ static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
}
/**
- * function ttm_bo_cleanup_refs
+ * ttm_bo_cleanup_refs
* If bo idle, remove from lru lists, and unref.
* If not idle, block if possible.
*
@@ -401,6 +401,8 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_device *bdev = bo->bdev;
int ret;
+ WARN_ON_ONCE(bo->pin_count);
+
if (!bo->deleted) {
ret = ttm_bo_individualize_resv(bo);
if (ret) {
@@ -434,7 +436,7 @@ static void ttm_bo_release(struct kref *kref)
* FIXME: QXL is triggering this. Can be removed when the
* driver is fixed.
*/
- if (WARN_ON_ONCE(bo->pin_count)) {
+ if (bo->pin_count) {
bo->pin_count = 0;
ttm_bo_move_to_lru_tail(bo, &bo->mem, NULL);
}
@@ -458,8 +460,6 @@ static void ttm_bo_release(struct kref *kref)
atomic_dec(&ttm_glob.bo_count);
dma_fence_put(bo->moving);
- if (!ttm_bo_uses_embedded_gem_object(bo))
- dma_resv_fini(&bo->base._resv);
bo->destroy(bo);
}
@@ -507,11 +507,6 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo,
return ttm_tt_create(bo, false);
}
- evict_mem = bo->mem;
- evict_mem.mm_node = NULL;
- evict_mem.bus.offset = 0;
- evict_mem.bus.addr = NULL;
-
ret = ttm_bo_mem_space(bo, &placement, &evict_mem, ctx);
if (ret) {
if (ret != -ERESTARTSYS) {
@@ -867,12 +862,8 @@ static int ttm_bo_bounce_temp_buffer(struct ttm_buffer_object *bo,
struct ttm_place *hop)
{
struct ttm_placement hop_placement;
+ struct ttm_resource hop_mem;
int ret;
- struct ttm_resource hop_mem = *mem;
-
- hop_mem.mm_node = NULL;
- hop_mem.mem_type = TTM_PL_SYSTEM;
- hop_mem.placement = 0;
hop_placement.num_placement = hop_placement.num_busy_placement = 1;
hop_placement.placement = hop_placement.busy_placement = hop;
@@ -894,20 +885,14 @@ static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
struct ttm_operation_ctx *ctx)
{
- int ret = 0;
struct ttm_place hop;
struct ttm_resource mem;
+ int ret;
dma_resv_assert_held(bo->base.resv);
memset(&hop, 0, sizeof(hop));
- mem.num_pages = PAGE_ALIGN(bo->base.size) >> PAGE_SHIFT;
- mem.page_alignment = bo->mem.page_alignment;
- mem.bus.offset = 0;
- mem.bus.addr = NULL;
- mem.mm_node = NULL;
-
/*
* Determine where to move the buffer.
*
@@ -1028,6 +1013,7 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
struct dma_resv *resv,
void (*destroy) (struct ttm_buffer_object *))
{
+ static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
bool locked;
int ret = 0;
@@ -1038,14 +1024,9 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
INIT_LIST_HEAD(&bo->ddestroy);
bo->bdev = bdev;
bo->type = type;
- bo->mem.mem_type = TTM_PL_SYSTEM;
- bo->mem.num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- bo->mem.mm_node = NULL;
- bo->mem.page_alignment = page_alignment;
- bo->mem.bus.offset = 0;
- bo->mem.bus.addr = NULL;
+ bo->page_alignment = page_alignment;
+ ttm_resource_alloc(bo, &sys_mem, &bo->mem);
bo->moving = NULL;
- bo->mem.placement = 0;
bo->pin_count = 0;
bo->sg = sg;
if (resv) {
@@ -1054,15 +1035,6 @@ int ttm_bo_init_reserved(struct ttm_device *bdev,
} else {
bo->base.resv = &bo->base._resv;
}
- if (!ttm_bo_uses_embedded_gem_object(bo)) {
- /*
- * bo.base is not initialized, so we have to setup the
- * struct elements we want use regardless.
- */
- bo->base.size = size;
- dma_resv_init(&bo->base._resv);
- drm_vma_node_reset(&bo->base.vma_node);
- }
atomic_inc(&ttm_glob.bo_count);
/*
@@ -1194,14 +1166,16 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
if (bo->mem.mem_type != TTM_PL_SYSTEM) {
struct ttm_operation_ctx ctx = { false, false };
struct ttm_resource evict_mem;
- struct ttm_place hop;
+ struct ttm_place place, hop;
+ memset(&place, 0, sizeof(place));
memset(&hop, 0, sizeof(hop));
- evict_mem = bo->mem;
- evict_mem.mm_node = NULL;
- evict_mem.placement = 0;
- evict_mem.mem_type = TTM_PL_SYSTEM;
+ place.mem_type = TTM_PL_SYSTEM;
+
+ ret = ttm_resource_alloc(bo, &place, &evict_mem);
+ if (unlikely(ret))
+ goto out;
ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx, &hop);
if (unlikely(ret != 0)) {
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index efb7e9c34ab4..ae8b61460724 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -664,6 +664,7 @@ EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
{
+ static const struct ttm_place sys_mem = { .mem_type = TTM_PL_SYSTEM };
struct ttm_buffer_object *ghost;
int ret;
@@ -676,8 +677,7 @@ int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
if (ret)
ttm_bo_wait(bo, false, false);
- memset(&bo->mem, 0, sizeof(bo->mem));
- bo->mem.mem_type = TTM_PL_SYSTEM;
+ ttm_resource_alloc(bo, &sys_mem, &bo->mem);
bo->ttm = NULL;
dma_resv_unlock(&ghost->base._resv);
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index 510e3e001dab..460953dcad11 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -36,11 +36,11 @@
#include "ttm_module.h"
-/**
+/*
* ttm_global_mutex - protecting the global state
*/
-DEFINE_MUTEX(ttm_global_mutex);
-unsigned ttm_glob_use_count;
+static DEFINE_MUTEX(ttm_global_mutex);
+static unsigned ttm_glob_use_count;
struct ttm_global ttm_glob;
EXPORT_SYMBOL(ttm_glob);
@@ -104,7 +104,7 @@ out:
return ret;
}
-/**
+/*
* A buffer object shrink method that tries to swap out the first
* buffer object on the global::swap_lru list.
*/
@@ -165,21 +165,6 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
}
EXPORT_SYMBOL(ttm_device_swapout);
-static void ttm_init_sysman(struct ttm_device *bdev)
-{
- struct ttm_resource_manager *man = &bdev->sysman;
-
- /*
- * Initialize the system memory buffer type.
- * Other types need to be driver / IOCTL initialized.
- */
- man->use_tt = true;
-
- ttm_resource_manager_init(man, 0);
- ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
- ttm_resource_manager_set_used(man, true);
-}
-
static void ttm_device_delayed_workqueue(struct work_struct *work)
{
struct ttm_device *bdev =
@@ -222,7 +207,7 @@ int ttm_device_init(struct ttm_device *bdev, struct ttm_device_funcs *funcs,
bdev->funcs = funcs;
- ttm_init_sysman(bdev);
+ ttm_sys_man_init(bdev);
ttm_pool_init(&bdev->pool, dev, use_dma_alloc, use_dma32);
bdev->vma_manager = vma_manager;
diff --git a/drivers/gpu/drm/ttm/ttm_module.h b/drivers/gpu/drm/ttm/ttm_module.h
index d7cac5d4b835..767fe22aed48 100644
--- a/drivers/gpu/drm/ttm/ttm_module.h
+++ b/drivers/gpu/drm/ttm/ttm_module.h
@@ -34,7 +34,10 @@
#define TTM_PFX "[TTM] "
struct dentry;
+struct ttm_device;
extern struct dentry *ttm_debugfs_root;
+void ttm_sys_man_init(struct ttm_device *bdev);
+
#endif /* _TTM_MODULE_H_ */
diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c
index 707e5c152896..b9d5da6e6a81 100644
--- a/drivers/gpu/drm/ttm/ttm_range_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_range_manager.c
@@ -48,7 +48,8 @@ struct ttm_range_manager {
spinlock_t lock;
};
-static inline struct ttm_range_manager *to_range_manager(struct ttm_resource_manager *man)
+static inline struct ttm_range_manager *
+to_range_manager(struct ttm_resource_manager *man)
{
return container_of(man, struct ttm_range_manager, manager);
}
@@ -78,9 +79,8 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man,
mode = DRM_MM_INSERT_HIGH;
spin_lock(&rman->lock);
- ret = drm_mm_insert_node_in_range(mm, node,
- mem->num_pages,
- mem->page_alignment, 0,
+ ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
+ bo->page_alignment, 0,
place->fpfn, lpfn, mode);
spin_unlock(&rman->lock);
@@ -109,7 +109,21 @@ static void ttm_range_man_free(struct ttm_resource_manager *man,
}
}
-static const struct ttm_resource_manager_func ttm_range_manager_func;
+static void ttm_range_man_debug(struct ttm_resource_manager *man,
+ struct drm_printer *printer)
+{
+ struct ttm_range_manager *rman = to_range_manager(man);
+
+ spin_lock(&rman->lock);
+ drm_mm_print(&rman->mm, printer);
+ spin_unlock(&rman->lock);
+}
+
+static const struct ttm_resource_manager_func ttm_range_manager_func = {
+ .alloc = ttm_range_man_alloc,
+ .free = ttm_range_man_free,
+ .debug = ttm_range_man_debug
+};
int ttm_range_man_init(struct ttm_device *bdev,
unsigned type, bool use_tt,
@@ -163,19 +177,3 @@ int ttm_range_man_fini(struct ttm_device *bdev,
return 0;
}
EXPORT_SYMBOL(ttm_range_man_fini);
-
-static void ttm_range_man_debug(struct ttm_resource_manager *man,
- struct drm_printer *printer)
-{
- struct ttm_range_manager *rman = to_range_manager(man);
-
- spin_lock(&rman->lock);
- drm_mm_print(&rman->mm, printer);
- spin_unlock(&rman->lock);
-}
-
-static const struct ttm_resource_manager_func ttm_range_manager_func = {
- .alloc = ttm_range_man_alloc,
- .free = ttm_range_man_free,
- .debug = ttm_range_man_debug
-};
diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c
index 04f2eef653ab..59e2b7157e41 100644
--- a/drivers/gpu/drm/ttm/ttm_resource.c
+++ b/drivers/gpu/drm/ttm/ttm_resource.c
@@ -30,11 +30,17 @@ int ttm_resource_alloc(struct ttm_buffer_object *bo,
struct ttm_resource *res)
{
struct ttm_resource_manager *man =
- ttm_manager_type(bo->bdev, res->mem_type);
+ ttm_manager_type(bo->bdev, place->mem_type);
res->mm_node = NULL;
- if (!man->func || !man->func->alloc)
- return 0;
+ res->start = 0;
+ res->num_pages = PFN_UP(bo->base.size);
+ res->mem_type = place->mem_type;
+ res->placement = place->flags;
+ res->bus.addr = NULL;
+ res->bus.offset = 0;
+ res->bus.is_iomem = false;
+ res->bus.caching = ttm_cached;
return man->func->alloc(man, bo, place, res);
}
@@ -44,9 +50,7 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res)
struct ttm_resource_manager *man =
ttm_manager_type(bo->bdev, res->mem_type);
- if (man->func && man->func->free)
- man->func->free(man, res);
-
+ man->func->free(man, res);
res->mm_node = NULL;
res->mem_type = TTM_PL_SYSTEM;
}
@@ -139,7 +143,7 @@ void ttm_resource_manager_debug(struct ttm_resource_manager *man,
drm_printf(p, " use_type: %d\n", man->use_type);
drm_printf(p, " use_tt: %d\n", man->use_tt);
drm_printf(p, " size: %llu\n", man->size);
- if (man->func && man->func->debug)
- (*man->func->debug)(man, p);
+ if (man->func->debug)
+ man->func->debug(man, p);
}
EXPORT_SYMBOL(ttm_resource_manager_debug);
diff --git a/drivers/gpu/drm/ttm/ttm_sys_manager.c b/drivers/gpu/drm/ttm/ttm_sys_manager.c
new file mode 100644
index 000000000000..474221e863d0
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_sys_manager.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+#include <drm/ttm/ttm_resource.h>
+#include <drm/ttm/ttm_device.h>
+#include <drm/ttm/ttm_placement.h>
+
+#include "ttm_module.h"
+
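+/*
+ * TTM_PL_SYSTEM placements have no device address space to manage:
+ * buffers in the system domain are backed purely by their ttm_tt pages.
+ * The alloc and free callbacks below are therefore intentional no-ops.
+ */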
+static int ttm_sys_man_alloc(struct ttm_resource_manager *man,
+ struct ttm_buffer_object *bo,
+ const struct ttm_place *place,
+ struct ttm_resource *mem)
+{
+ return 0;
+}
+
+static void ttm_sys_man_free(struct ttm_resource_manager *man,
+ struct ttm_resource *mem)
+{
+}
+
+static const struct ttm_resource_manager_func ttm_sys_manager_func = {
+ .alloc = ttm_sys_man_alloc,
+ .free = ttm_sys_man_free,
+};
+
+void ttm_sys_man_init(struct ttm_device *bdev)
+{
+ struct ttm_resource_manager *man = &bdev->sysman;
+
+ /*
+ * Initialize the system memory buffer type.
+ * Other types need to be driver / IOCTL initialized.
+ */
+ man->use_tt = true;
+ man->func = &ttm_sys_manager_func;
+
+ ttm_resource_manager_init(man, 0);
+ ttm_set_driver_manager(bdev, TTM_PL_SYSTEM, man);
+ ttm_resource_manager_set_used(man, true);
+}
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a1a25410ec74..539e0232cb3b 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -400,6 +400,21 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED;
}
+#ifdef CONFIG_DEBUG_FS
+
+/* Test the shrinker functions and dump the result */
+static int ttm_tt_debugfs_shrink_show(struct seq_file *m, void *data)
+{
+ struct ttm_operation_ctx ctx = { false, false };
+
+ seq_printf(m, "%d\n", ttm_global_swapout(&ctx, GFP_KERNEL));
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(ttm_tt_debugfs_shrink);
+
+#endif
+
+
/**
* ttm_tt_mgr_init - register with the MM shrinker
*
@@ -407,6 +422,11 @@ void ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm)
*/
void ttm_tt_mgr_init(unsigned long num_pages, unsigned long num_dma32_pages)
{
+#ifdef CONFIG_DEBUG_FS
+ debugfs_create_file("tt_shrink", 0400, ttm_debugfs_root, NULL,
+ &ttm_tt_debugfs_shrink_fops);
+#endif
+
if (!ttm_pages_limit)
ttm_pages_limit = num_pages;
diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c
index e534896b6cfd..6d4b32da9866 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_drv.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c
@@ -12,6 +12,7 @@
#include <linux/pci.h>
#include <linux/vt_kern.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@@ -42,7 +43,7 @@ static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!vbox_check_supported(VBE_DISPI_ID_HGSMI))
return -ENODEV;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "vboxvideodrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 556ad0f02a0d..9eff45b48869 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -30,6 +30,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
@@ -56,10 +57,8 @@ void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index)
res = platform_get_resource(dev, IORESOURCE_MEM, index);
map = devm_ioremap_resource(&dev->dev, res);
- if (IS_ERR(map)) {
- DRM_ERROR("Failed to map registers: %ld\n", PTR_ERR(map));
+ if (IS_ERR(map))
return map;
- }
return map;
}
@@ -266,7 +265,9 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
goto unbind_all;
- drm_fb_helper_remove_conflicting_framebuffers(NULL, "vc4drmfb", false);
+ ret = drm_aperture_remove_framebuffers(false, "vc4drmfb");
+ if (ret)
+ goto unbind_all;
ret = vc4_kms_load(drm);
if (ret < 0)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index a7500716cf3f..5dceadc61600 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -825,7 +825,7 @@ void vc4_crtc_destroy_state(struct drm_crtc *crtc,
void vc4_crtc_reset(struct drm_crtc *crtc);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
- unsigned int *right, unsigned int *left,
+ unsigned int *left, unsigned int *right,
unsigned int *top, unsigned int *bottom);
/* vc4_debugfs.c */
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c
index 1fda574579af..c27b287d2053 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.c
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.c
@@ -214,6 +214,32 @@ static int vc4_hdmi_connector_get_modes(struct drm_connector *connector)
return ret;
}
+static int vc4_hdmi_connector_atomic_check(struct drm_connector *connector,
+ struct drm_atomic_state *state)
+{
+ struct drm_connector_state *old_state =
+ drm_atomic_get_old_connector_state(state, connector);
+ struct drm_connector_state *new_state =
+ drm_atomic_get_new_connector_state(state, connector);
+ struct drm_crtc *crtc = new_state->crtc;
+
+ if (!crtc)
+ return 0;
+
+ if (old_state->colorspace != new_state->colorspace ||
+ !drm_connector_atomic_hdr_metadata_equal(old_state, new_state)) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ crtc_state->mode_changed = true;
+ }
+
+ return 0;
+}
+
static void vc4_hdmi_connector_reset(struct drm_connector *connector)
{
struct vc4_hdmi_connector_state *old_state =
@@ -263,6 +289,7 @@ static const struct drm_connector_funcs vc4_hdmi_connector_funcs = {
static const struct drm_connector_helper_funcs vc4_hdmi_connector_helper_funcs = {
.get_modes = vc4_hdmi_connector_get_modes,
+ .atomic_check = vc4_hdmi_connector_atomic_check,
};
static int vc4_hdmi_connector_init(struct drm_device *dev,
@@ -290,6 +317,11 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
if (ret)
return ret;
+ ret = drm_mode_create_hdmi_colorspace_property(connector);
+ if (ret)
+ return ret;
+
+ drm_connector_attach_colorspace_property(connector);
drm_connector_attach_tv_margin_properties(connector);
drm_connector_attach_max_bpc_property(connector, 8, 12);
@@ -299,6 +331,9 @@ static int vc4_hdmi_connector_init(struct drm_device *dev,
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
+ if (vc4_hdmi->variant->supports_hdr)
+ drm_connector_attach_hdr_output_metadata_property(connector);
+
drm_connector_attach_encoder(connector, encoder);
return 0;
@@ -395,7 +430,7 @@ static void vc4_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
vc4_encoder->limited_rgb_range ?
HDMI_QUANTIZATION_RANGE_LIMITED :
HDMI_QUANTIZATION_RANGE_FULL);
-
+ drm_hdmi_avi_infoframe_colorspace(&frame.avi, cstate);
drm_hdmi_avi_infoframe_bars(&frame.avi, cstate);
vc4_hdmi_write_infoframe(encoder, &frame);
@@ -432,6 +467,25 @@ static void vc4_hdmi_set_audio_infoframe(struct drm_encoder *encoder)
vc4_hdmi_write_infoframe(encoder, &frame);
}
+static void vc4_hdmi_set_hdr_infoframe(struct drm_encoder *encoder)
+{
+ struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
+ struct drm_connector *connector = &vc4_hdmi->connector;
+ struct drm_connector_state *conn_state = connector->state;
+ union hdmi_infoframe frame;
+
+ if (!vc4_hdmi->variant->supports_hdr)
+ return;
+
+ if (!conn_state->hdr_output_metadata)
+ return;
+
+ if (drm_hdmi_infoframe_set_hdr_metadata(&frame.drm, conn_state))
+ return;
+
+ vc4_hdmi_write_infoframe(encoder, &frame);
+}
+
static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
{
struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder);
@@ -444,6 +498,8 @@ static void vc4_hdmi_set_infoframes(struct drm_encoder *encoder)
*/
if (vc4_hdmi->audio.streaming)
vc4_hdmi_set_audio_infoframe(encoder);
+
+ vc4_hdmi_set_hdr_infoframe(encoder);
}
static void vc4_hdmi_encoder_post_crtc_disable(struct drm_encoder *encoder,
@@ -2102,6 +2158,7 @@ static const struct vc4_hdmi_variant bcm2835_variant = {
.phy_rng_enable = vc4_hdmi_phy_rng_enable,
.phy_rng_disable = vc4_hdmi_phy_rng_disable,
.channel_map = vc4_hdmi_channel_map,
+ .supports_hdr = false,
};
static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
@@ -2129,6 +2186,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi0_variant = {
.phy_rng_enable = vc5_hdmi_phy_rng_enable,
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
};
static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
@@ -2156,6 +2214,7 @@ static const struct vc4_hdmi_variant bcm2711_hdmi1_variant = {
.phy_rng_enable = vc5_hdmi_phy_rng_enable,
.phy_rng_disable = vc5_hdmi_phy_rng_disable,
.channel_map = vc5_hdmi_channel_map,
+ .supports_hdr = true,
};
static const struct of_device_id vc4_hdmi_dt_match[] = {
diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h
index 3cebd1fd00fc..060bcaefbeb5 100644
--- a/drivers/gpu/drm/vc4/vc4_hdmi.h
+++ b/drivers/gpu/drm/vc4/vc4_hdmi.h
@@ -99,6 +99,9 @@ struct vc4_hdmi_variant {
/* Callback to get channel map */
u32 (*channel_map)(struct vc4_hdmi *vc4_hdmi, u32 channel_mask);
+
+ /* Enables HDR metadata */
+ bool supports_hdr;
};
/* HDMI audio information */
diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c
index bb5529a7a9c2..f29ac64a5aa5 100644
--- a/drivers/gpu/drm/vc4/vc4_kms.c
+++ b/drivers/gpu/drm/vc4/vc4_kms.c
@@ -899,7 +899,6 @@ int vc4_kms_load(struct drm_device *dev)
dev->mode_config.helper_private = &vc4_mode_config_helpers;
dev->mode_config.preferred_depth = 24;
dev->mode_config.async_page_flip = true;
- dev->mode_config.allow_fb_modifiers = true;
ret = vc4_ctm_obj_init(vc4);
if (ret)
diff --git a/drivers/gpu/drm/via/via_dma.c b/drivers/gpu/drm/via/via_dma.c
index cd56ffa3df58..177b0499abf1 100644
--- a/drivers/gpu/drm/via/via_dma.c
+++ b/drivers/gpu/drm/via/via_dma.c
@@ -38,7 +38,6 @@
#include <linux/uaccess.h>
#include <drm/drm.h>
-#include <drm/drm_agpsupport.h>
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/via_drm.h>
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 5771bb53ce6a..e016a4d62090 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -494,6 +494,7 @@ via_dmablit_workqueue(struct work_struct *work)
{
drm_via_blitq_t *blitq = container_of(work, drm_via_blitq_t, wq);
struct drm_device *dev = blitq->dev;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
unsigned long irqsave;
drm_via_sg_info_t *cur_sg;
int cur_released;
@@ -520,7 +521,7 @@ via_dmablit_workqueue(struct work_struct *work)
wake_up(&blitq->busy_queue);
- via_free_sg_info(dev->pdev, cur_sg);
+ via_free_sg_info(pdev, cur_sg);
kfree(cur_sg);
spin_lock_irqsave(&blitq->blit_lock, irqsave);
@@ -540,9 +541,10 @@ via_init_dmablit(struct drm_device *dev)
{
int i, j;
drm_via_private_t *dev_priv = (drm_via_private_t *)dev->dev_private;
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
drm_via_blitq_t *blitq;
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
for (i = 0; i < VIA_NUM_BLIT_ENGINES; ++i) {
blitq = dev_priv->blit_queues + i;
@@ -573,6 +575,7 @@ via_init_dmablit(struct drm_device *dev)
static int
via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmablit_t *xfer)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
int draw = xfer->to_fb;
int ret = 0;
@@ -652,17 +655,17 @@ via_build_sg_info(struct drm_device *dev, drm_via_sg_info_t *vsg, drm_via_dmabli
if (0 != (ret = via_lock_all_dma_pages(vsg, xfer))) {
DRM_ERROR("Could not lock DMA pages.\n");
- via_free_sg_info(dev->pdev, vsg);
+ via_free_sg_info(pdev, vsg);
return ret;
}
- via_map_blit_for_device(dev->pdev, xfer, vsg, 0);
+ via_map_blit_for_device(pdev, xfer, vsg, 0);
if (0 != (ret = via_alloc_desc_pages(vsg))) {
DRM_ERROR("Could not allocate DMA descriptor pages.\n");
- via_free_sg_info(dev->pdev, vsg);
+ via_free_sg_info(pdev, vsg);
return ret;
}
- via_map_blit_for_device(dev->pdev, xfer, vsg, 1);
+ via_map_blit_for_device(pdev, xfer, vsg, 1);
return 0;
}
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 255c5066a939..a9f6b0c11966 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -98,6 +98,7 @@ int via_map_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
int via_driver_load(struct drm_device *dev, unsigned long chipset)
{
+ struct pci_dev *pdev = to_pci_dev(dev->dev);
drm_via_private_t *dev_priv;
int ret = 0;
@@ -110,7 +111,7 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->chipset = chipset;
- pci_set_master(dev->pdev);
+ pci_set_master(pdev);
ret = drm_vblank_init(dev, 1);
if (ret) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index a21dc3ad6f88..33bf5f53ae31 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <drm/drm.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -50,13 +51,16 @@ static int virtio_gpu_pci_quirk(struct drm_device *dev, struct virtio_device *vd
const char *pname = dev_name(&pdev->dev);
bool vga = (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
char unique[20];
+ int ret;
DRM_INFO("pci: %s detected at %s\n",
vga ? "virtio-vga" : "virtio-gpu-pci",
pname);
- if (vga)
- drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
- "virtiodrmfb");
+ if (vga) {
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "virtiodrmfb");
+ if (ret)
+ return ret;
+ }
/*
* Normally the drm_dev_set_unique() call is done by core DRM.
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 8502400b2f9c..2de61b63ef91 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -64,6 +64,7 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
{
struct drm_gem_object *gobj;
struct virtio_gpu_object_params params = { 0 };
+ struct virtio_gpu_device *vgdev = dev->dev_private;
int ret;
uint32_t pitch;
@@ -79,6 +80,13 @@ int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
params.height = args->height;
params.size = args->size;
params.dumb = true;
+
+ if (vgdev->has_resource_blob && !vgdev->has_virgl_3d) {
+ params.blob_mem = VIRTGPU_BLOB_MEM_GUEST;
+ params.blob_flags = VIRTGPU_BLOB_FLAG_USE_SHAREABLE;
+ params.blob = true;
+ }
+
ret = virtio_gpu_gem_create(file_priv, dev, &params, &gobj,
&args->handle);
if (ret)
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 4ff1ec28e630..f648b0e24447 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -254,6 +254,9 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
}
if (params->blob) {
+ if (params->blob_mem == VIRTGPU_BLOB_MEM_GUEST)
+ bo->guest_blob = true;
+
virtio_gpu_cmd_resource_create_blob(vgdev, bo, params,
ents, nents);
} else if (params->virgl) {
diff --git a/drivers/gpu/drm/vkms/vkms_composer.c b/drivers/gpu/drm/vkms/vkms_composer.c
index 66c6842d70db..e49523866e1d 100644
--- a/drivers/gpu/drm/vkms/vkms_composer.c
+++ b/drivers/gpu/drm/vkms/vkms_composer.c
@@ -4,6 +4,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_vblank.h>
@@ -64,7 +65,17 @@ static u8 blend_channel(u8 src, u8 dst, u8 alpha)
return new_color;
}
-static void alpha_blending(const u8 *argb_src, u8 *argb_dst)
+/**
+ * alpha_blend - alpha blending equation
+ * @argb_src: src pixel in premultiplied alpha mode
+ * @argb_dst: dst pixel, completely opaque
+ *
+ * Blend pixels using the premultiplied blend formula. The current DRM
+ * assumption is that pixel color values have already been pre-multiplied
+ * with the alpha channel values. See drm_plane_create_blend_mode_property()
+ * for more details. Also, this formula assumes a completely opaque background.
+ */
+static void alpha_blend(const u8 *argb_src, u8 *argb_dst)
{
u8 alpha;
@@ -72,8 +83,16 @@ static void alpha_blending(const u8 *argb_src, u8 *argb_dst)
argb_dst[0] = blend_channel(argb_src[0], argb_dst[0], alpha);
argb_dst[1] = blend_channel(argb_src[1], argb_dst[1], alpha);
argb_dst[2] = blend_channel(argb_src[2], argb_dst[2], alpha);
- /* Opaque primary */
- argb_dst[3] = 0xFF;
+}
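+
+/*
+ * Rough worked example (pixel values assumed): blending a premultiplied
+ * half-transparent red src (a = 0x80, r = 0x80, g = 0x00, b = 0x00) over
+ * an opaque blue dst (r = 0x00, g = 0x00, b = 0xff) yields approximately
+ * r = 0x80, g = 0x00, b = 0x7f, since each dst channel is scaled by
+ * (255 - alpha) / 255 and the premultiplied src channel is added on top.
+ */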
+
+/**
+ * x_blend - blending equation that ignores the pixel alpha
+ * @xrgb_src: src pixel
+ * @xrgb_dst: dst pixel
+ *
+ * Overwrite the RGB color values of the dst pixel with those of the src
+ * pixel, ignoring the alpha channel.
+ */
+static void x_blend(const u8 *xrgb_src, u8 *xrgb_dst)
+{
+ memcpy(xrgb_dst, xrgb_src, sizeof(u8) * 3);
}
/**
@@ -82,16 +101,20 @@ static void alpha_blending(const u8 *argb_src, u8 *argb_dst)
* @vaddr_src: source address
* @dst_composer: destination framebuffer's metadata
* @src_composer: source framebuffer's metadata
+ * @pixel_blend: blending equation based on plane format
*
- * Blend the vaddr_src value with the vaddr_dst value using the pre-multiplied
- * alpha blending equation, since DRM currently assumes that the pixel color
- * values have already been pre-multiplied with the alpha channel values. See
- * more drm_plane_create_blend_mode_property(). This function uses buffer's
- * metadata to locate the new composite values at vaddr_dst.
+ * Blend the vaddr_src value with the vaddr_dst value using a pixel blend
+ * equation according to the supported plane formats DRM_FORMAT_(A/XRGB8888),
+ * clearing the alpha channel to a completely opaque background. This function
+ * uses the buffers' metadata to locate the new composite values at vaddr_dst.
+ *
+ * TODO: completely clear the primary plane (a = 0xff) before starting to blend
+ * pixel color values
*/
static void blend(void *vaddr_dst, void *vaddr_src,
struct vkms_composer *dst_composer,
- struct vkms_composer *src_composer)
+ struct vkms_composer *src_composer,
+ void (*pixel_blend)(const u8 *, u8 *))
{
int i, j, j_dst, i_dst;
int offset_src, offset_dst;
@@ -119,36 +142,46 @@ static void blend(void *vaddr_dst, void *vaddr_src,
pixel_src = (u8 *)(vaddr_src + offset_src);
pixel_dst = (u8 *)(vaddr_dst + offset_dst);
- alpha_blending(pixel_src, pixel_dst);
+ pixel_blend(pixel_src, pixel_dst);
+ /* clearing alpha channel (0xff) */
+ pixel_dst[3] = 0xff;
}
i_dst++;
}
}
-static void compose_cursor(struct vkms_composer *cursor_composer,
- struct vkms_composer *primary_composer,
- void *vaddr_out)
+static void compose_plane(struct vkms_composer *primary_composer,
+ struct vkms_composer *plane_composer,
+ void *vaddr_out)
{
- struct drm_gem_object *cursor_obj;
- struct drm_gem_shmem_object *cursor_shmem_obj;
+ struct drm_gem_object *plane_obj;
+ struct drm_gem_shmem_object *plane_shmem_obj;
+ struct drm_framebuffer *fb = &plane_composer->fb;
+ void (*pixel_blend)(const u8 *p_src, u8 *p_dst);
- cursor_obj = drm_gem_fb_get_obj(&cursor_composer->fb, 0);
- cursor_shmem_obj = to_drm_gem_shmem_obj(cursor_obj);
+ plane_obj = drm_gem_fb_get_obj(&plane_composer->fb, 0);
+ plane_shmem_obj = to_drm_gem_shmem_obj(plane_obj);
- if (WARN_ON(!cursor_shmem_obj->vaddr))
+ if (WARN_ON(!plane_shmem_obj->vaddr))
return;
- blend(vaddr_out, cursor_shmem_obj->vaddr,
- primary_composer, cursor_composer);
+ if (fb->format->format == DRM_FORMAT_ARGB8888)
+ pixel_blend = &alpha_blend;
+ else
+ pixel_blend = &x_blend;
+
+ blend(vaddr_out, plane_shmem_obj->vaddr, primary_composer,
+ plane_composer, pixel_blend);
}
-static int compose_planes(void **vaddr_out,
- struct vkms_composer *primary_composer,
- struct vkms_composer *cursor_composer)
+static int compose_active_planes(void **vaddr_out,
+ struct vkms_composer *primary_composer,
+ struct vkms_crtc_state *crtc_state)
{
struct drm_framebuffer *fb = &primary_composer->fb;
struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
struct drm_gem_shmem_object *shmem_obj = to_drm_gem_shmem_obj(gem_obj);
+ int i;
if (!*vaddr_out) {
*vaddr_out = kzalloc(shmem_obj->base.size, GFP_KERNEL);
@@ -163,8 +196,14 @@ static int compose_planes(void **vaddr_out,
memcpy(*vaddr_out, shmem_obj->vaddr, shmem_obj->base.size);
- if (cursor_composer)
- compose_cursor(cursor_composer, primary_composer, *vaddr_out);
+ /*
+ * If there are planes other than the primary, we assume the active
+ * planes are already sorted in z-order and compose them associatively:
+ * ((primary <- overlay) <- cursor)
+ */
+ for (i = 1; i < crtc_state->num_active_planes; i++)
+ compose_plane(primary_composer,
+ crtc_state->active_planes[i]->composer,
+ *vaddr_out);
return 0;
}
@@ -186,7 +225,7 @@ void vkms_composer_worker(struct work_struct *work)
struct drm_crtc *crtc = crtc_state->base.crtc;
struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
struct vkms_composer *primary_composer = NULL;
- struct vkms_composer *cursor_composer = NULL;
+ struct vkms_plane_state *act_plane = NULL;
bool crc_pending, wb_pending;
void *vaddr_out = NULL;
u32 crc32 = 0;
@@ -210,11 +249,11 @@ void vkms_composer_worker(struct work_struct *work)
if (!crc_pending)
return;
- if (crtc_state->num_active_planes >= 1)
- primary_composer = crtc_state->active_planes[0]->composer;
-
- if (crtc_state->num_active_planes == 2)
- cursor_composer = crtc_state->active_planes[1]->composer;
+ if (crtc_state->num_active_planes >= 1) {
+ act_plane = crtc_state->active_planes[0];
+ if (act_plane->base.plane->type == DRM_PLANE_TYPE_PRIMARY)
+ primary_composer = act_plane->composer;
+ }
if (!primary_composer)
return;
@@ -222,7 +261,8 @@ void vkms_composer_worker(struct work_struct *work)
if (wb_pending)
vaddr_out = crtc_state->active_writeback;
- ret = compose_planes(&vaddr_out, primary_composer, cursor_composer);
+ ret = compose_active_planes(&vaddr_out, primary_composer,
+ crtc_state);
if (ret) {
if (ret == -EINVAL && !wb_pending)
kfree(vaddr_out);
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 2173b82606f6..027ffe759440 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -44,6 +44,10 @@ static bool enable_writeback = true;
module_param_named(enable_writeback, enable_writeback, bool, 0444);
MODULE_PARM_DESC(enable_writeback, "Enable/Disable writeback connector support");
+static bool enable_overlay;
+module_param_named(enable_overlay, enable_overlay, bool, 0444);
+MODULE_PARM_DESC(enable_overlay, "Enable/Disable overlay support");
+
DEFINE_DRM_GEM_FOPS(vkms_driver_fops);
static void vkms_release(struct drm_device *dev)
@@ -198,6 +202,7 @@ static int __init vkms_init(void)
config->cursor = enable_cursor;
config->writeback = enable_writeback;
+ config->overlay = enable_overlay;
return vkms_create(config);
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 35540c7c4416..ac8c9c2fa4ed 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -37,6 +37,10 @@ struct vkms_plane_state {
struct vkms_composer *composer;
};
+struct vkms_plane {
+ struct drm_plane base;
+};
+
/**
* vkms_crtc_state - Driver specific CRTC state
* @base: base CRTC state
@@ -85,6 +89,7 @@ struct vkms_device;
struct vkms_config {
bool writeback;
bool cursor;
+ bool overlay;
/* only set when instantiated */
struct vkms_device *dev;
};
@@ -114,8 +119,8 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
int vkms_output_init(struct vkms_device *vkmsdev, int index);
-struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type, int index);
+struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+ enum drm_plane_type type, int index);
/* CRC Support */
const char *const *vkms_get_crc_sources(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/vkms/vkms_output.c b/drivers/gpu/drm/vkms/vkms_output.c
index f5f6f15c362c..04406bd3ff02 100644
--- a/drivers/gpu/drm/vkms/vkms_output.c
+++ b/drivers/gpu/drm/vkms/vkms_output.c
@@ -39,7 +39,7 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
struct drm_connector *connector = &output->connector;
struct drm_encoder *encoder = &output->encoder;
struct drm_crtc *crtc = &output->crtc;
- struct drm_plane *primary, *cursor = NULL;
+ struct vkms_plane *primary, *cursor = NULL, *overlay = NULL;
int ret;
int writeback;
@@ -47,17 +47,24 @@ int vkms_output_init(struct vkms_device *vkmsdev, int index)
if (IS_ERR(primary))
return PTR_ERR(primary);
+ if (vkmsdev->config->overlay) {
+ overlay = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_OVERLAY, index);
+ if (IS_ERR(overlay))
+ return PTR_ERR(overlay);
+
+ if (!overlay->base.possible_crtcs)
+ overlay->base.possible_crtcs = drm_crtc_mask(crtc);
+ }
+
if (vkmsdev->config->cursor) {
cursor = vkms_plane_init(vkmsdev, DRM_PLANE_TYPE_CURSOR, index);
- if (IS_ERR(cursor)) {
- ret = PTR_ERR(cursor);
- goto err_cursor;
- }
+ if (IS_ERR(cursor))
+ return PTR_ERR(cursor);
}
- ret = vkms_crtc_init(dev, crtc, primary, cursor);
+ ret = vkms_crtc_init(dev, crtc, &primary->base, &cursor->base);
if (ret)
- goto err_crtc;
+ return ret;
ret = drm_connector_init(dev, connector, &vkms_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -100,12 +107,5 @@ err_encoder:
err_connector:
drm_crtc_cleanup(crtc);
-err_crtc:
- if (vkmsdev->config->cursor)
- drm_plane_cleanup(cursor);
-
-err_cursor:
- drm_plane_cleanup(primary);
-
return ret;
}
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 6d310d31b75d..107521ace597 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -16,8 +16,9 @@ static const u32 vkms_formats[] = {
DRM_FORMAT_XRGB8888,
};
-static const u32 vkms_cursor_formats[] = {
+static const u32 vkms_plane_formats[] = {
DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_XRGB8888
};
static struct drm_plane_state *
@@ -86,7 +87,6 @@ static void vkms_plane_reset(struct drm_plane *plane)
static const struct drm_plane_funcs vkms_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = drm_plane_cleanup,
.reset = vkms_plane_reset,
.atomic_duplicate_state = vkms_plane_duplicate_state,
.atomic_destroy_state = vkms_plane_destroy_state,
@@ -133,7 +133,7 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
- if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ if (plane->type != DRM_PLANE_TYPE_PRIMARY)
can_position = true;
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
@@ -191,39 +191,42 @@ static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
.cleanup_fb = vkms_cleanup_fb,
};
-struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
- enum drm_plane_type type, int index)
+struct vkms_plane *vkms_plane_init(struct vkms_device *vkmsdev,
+ enum drm_plane_type type, int index)
{
struct drm_device *dev = &vkmsdev->drm;
const struct drm_plane_helper_funcs *funcs;
- struct drm_plane *plane;
+ struct vkms_plane *plane;
const u32 *formats;
- int ret, nformats;
+ int nformats;
- plane = kzalloc(sizeof(*plane), GFP_KERNEL);
- if (!plane)
- return ERR_PTR(-ENOMEM);
-
- if (type == DRM_PLANE_TYPE_CURSOR) {
- formats = vkms_cursor_formats;
- nformats = ARRAY_SIZE(vkms_cursor_formats);
+ switch (type) {
+ case DRM_PLANE_TYPE_PRIMARY:
+ formats = vkms_formats;
+ nformats = ARRAY_SIZE(vkms_formats);
funcs = &vkms_primary_helper_funcs;
- } else {
+ break;
+ case DRM_PLANE_TYPE_CURSOR:
+ case DRM_PLANE_TYPE_OVERLAY:
+ formats = vkms_plane_formats;
+ nformats = ARRAY_SIZE(vkms_plane_formats);
+ funcs = &vkms_primary_helper_funcs;
+ break;
+ default:
formats = vkms_formats;
nformats = ARRAY_SIZE(vkms_formats);
funcs = &vkms_primary_helper_funcs;
+ break;
}
- ret = drm_universal_plane_init(dev, plane, 1 << index,
- &vkms_plane_funcs,
- formats, nformats,
- NULL, type, NULL);
- if (ret) {
- kfree(plane);
- return ERR_PTR(ret);
- }
+ plane = drmm_universal_plane_alloc(dev, struct vkms_plane, base, 1 << index,
+ &vkms_plane_funcs,
+ formats, nformats,
+ NULL, type, NULL);
+ if (IS_ERR(plane))
+ return plane;
- drm_plane_helper_add(plane, funcs);
+ drm_plane_helper_add(&plane->base, funcs);
return plane;
}
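
For context, this is the DRM-managed allocation pattern the function now uses, shown as a minimal sketch under the assumption of a hypothetical struct my_plane embedding struct drm_plane as "base". drmm_universal_plane_alloc() ties the plane's storage to the drm_device lifetime, which is why the kzalloc()/drm_universal_plane_init() pair, the kfree() error path and the .destroy = drm_plane_cleanup hook all disappear above; failure is reported as an ERR_PTR().

#include <linux/types.h>
#include <drm/drm_device.h>
#include <drm/drm_plane.h>

struct my_plane {
	struct drm_plane base;		/* hypothetical driver plane wrapper */
};

static struct my_plane *my_plane_create(struct drm_device *dev,
					const struct drm_plane_funcs *funcs,
					const u32 *formats,
					unsigned int nformats)
{
	struct my_plane *p;

	/* Allocation and drm_plane init in one step, tied to dev's lifetime. */
	p = drmm_universal_plane_alloc(dev, struct my_plane, base,
				       1 /* possible_crtcs */, funcs,
				       formats, nformats, NULL,
				       DRM_PLANE_TYPE_OVERLAY, NULL);

	/* On failure this is an ERR_PTR(); nothing needs manual cleanup. */
	return p;
}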
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index 15acdf2a7c0f..0060ef842b5a 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,13 +1,9 @@
# SPDX-License-Identifier: GPL-2.0
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI && X86 && MMU
- select FB_DEFERRED_IO
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ depends on DRM && PCI && MMU
+ depends on X86 || ARM64
select DRM_TTM
- select FB
select MAPPING_DIRTY_HELPERS
# Only needed for the transitional use of drm_crtc_init - can be removed
# again once vmwgfx sets up the primary plane itself.
@@ -20,7 +16,7 @@ config DRM_VMWGFX
The compiled module will be called "vmwgfx.ko".
config DRM_VMWGFX_FBCON
- depends on DRM_VMWGFX && FB
+ depends on DRM_VMWGFX && DRM_FBDEV_EMULATION
bool "Enable framebuffer console under vmwgfx by default"
help
Choose this option if you are shipping a new vmwgfx
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 8c02fa5852e7..09f6dcac768b 100644
--- a/drivers/gpu/drm/vmwgfx/Makefile
+++ b/drivers/gpu/drm/vmwgfx/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
- vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
+ vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_ttm_buffer.o \
vmwgfx_cmd.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
vmwgfx_overlay.o vmwgfx_gmrid_manager.o vmwgfx_fence.o \
vmwgfx_bo.o vmwgfx_scrn.o vmwgfx_context.o \
@@ -9,7 +9,9 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
vmwgfx_cotable.o vmwgfx_so.o vmwgfx_binding.o vmwgfx_msg.o \
vmwgfx_simple_resource.o vmwgfx_va.o vmwgfx_blit.o \
vmwgfx_validation.o vmwgfx_page_dirty.o vmwgfx_streamoutput.o \
- ttm_object.o ttm_lock.o ttm_memory.o
+ ttm_object.o ttm_memory.o
+vmwgfx-$(CONFIG_DRM_FBDEV_EMULATION) += vmwgfx_fb.o
vmwgfx-$(CONFIG_TRANSPARENT_HUGEPAGE) += vmwgfx_thp.o
+
obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
index 4db25bd9fa22..127eaf0a0a58 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga3d_surfacedefs.h
@@ -1467,6 +1467,7 @@ struct svga3dsurface_cache {
/**
* struct svga3dsurface_loc - Surface location
+ * @sheet: The multisample sheet.
* @sub_resource: Surface subresource. Defined as layer * num_mip_levels +
* mip_level.
* @x: X coordinate.
@@ -1474,6 +1475,7 @@ struct svga3dsurface_cache {
* @z: Z coordinate.
*/
struct svga3dsurface_loc {
+ u32 sheet;
u32 sub_resource;
u32 x, y, z;
};
@@ -1566,8 +1568,8 @@ svga3dsurface_get_loc(const struct svga3dsurface_cache *cache,
u32 layer;
int i;
- if (offset >= cache->sheet_bytes)
- offset %= cache->sheet_bytes;
+ loc->sheet = offset / cache->sheet_bytes;
+ offset -= loc->sheet * cache->sheet_bytes;
layer = offset / cache->mip_chain_bytes;
offset -= layer * cache->mip_chain_bytes;
@@ -1631,6 +1633,7 @@ svga3dsurface_min_loc(const struct svga3dsurface_cache *cache,
u32 sub_resource,
struct svga3dsurface_loc *loc)
{
+ loc->sheet = 0;
loc->sub_resource = sub_resource;
loc->x = loc->y = loc->z = 0;
}
@@ -1652,6 +1655,7 @@ svga3dsurface_max_loc(const struct svga3dsurface_cache *cache,
const struct drm_vmw_size *size;
u32 mip;
+ loc->sheet = 0;
loc->sub_resource = sub_resource + 1;
mip = sub_resource % cache->num_mip_levels;
size = &cache->mip[mip].size;
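
The get_loc() change above stops wrapping offsets at one sheet and instead records which multisample sheet the offset falls in. A simplified sketch of that decomposition (parameter names mirror the cache fields; illustrative only, not the driver function):

#include <linux/types.h>

/* Split a byte offset into (multisample sheet, layer, offset in mip chain). */
static void surface_loc_from_offset(u32 offset, u32 sheet_bytes,
				    u32 mip_chain_bytes,
				    u32 *sheet, u32 *layer, u32 *rem)
{
	*sheet = offset / sheet_bytes;
	offset -= *sheet * sheet_bytes;

	*layer = offset / mip_chain_bytes;
	*rem = offset - *layer * mip_chain_bytes;
}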
diff --git a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
index 19fb9e3299e7..193a57f6aae5 100644
--- a/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/device_include/svga_reg.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/**********************************************************
- * Copyright 1998-2015 VMware, Inc.
+ * Copyright 1998-2021 VMware, Inc.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
@@ -98,6 +98,10 @@ typedef uint32 SVGAMobId;
#define SVGA_MAGIC 0x900000UL
#define SVGA_MAKE_ID(ver) (SVGA_MAGIC << 8 | (ver))
+/* Version 3 has the control bar instead of the FIFO */
+#define SVGA_VERSION_3 3
+#define SVGA_ID_3 SVGA_MAKE_ID(SVGA_VERSION_3)
+
/* Version 2 let the address of the frame buffer be unsigned on Win32 */
#define SVGA_VERSION_2 2
#define SVGA_ID_2 SVGA_MAKE_ID(SVGA_VERSION_2)
@@ -129,11 +133,12 @@ typedef uint32 SVGAMobId;
* Interrupts are only supported when the
* SVGA_CAP_IRQMASK capability is present.
*/
-#define SVGA_IRQFLAG_ANY_FENCE 0x1 /* Any fence was passed */
-#define SVGA_IRQFLAG_FIFO_PROGRESS 0x2 /* Made forward progress in the FIFO */
-#define SVGA_IRQFLAG_FENCE_GOAL 0x4 /* SVGA_FIFO_FENCE_GOAL reached */
-#define SVGA_IRQFLAG_COMMAND_BUFFER 0x8 /* Command buffer completed */
-#define SVGA_IRQFLAG_ERROR 0x10 /* Error while processing commands */
+#define SVGA_IRQFLAG_ANY_FENCE (1 << 0) /* Any fence was passed */
+#define SVGA_IRQFLAG_FIFO_PROGRESS (1 << 1) /* Made forward progress in the FIFO */
+#define SVGA_IRQFLAG_FENCE_GOAL (1 << 2) /* SVGA_FIFO_FENCE_GOAL reached */
+#define SVGA_IRQFLAG_COMMAND_BUFFER (1 << 3) /* Command buffer completed */
+#define SVGA_IRQFLAG_ERROR (1 << 4) /* Error while processing commands */
+#define SVGA_IRQFLAG_MAX (1 << 5)
/*
* The byte-size is the size of the actual cursor data,
@@ -286,7 +291,32 @@ enum {
*/
SVGA_REG_GBOBJECT_MEM_SIZE_KB = 76,
- SVGA_REG_TOP = 77, /* Must be 1 more than the last register */
+ /*
+ * These registers are for the addresses of the memory BARs for SVGA3
+ */
+ SVGA_REG_REGS_START_HIGH32 = 77,
+ SVGA_REG_REGS_START_LOW32 = 78,
+ SVGA_REG_FB_START_HIGH32 = 79,
+ SVGA_REG_FB_START_LOW32 = 80,
+
+ /*
+ * A hint register that recommends which quality level the guest should
+ * currently use to define multisample surfaces.
+ *
+ * If the register is SVGA_REG_MSHINT_DISABLED,
+ * the guest is only allowed to use SVGA3D_MS_QUALITY_FULL.
+ *
+ * Otherwise, this is a live value that can change while the VM is
+ * powered on, suggesting which quality level the guest should be
+ * using. Guests are free to ignore the hint and use either
+ * RESOLVE or FULL quality.
+ */
+ SVGA_REG_MSHINT = 81,
+
+ SVGA_REG_IRQ_STATUS = 82,
+ SVGA_REG_DIRTY_TRACKING = 83,
+
+ SVGA_REG_TOP = 84, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
@@ -310,6 +340,17 @@ typedef enum SVGARegGuestDriverId {
SVGA_REG_GUEST_DRIVER_ID_SUBMIT = MAX_UINT32,
} SVGARegGuestDriverId;
+typedef enum SVGARegMSHint {
+ SVGA_REG_MSHINT_DISABLED = 0,
+ SVGA_REG_MSHINT_FULL = 1,
+ SVGA_REG_MSHINT_RESOLVED = 2,
+} SVGARegMSHint;
+
+typedef enum SVGARegDirtyTracking {
+ SVGA_REG_DIRTY_TRACKING_PER_IMAGE = 0,
+ SVGA_REG_DIRTY_TRACKING_PER_SURFACE = 1,
+} SVGARegDirtyTracking;
+
/*
* Guest memory regions (GMRs):
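
The SVGA_REG_REGS_START_HIGH32/LOW32 and SVGA_REG_FB_START_HIGH32/LOW32 pairs added above split 64-bit BAR addresses across two 32-bit registers. A hypothetical helper, not taken from the patch, showing how such a pair would be reassembled:

#include <linux/types.h>

/* Hypothetical helper: recombine a HIGH32/LOW32 register pair. */
static inline u64 svga3_reg_pair_to_addr(u32 high32, u32 low32)
{
	return ((u64)high32 << 32) | low32;
}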
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.c b/drivers/gpu/drm/vmwgfx/ttm_lock.c
deleted file mode 100644
index 5971c72e6d10..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.c
+++ /dev/null
@@ -1,194 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 OR MIT */
-/**************************************************************************
- *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-#include <linux/atomic.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/sched/signal.h>
-#include "ttm_lock.h"
-#include "ttm_object.h"
-
-#define TTM_WRITE_LOCK_PENDING (1 << 0)
-#define TTM_VT_LOCK_PENDING (1 << 1)
-#define TTM_SUSPEND_LOCK_PENDING (1 << 2)
-#define TTM_VT_LOCK (1 << 3)
-#define TTM_SUSPEND_LOCK (1 << 4)
-
-void ttm_lock_init(struct ttm_lock *lock)
-{
- spin_lock_init(&lock->lock);
- init_waitqueue_head(&lock->queue);
- lock->rw = 0;
- lock->flags = 0;
-}
-
-void ttm_read_unlock(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- if (--lock->rw == 0)
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
-static bool __ttm_read_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw >= 0 && lock->flags == 0) {
- ++lock->rw;
- locked = true;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-int ttm_read_lock(struct ttm_lock *lock, bool interruptible)
-{
- int ret = 0;
-
- if (interruptible)
- ret = wait_event_interruptible(lock->queue,
- __ttm_read_lock(lock));
- else
- wait_event(lock->queue, __ttm_read_lock(lock));
- return ret;
-}
-
-static bool __ttm_read_trylock(struct ttm_lock *lock, bool *locked)
-{
- bool block = true;
-
- *locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw >= 0 && lock->flags == 0) {
- ++lock->rw;
- block = false;
- *locked = true;
- } else if (lock->flags == 0) {
- block = false;
- }
- spin_unlock(&lock->lock);
-
- return !block;
-}
-
-int ttm_read_trylock(struct ttm_lock *lock, bool interruptible)
-{
- int ret = 0;
- bool locked;
-
- if (interruptible)
- ret = wait_event_interruptible
- (lock->queue, __ttm_read_trylock(lock, &locked));
- else
- wait_event(lock->queue, __ttm_read_trylock(lock, &locked));
-
- if (unlikely(ret != 0)) {
- BUG_ON(locked);
- return ret;
- }
-
- return (locked) ? 0 : -EBUSY;
-}
-
-void ttm_write_unlock(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->rw = 0;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
-static bool __ttm_write_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw == 0 && ((lock->flags & ~TTM_WRITE_LOCK_PENDING) == 0)) {
- lock->rw = -1;
- lock->flags &= ~TTM_WRITE_LOCK_PENDING;
- locked = true;
- } else {
- lock->flags |= TTM_WRITE_LOCK_PENDING;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-int ttm_write_lock(struct ttm_lock *lock, bool interruptible)
-{
- int ret = 0;
-
- if (interruptible) {
- ret = wait_event_interruptible(lock->queue,
- __ttm_write_lock(lock));
- if (unlikely(ret != 0)) {
- spin_lock(&lock->lock);
- lock->flags &= ~TTM_WRITE_LOCK_PENDING;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
- }
- } else
- wait_event(lock->queue, __ttm_write_lock(lock));
-
- return ret;
-}
-
-void ttm_suspend_unlock(struct ttm_lock *lock)
-{
- spin_lock(&lock->lock);
- lock->flags &= ~TTM_SUSPEND_LOCK;
- wake_up_all(&lock->queue);
- spin_unlock(&lock->lock);
-}
-
-static bool __ttm_suspend_lock(struct ttm_lock *lock)
-{
- bool locked = false;
-
- spin_lock(&lock->lock);
- if (lock->rw == 0) {
- lock->flags &= ~TTM_SUSPEND_LOCK_PENDING;
- lock->flags |= TTM_SUSPEND_LOCK;
- locked = true;
- } else {
- lock->flags |= TTM_SUSPEND_LOCK_PENDING;
- }
- spin_unlock(&lock->lock);
- return locked;
-}
-
-void ttm_suspend_lock(struct ttm_lock *lock)
-{
- wait_event(lock->queue, __ttm_suspend_lock(lock));
-}
diff --git a/drivers/gpu/drm/vmwgfx/ttm_lock.h b/drivers/gpu/drm/vmwgfx/ttm_lock.h
deleted file mode 100644
index af8b28ca546f..000000000000
--- a/drivers/gpu/drm/vmwgfx/ttm_lock.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/**************************************************************************
- *
- * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-/*
- * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
- */
-
-/** @file ttm_lock.h
- * This file implements a simple replacement for the buffer manager use
- * of the DRM heavyweight hardware lock.
- * The lock is a read-write lock. Taking it in read mode and write mode
- * is relatively fast, and intended for in-kernel use only.
- *
- * The vt mode is used only when there is a need to block all
- * user-space processes from validating buffers.
- * It's allowed to leave kernel space with the vt lock held.
- * If a user-space process dies while having the vt-lock,
- * it will be released during the file descriptor release. The vt lock
- * excludes write lock and read lock.
- *
- * The suspend mode is used to lock out all TTM users when preparing for
- * and executing suspend operations.
- *
- */
-
-#ifndef _TTM_LOCK_H_
-#define _TTM_LOCK_H_
-
-#include <linux/atomic.h>
-#include <linux/wait.h>
-
-#include "ttm_object.h"
-
-/**
- * struct ttm_lock
- *
- * @base: ttm base object used solely to release the lock if the client
- * holding the lock dies.
- * @queue: Queue for processes waiting for lock change-of-status.
- * @lock: Spinlock protecting some lock members.
- * @rw: Read-write lock counter. Protected by @lock.
- * @flags: Lock state. Protected by @lock.
- */
-
-struct ttm_lock {
- struct ttm_base_object base;
- wait_queue_head_t queue;
- spinlock_t lock;
- int32_t rw;
- uint32_t flags;
-};
-
-
-/**
- * ttm_lock_init
- *
- * @lock: Pointer to a struct ttm_lock
- * Initializes the lock.
- */
-extern void ttm_lock_init(struct ttm_lock *lock);
-
-/**
- * ttm_read_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a read lock.
- */
-extern void ttm_read_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_read_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Takes the lock in read mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_read_lock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_read_trylock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Tries to take the lock in read mode. If the lock is already held
- * in write mode, the function will return -EBUSY. If the lock is held
- * in vt or suspend mode, the function will sleep until these modes
- * are unlocked.
- *
- * Returns:
- * -EBUSY The lock was already held in write mode.
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_read_trylock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_write_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a write lock.
- */
-extern void ttm_write_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_write_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Takes the lock in write mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
-
-/**
- * ttm_lock_downgrade
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Downgrades a write lock to a read lock.
- */
-extern void ttm_lock_downgrade(struct ttm_lock *lock);
-
-/**
- * ttm_suspend_lock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Takes the lock in suspend mode. Excludes read and write mode.
- */
-extern void ttm_suspend_lock(struct ttm_lock *lock);
-
-/**
- * ttm_suspend_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a suspend lock
- */
-extern void ttm_suspend_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_vt_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- * @tfile: Pointer to a struct ttm_object_file to register the lock with.
- *
- * Takes the lock in vt mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- * -ENOMEM: Out of memory when locking.
- */
-extern int ttm_vt_lock(struct ttm_lock *lock, bool interruptible,
- struct ttm_object_file *tfile);
-
-/**
- * ttm_vt_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a vt lock.
- * Returns:
- * -EINVAL If the lock was not held.
- */
-extern int ttm_vt_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_write_unlock
- *
- * @lock: Pointer to a struct ttm_lock
- *
- * Releases a write lock.
- */
-extern void ttm_write_unlock(struct ttm_lock *lock);
-
-/**
- * ttm_write_lock
- *
- * @lock: Pointer to a struct ttm_lock
- * @interruptible: Interruptible sleeping while waiting for a lock.
- *
- * Takes the lock in write mode.
- * Returns:
- * -ERESTARTSYS If interrupted by a signal and interruptible is true.
- */
-extern int ttm_write_lock(struct ttm_lock *lock, bool interruptible);
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/ttm_object.c b/drivers/gpu/drm/vmwgfx/ttm_object.c
index 112394dd0ab6..04789b2bb2a2 100644
--- a/drivers/gpu/drm/vmwgfx/ttm_object.c
+++ b/drivers/gpu/drm/vmwgfx/ttm_object.c
@@ -540,7 +540,7 @@ ttm_object_device_init(struct ttm_mem_global *mem_glob,
if (ret != 0)
goto out_no_object_hash;
- idr_init(&tdev->idr);
+ idr_init_base(&tdev->idr, 1);
tdev->ops = *ops;
tdev->dmabuf_release = tdev->ops.release;
tdev->ops.release = ttm_prime_dmabuf_release;
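
idr_init_base(&tdev->idr, 1) records that ids start at 1, so the IDR never tracks slot 0 and a handle value of 0 can keep meaning "no object". A minimal sketch of the matching allocation side (illustrative; the real handle setup in ttm_object.c carries more state):

#include <linux/idr.h>

static int alloc_handle(struct idr *idr, void *obj)
{
	/* With idr_init_base(idr, 1) the smallest id handed out here is 1. */
	return idr_alloc(idr, obj, 1, 0, GFP_KERNEL);
}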
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
index 81f525a82b77..05b324825900 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_binding.c
@@ -788,7 +788,7 @@ static void vmw_collect_dirty_view_ids(struct vmw_ctx_binding_state *cbs,
}
/**
- * vmw_binding_emit_set_sr - Issue delayed DX shader resource binding commands
+ * vmw_emit_set_sr - Issue delayed DX shader resource binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
* @shader_slot: The shader slot of the binding.
@@ -832,7 +832,7 @@ static int vmw_emit_set_sr(struct vmw_ctx_binding_state *cbs,
}
/**
- * vmw_binding_emit_set_rt - Issue delayed DX rendertarget binding commands
+ * vmw_emit_set_rt - Issue delayed DX rendertarget binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*/
@@ -1024,7 +1024,7 @@ static void vmw_collect_dirty_vbs(struct vmw_ctx_binding_state *cbs,
}
/**
- * vmw_binding_emit_set_vb - Issue delayed vertex buffer binding commands
+ * vmw_emit_set_vb - Issue delayed vertex buffer binding commands
*
* @cbs: Pointer to the context's struct vmw_ctx_binding_state
*
@@ -1394,7 +1394,7 @@ struct list_head *vmw_binding_state_list(struct vmw_ctx_binding_state *cbs)
}
/**
- * vmwgfx_binding_state_reset - clear a struct vmw_ctx_binding_state
+ * vmw_binding_state_reset - clear a struct vmw_ctx_binding_state
*
* @cbs: Pointer to the struct vmw_ctx_binding_state to be cleared
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 3a438ae4d3f4..cdbd5a870711 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -421,7 +421,7 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
}
/**
- * ttm_bo_cpu_blit - in-kernel cpu blit.
+ * vmw_bo_cpu_blit - in-kernel cpu blit.
*
* @dst: Destination buffer object.
* @dst_offset: Destination offset of blit start in bytes.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index 50e529a01677..04dd49c4c257 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -96,10 +96,6 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
int ret;
uint32_t new_flags;
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
@@ -116,9 +112,7 @@ int vmw_bo_pin_in_placement(struct vmw_private *dev_priv,
vmw_bo_pin_reserved(buf, true);
ttm_bo_unreserve(bo);
-
err:
- ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -144,10 +138,6 @@ int vmw_bo_pin_in_vram_or_gmr(struct vmw_private *dev_priv,
int ret;
uint32_t new_flags;
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
@@ -172,7 +162,6 @@ out_unreserve:
ttm_bo_unreserve(bo);
err:
- ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -228,10 +217,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
placement.num_busy_placement = 1;
placement.busy_placement = &place;
- ret = ttm_write_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
vmw_execbuf_release_pinned_bo(dev_priv);
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
@@ -263,7 +248,6 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv,
ttm_bo_unreserve(bo);
err_unlock:
- ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -287,10 +271,6 @@ int vmw_bo_unpin(struct vmw_private *dev_priv,
struct ttm_buffer_object *bo = &buf->base;
int ret;
- ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
- if (unlikely(ret != 0))
- return ret;
-
ret = ttm_bo_reserve(bo, interruptible, false, NULL);
if (unlikely(ret != 0))
goto err;
@@ -300,7 +280,6 @@ int vmw_bo_unpin(struct vmw_private *dev_priv,
ttm_bo_unreserve(bo);
err:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -460,6 +439,7 @@ void vmw_bo_bo_free(struct ttm_buffer_object *bo)
WARN_ON(vmw_bo->dirty);
WARN_ON(!RB_EMPTY_ROOT(&vmw_bo->res_tree));
vmw_bo_unmap(vmw_bo);
+ dma_resv_fini(&bo->base._resv);
kfree(vmw_bo);
}
@@ -512,6 +492,11 @@ int vmw_bo_create_kernel(struct vmw_private *dev_priv, unsigned long size,
if (unlikely(ret))
goto error_free;
+
+ bo->base.size = size;
+ dma_resv_init(&bo->base._resv);
+ drm_vma_node_reset(&bo->base.vma_node);
+
ret = ttm_bo_init_reserved(&dev_priv->bdev, bo, size,
ttm_bo_type_device, placement, 0,
&ctx, NULL, NULL, NULL);
@@ -570,6 +555,10 @@ int vmw_bo_init(struct vmw_private *dev_priv,
if (unlikely(ret))
return ret;
+ vmw_bo->base.base.size = size;
+ dma_resv_init(&vmw_bo->base.base._resv);
+ drm_vma_node_reset(&vmw_bo->base.base.vma_node);
+
ret = ttm_bo_init_reserved(bdev, &vmw_bo->base, size,
ttm_bo_type_device, placement,
0, &ctx, NULL, NULL, bo_free);
@@ -611,7 +600,7 @@ static void vmw_user_bo_release(struct ttm_base_object **p_base)
/**
- * vmw_user_bo_ref_obj-release - TTM synccpu reference object release callback
+ * vmw_user_bo_ref_obj_release - TTM synccpu reference object release callback
* for vmw user buffer objects
*
* @base: Pointer to the TTM base object
@@ -896,10 +885,6 @@ int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
uint32_t handle;
int ret;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
req->size, false, &handle, &vbo,
NULL);
@@ -914,7 +899,6 @@ int vmw_bo_alloc_ioctl(struct drm_device *dev, void *data,
vmw_bo_unreference(&vbo);
out_no_bo:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1109,10 +1093,6 @@ int vmw_dumb_create(struct drm_file *file_priv,
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = vmw_user_bo_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
args->size, false, &args->handle,
&vbo, NULL);
@@ -1121,7 +1101,6 @@ int vmw_dumb_create(struct drm_file *file_priv,
vmw_bo_unreference(&vbo);
out_no_bo:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
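
With reservation_sem gone, the pin/unpin helpers above rely only on the per-BO reservation taken by ttm_bo_reserve(). A generic sketch of that flow (assumptions: plain TTM pin semantics, not vmwgfx's vmw_bo_pin_reserved() implementation; placement setup omitted):

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

static int pin_bo_in_placement(struct ttm_buffer_object *bo,
			       struct ttm_placement *placement,
			       bool interruptible)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = interruptible,
		.no_wait_gpu = false,
	};
	int ret;

	ret = ttm_bo_reserve(bo, interruptible, false, NULL);	/* per-BO lock */
	if (ret)
		return ret;

	ret = ttm_bo_validate(bo, placement, &ctx);	/* move into placement */
	if (!ret)
		ttm_bo_pin(bo);				/* keep it resident */

	ttm_bo_unreserve(bo);
	return ret;
}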
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
index 20246a7c97c9..027d7d504e78 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmd.c
@@ -31,15 +31,10 @@
#include "vmwgfx_drv.h"
-struct vmw_temp_set_context {
- SVGA3dCmdHeader header;
- SVGA3dCmdDXTempSetContext body;
-};
-
bool vmw_supports_3d(struct vmw_private *dev_priv)
{
uint32_t fifo_min, hwversion;
- const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ const struct vmw_fifo_state *fifo = dev_priv->fifo;
if (!(dev_priv->capabilities & SVGA_CAP_3D))
return false;
@@ -61,6 +56,8 @@ bool vmw_supports_3d(struct vmw_private *dev_priv)
if (!(dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO))
return false;
+ BUG_ON(vmw_is_svga_v3(dev_priv));
+
fifo_min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
if (fifo_min <= SVGA_FIFO_3D_HWVERSION * sizeof(unsigned int))
return false;
@@ -98,16 +95,20 @@ bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv)
return false;
}
-int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv)
{
+ struct vmw_fifo_state *fifo;
uint32_t max;
uint32_t min;
- fifo->dx = false;
+ if (!dev_priv->fifo_mem)
+ return NULL;
+
+ fifo = kzalloc(sizeof(*fifo), GFP_KERNEL);
+ if (!fifo)
+ return ERR_PTR(-ENOMEM);
fifo->static_buffer_size = VMWGFX_FIFO_STATIC_SIZE;
fifo->static_buffer = vmalloc(fifo->static_buffer_size);
if (unlikely(fifo->static_buffer == NULL))
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
fifo->dynamic_buffer = NULL;
fifo->reserved_size = 0;
@@ -115,20 +116,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mutex_init(&fifo->fifo_mutex);
init_rwsem(&fifo->rwsem);
-
- DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
- DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
- DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
-
- dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
- dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
- dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
-
- vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
- SVGA_REG_ENABLE_HIDE);
-
- vmw_write(dev_priv, SVGA_REG_TRACES, 0);
-
min = 4;
if (dev_priv->capabilities & SVGA_CAP_EXTENDED_FIFO)
min = vmw_read(dev_priv, SVGA_REG_MEM_REGS);
@@ -155,35 +142,23 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
(unsigned int) max,
(unsigned int) min,
(unsigned int) fifo->capabilities);
-
- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, dev_priv->last_read_seqno);
-
- return 0;
+ return fifo;
}
void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
u32 *fifo_mem = dev_priv->fifo_mem;
-
- if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
+ if (fifo_mem && cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
+
}
-void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
+void vmw_fifo_destroy(struct vmw_private *dev_priv)
{
- vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
- while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
- ;
-
- dev_priv->last_read_seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+ struct vmw_fifo_state *fifo = dev_priv->fifo;
- vmw_write(dev_priv, SVGA_REG_CONFIG_DONE,
- dev_priv->config_done_state);
- vmw_write(dev_priv, SVGA_REG_ENABLE,
- dev_priv->enable_state);
- vmw_write(dev_priv, SVGA_REG_TRACES,
- dev_priv->traces_state);
+ if (!fifo)
+ return;
if (likely(fifo->static_buffer != NULL)) {
vfree(fifo->static_buffer);
@@ -194,6 +169,8 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vfree(fifo->dynamic_buffer);
fifo->dynamic_buffer = NULL;
}
+ kfree(fifo);
+ dev_priv->fifo = NULL;
}
static bool vmw_fifo_is_full(struct vmw_private *dev_priv, uint32_t bytes)
@@ -289,7 +266,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
static void *vmw_local_fifo_reserve(struct vmw_private *dev_priv,
uint32_t bytes)
{
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct vmw_fifo_state *fifo_state = dev_priv->fifo;
u32 *fifo_mem = dev_priv->fifo_mem;
uint32_t max;
uint32_t min;
@@ -438,16 +415,12 @@ static void vmw_fifo_slow_copy(struct vmw_fifo_state *fifo_state,
static void vmw_local_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes)
{
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct vmw_fifo_state *fifo_state = dev_priv->fifo;
uint32_t next_cmd = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_NEXT_CMD);
uint32_t max = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MAX);
uint32_t min = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_MIN);
bool reserveable = fifo_state->capabilities & SVGA_FIFO_CAP_RESERVE;
- if (fifo_state->dx)
- bytes += sizeof(struct vmw_temp_set_context);
-
- fifo_state->dx = false;
BUG_ON((bytes & 3) != 0);
BUG_ON(bytes > fifo_state->reserved_size);
@@ -495,7 +468,7 @@ void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes)
/**
- * vmw_fifo_commit_flush - Commit fifo space and flush any buffered commands.
+ * vmw_cmd_commit_flush - Commit fifo space and flush any buffered commands.
*
* @dev_priv: Pointer to device private structure.
* @bytes: Number of bytes to commit.
@@ -509,7 +482,7 @@ void vmw_cmd_commit_flush(struct vmw_private *dev_priv, uint32_t bytes)
}
/**
- * vmw_fifo_flush - Flush any buffered commands and make sure command processing
+ * vmw_cmd_flush - Flush any buffered commands and make sure command processing
* starts.
*
* @dev_priv: Pointer to device private structure.
@@ -527,7 +500,6 @@ int vmw_cmd_flush(struct vmw_private *dev_priv, bool interruptible)
int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
{
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
struct svga_fifo_cmd_fence *cmd_fence;
u32 *fm;
int ret = 0;
@@ -546,7 +518,7 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
*seqno = atomic_add_return(1, &dev_priv->marker_seq);
} while (*seqno == 0);
- if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
+ if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE)) {
/*
* Don't request hardware to send a fence. The
@@ -561,22 +533,22 @@ int vmw_cmd_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
cmd_fence = (struct svga_fifo_cmd_fence *) fm;
cmd_fence->fence = *seqno;
vmw_cmd_commit_flush(dev_priv, bytes);
- vmw_update_seqno(dev_priv, fifo_state);
+ vmw_update_seqno(dev_priv);
out_err:
return ret;
}
/**
- * vmw_fifo_emit_dummy_legacy_query - emits a dummy query to the fifo using
+ * vmw_cmd_emit_dummy_legacy_query - emits a dummy query to the fifo using
* legacy query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
- * See the vmw_fifo_emit_dummy_query documentation.
+ * See the vmw_cmd_emit_dummy_query documentation.
*/
-static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
+static int vmw_cmd_emit_dummy_legacy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
/*
@@ -614,16 +586,16 @@ static int vmw_fifo_emit_dummy_legacy_query(struct vmw_private *dev_priv,
}
/**
- * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * vmw_cmd_emit_dummy_gb_query - emits a dummy query to the fifo using
* guest-backed resource query commands.
*
* @dev_priv: The device private structure.
* @cid: The hardware context id used for the query.
*
- * See the vmw_fifo_emit_dummy_query documentation.
+ * See the vmw_cmd_emit_dummy_query documentation.
*/
-static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
- uint32_t cid)
+static int vmw_cmd_emit_dummy_gb_query(struct vmw_private *dev_priv,
+ uint32_t cid)
{
/*
* A query wait without a preceding query end will
@@ -656,7 +628,7 @@ static int vmw_fifo_emit_dummy_gb_query(struct vmw_private *dev_priv,
/**
- * vmw_fifo_emit_dummy_gb_query - emits a dummy query to the fifo using
+ * vmw_cmd_emit_dummy_query - emits a dummy query to the fifo using
* appropriate resource query commands.
*
* @dev_priv: The device private structure.
@@ -677,7 +649,27 @@ int vmw_cmd_emit_dummy_query(struct vmw_private *dev_priv,
uint32_t cid)
{
if (dev_priv->has_mob)
- return vmw_fifo_emit_dummy_gb_query(dev_priv, cid);
+ return vmw_cmd_emit_dummy_gb_query(dev_priv, cid);
- return vmw_fifo_emit_dummy_legacy_query(dev_priv, cid);
+ return vmw_cmd_emit_dummy_legacy_query(dev_priv, cid);
+}
+
+
+/**
+ * vmw_cmd_supported - returns true if the given device supports
+ * command queues.
+ *
+ * @vmw: The device private structure.
+ *
+ * Returns true if we can issue commands.
+ */
+bool vmw_cmd_supported(struct vmw_private *vmw)
+{
+ if ((vmw->capabilities & (SVGA_CAP_COMMAND_BUFFERS |
+ SVGA_CAP_CMD_BUFFERS_2)) != 0)
+ return true;
+ /*
+ * We have FIFO commands
+ */
+ return vmw->fifo_mem != NULL;
}
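
An illustrative caller of the new helper (not code from the patch; assumes vmwgfx_drv.h): command submission works either through command buffers (SVGA_CAP_COMMAND_BUFFERS / SVGA_CAP_CMD_BUFFERS_2) or through FIFO memory, and vmw_cmd_supported() is the single test for whether the device can accept commands at all.

#include "vmwgfx_drv.h"

static int check_cmd_path(struct vmw_private *vmw)
{
	if (!vmw_cmd_supported(vmw))
		return -ENODEV;	/* neither command buffers nor FIFO memory */

	/*
	 * Commands can be reserved and committed from here on, e.g. via
	 * VMW_CMD_RESERVE() and vmw_cmd_commit().
	 */
	return 0;
}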
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 2e23e537cdf5..05ca310ed61a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -295,7 +295,7 @@ void vmw_cmdbuf_header_free(struct vmw_cmdbuf_header *header)
/**
- * vmw_cmbuf_header_submit: Submit a command buffer to hardware.
+ * vmw_cmdbuf_header_submit: Submit a command buffer to hardware.
*
* @header: The header of the buffer to submit.
*/
@@ -620,7 +620,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
}
/**
- * vmw_cmdbuf_man idle - Check whether the command buffer manager is idle.
+ * vmw_cmdbuf_man_idle - Check whether the command buffer manager is idle.
*
* @man: The command buffer manager.
* @check_preempted: Check also the preempted queue for pending command buffers.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 4a5a3e246216..3ed9914cb994 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -748,10 +748,6 @@ static int vmw_context_define(struct drm_device *dev, void *data,
((dev_priv->has_mob) ? vmw_cmdbuf_res_man_size() : 0) +
+ VMW_IDA_ACC_SIZE + TTM_OBJ_EXTRA_SIZE;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
vmw_user_context_size,
&ttm_opt_ctx);
@@ -759,7 +755,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for context"
" creation.\n");
- goto out_unlock;
+ goto out_ret;
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
@@ -767,7 +763,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
ttm_mem_global_free(vmw_mem_glob(dev_priv),
vmw_user_context_size);
ret = -ENOMEM;
- goto out_unlock;
+ goto out_ret;
}
res = &ctx->res;
@@ -780,7 +776,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
ret = vmw_context_init(dev_priv, res, vmw_user_context_free, dx);
if (unlikely(ret != 0))
- goto out_unlock;
+ goto out_ret;
tmp = vmw_resource_reference(&ctx->res);
ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
@@ -794,8 +790,7 @@ static int vmw_context_define(struct drm_device *dev, void *data,
arg->cid = ctx->base.handle;
out_err:
vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
+out_ret:
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
index d782b49c7236..b40aa002bf2b 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c
@@ -653,7 +653,7 @@ int vmw_cotable_notify(struct vmw_resource *res, int id)
}
/**
- * vmw_cotable_add_view - add a view to the cotable's list of active views.
+ * vmw_cotable_add_resource - add a view to the cotable's list of active views.
*
* @res: pointer struct vmw_resource representing the cotable.
* @head: pointer to the struct list_head member of the resource, dedicated
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 399f70d340eb..5cf3a5bf539f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -31,12 +31,13 @@
#include <linux/pci.h>
#include <linux/mem_encrypt.h>
+#include <drm/drm_aperture.h>
#include <drm/drm_drv.h>
-#include <drm/drm_fb_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_sysfs.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <generated/utsrelease.h>
#include "ttm_object.h"
#include "vmwgfx_binding.h"
@@ -50,7 +51,7 @@
#define VMWGFX_VALIDATION_MEM_GRAN (16*PAGE_SIZE)
-/**
+/*
* Fully encoded drm commands. Might move to vmw_drm.h
*/
@@ -246,6 +247,7 @@ static const struct drm_ioctl_desc vmw_ioctls[] = {
static const struct pci_device_id vmw_pci_id_list[] = {
{ PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA2) },
+ { PCI_DEVICE(0x15ad, VMWGFX_PCI_ID_SVGA3) },
{ }
};
MODULE_DEVICE_TABLE(pci, vmw_pci_id_list);
@@ -393,6 +395,60 @@ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
return ret;
}
+static int vmw_device_init(struct vmw_private *dev_priv)
+{
+ bool uses_fb_traces = false;
+
+ DRM_INFO("width %d\n", vmw_read(dev_priv, SVGA_REG_WIDTH));
+ DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
+ DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
+
+ dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
+ dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
+ dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
+
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE |
+ SVGA_REG_ENABLE_HIDE);
+
+ uses_fb_traces = !vmw_cmd_supported(dev_priv) &&
+ (dev_priv->capabilities & SVGA_CAP_TRACES) != 0;
+
+ vmw_write(dev_priv, SVGA_REG_TRACES, uses_fb_traces);
+ dev_priv->fifo = vmw_fifo_create(dev_priv);
+ if (IS_ERR(dev_priv->fifo)) {
+ int err = PTR_ERR(dev_priv->fifo);
+ dev_priv->fifo = NULL;
+ return err;
+ } else if (!dev_priv->fifo) {
+ vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
+ }
+
+ dev_priv->last_read_seqno = vmw_fence_read(dev_priv);
+ atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
+ return 0;
+}
+
+static void vmw_device_fini(struct vmw_private *vmw)
+{
+ /*
+ * Legacy sync
+ */
+ vmw_write(vmw, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
+ while (vmw_read(vmw, SVGA_REG_BUSY) != 0)
+ ;
+
+ vmw->last_read_seqno = vmw_fence_read(vmw);
+
+ vmw_write(vmw, SVGA_REG_CONFIG_DONE,
+ vmw->config_done_state);
+ vmw_write(vmw, SVGA_REG_ENABLE,
+ vmw->enable_state);
+ vmw_write(vmw, SVGA_REG_TRACES,
+ vmw->traces_state);
+
+ vmw_fifo_destroy(vmw);
+}
+
/**
* vmw_request_device_late - Perform late device setup
*
@@ -433,9 +489,9 @@ static int vmw_request_device(struct vmw_private *dev_priv)
{
int ret;
- ret = vmw_fifo_init(dev_priv, &dev_priv->fifo);
+ ret = vmw_device_init(dev_priv);
if (unlikely(ret != 0)) {
- DRM_ERROR("Unable to initialize FIFO.\n");
+ DRM_ERROR("Unable to initialize the device.\n");
return ret;
}
vmw_fence_fifo_up(dev_priv->fman);
@@ -469,7 +525,7 @@ out_no_query_bo:
vmw_cmdbuf_man_destroy(dev_priv->cman);
out_no_mob:
vmw_fence_fifo_down(dev_priv->fman);
- vmw_fifo_release(dev_priv, &dev_priv->fifo);
+ vmw_device_fini(dev_priv);
return ret;
}
@@ -517,7 +573,7 @@ static void vmw_release_device_late(struct vmw_private *dev_priv)
if (dev_priv->cman)
vmw_cmdbuf_man_destroy(dev_priv->cman);
- vmw_fifo_release(dev_priv, &dev_priv->fifo);
+ vmw_device_fini(dev_priv);
}
/*
@@ -638,6 +694,8 @@ static void vmw_vram_manager_fini(struct vmw_private *dev_priv)
static int vmw_setup_pci_resources(struct vmw_private *dev,
unsigned long pci_id)
{
+ resource_size_t rmmio_start;
+ resource_size_t rmmio_size;
resource_size_t fifo_start;
resource_size_t fifo_size;
int ret;
@@ -649,23 +707,45 @@ static int vmw_setup_pci_resources(struct vmw_private *dev,
if (ret)
return ret;
- dev->io_start = pci_resource_start(pdev, 0);
- dev->vram_start = pci_resource_start(pdev, 1);
- dev->vram_size = pci_resource_len(pdev, 1);
- fifo_start = pci_resource_start(pdev, 2);
- fifo_size = pci_resource_len(pdev, 2);
-
- DRM_INFO("FIFO at %pa size is %llu kiB\n",
- &fifo_start, (uint64_t)fifo_size / 1024);
- dev->fifo_mem = devm_memremap(dev->drm.dev,
- fifo_start,
- fifo_size,
- MEMREMAP_WB);
-
- if (IS_ERR(dev->fifo_mem)) {
- DRM_ERROR("Failed mapping FIFO memory.\n");
+ dev->pci_id = pci_id;
+ if (pci_id == VMWGFX_PCI_ID_SVGA3) {
+ rmmio_start = pci_resource_start(pdev, 0);
+ rmmio_size = pci_resource_len(pdev, 0);
+ dev->vram_start = pci_resource_start(pdev, 2);
+ dev->vram_size = pci_resource_len(pdev, 2);
+
+ DRM_INFO("Register MMIO at 0x%pa size is %llu kiB\n",
+ &rmmio_start, (uint64_t)rmmio_size / 1024);
+ dev->rmmio = devm_ioremap(dev->drm.dev,
+ rmmio_start,
+ rmmio_size);
+ if (IS_ERR(dev->rmmio)) {
+ DRM_ERROR("Failed mapping registers mmio memory.\n");
+ pci_release_regions(pdev);
+ return PTR_ERR(dev->rmmio);
+ }
+ } else if (pci_id == VMWGFX_PCI_ID_SVGA2) {
+ dev->io_start = pci_resource_start(pdev, 0);
+ dev->vram_start = pci_resource_start(pdev, 1);
+ dev->vram_size = pci_resource_len(pdev, 1);
+ fifo_start = pci_resource_start(pdev, 2);
+ fifo_size = pci_resource_len(pdev, 2);
+
+ DRM_INFO("FIFO at %pa size is %llu kiB\n",
+ &fifo_start, (uint64_t)fifo_size / 1024);
+ dev->fifo_mem = devm_memremap(dev->drm.dev,
+ fifo_start,
+ fifo_size,
+ MEMREMAP_WB);
+
+ if (IS_ERR(dev->fifo_mem)) {
+ DRM_ERROR("Failed mapping FIFO memory.\n");
+ pci_release_regions(pdev);
+ return PTR_ERR(dev->fifo_mem);
+ }
+ } else {
pci_release_regions(pdev);
- return PTR_ERR(dev->fifo_mem);
+ return -EINVAL;
}
/*
@@ -684,13 +764,16 @@ static int vmw_detect_version(struct vmw_private *dev)
{
uint32_t svga_id;
- vmw_write(dev, SVGA_REG_ID, SVGA_ID_2);
+ vmw_write(dev, SVGA_REG_ID, vmw_is_svga_v3(dev) ?
+ SVGA_ID_3 : SVGA_ID_2);
svga_id = vmw_read(dev, SVGA_REG_ID);
- if (svga_id != SVGA_ID_2) {
+ if (svga_id != SVGA_ID_2 && svga_id != SVGA_ID_3) {
DRM_ERROR("Unsupported SVGA ID 0x%x on chipset 0x%x\n",
svga_id, dev->vmw_chipset);
return -ENOSYS;
}
+ BUG_ON(vmw_is_svga_v3(dev) && (svga_id != SVGA_ID_3));
+ DRM_INFO("Running on SVGA version %d.\n", (svga_id & 0xff));
return 0;
}
@@ -699,16 +782,13 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
int ret;
enum vmw_res_type i;
bool refuse_dma = false;
- char host_log[100] = {0};
struct pci_dev *pdev = to_pci_dev(dev_priv->drm.dev);
dev_priv->vmw_chipset = pci_id;
- dev_priv->last_read_seqno = (uint32_t) -100;
dev_priv->drm.dev_private = dev_priv;
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->binding_mutex);
- ttm_lock_init(&dev_priv->reservation_sem);
spin_lock_init(&dev_priv->resource_lock);
spin_lock_init(&dev_priv->hw_lock);
spin_lock_init(&dev_priv->waiter_lock);
@@ -724,7 +804,7 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
for (i = vmw_res_context; i < vmw_res_max; ++i) {
- idr_init(&dev_priv->res_idr[i]);
+ idr_init_base(&dev_priv->res_idr[i], 1);
INIT_LIST_HEAD(&dev_priv->res_lru[i]);
}
@@ -825,6 +905,8 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
vmw_print_capabilities(dev_priv->capabilities);
if (dev_priv->capabilities & SVGA_CAP_CAP2_REGISTER)
vmw_print_capabilities2(dev_priv->capabilities2);
+ DRM_INFO("Supports command queues = %d\n",
+ vmw_cmd_supported((dev_priv)));
ret = vmw_dma_masks(dev_priv);
if (unlikely(ret != 0))
@@ -966,11 +1048,11 @@ static int vmw_driver_load(struct vmw_private *dev_priv, u32 pci_id)
DRM_INFO("SM4_1 support available.\n");
if (dev_priv->sm_type == VMW_SM_4)
DRM_INFO("SM4 support available.\n");
+ DRM_INFO("Running without reservation semaphore\n");
- snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
- VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
- VMWGFX_DRIVER_PATCHLEVEL);
- vmw_host_log(host_log);
+ vmw_host_printf("vmwgfx: Module Version: %d.%d.%d (kernel: %s)",
+ VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
+ VMWGFX_DRIVER_PATCHLEVEL, UTS_RELEASE);
if (dev_priv->enable_fb) {
vmw_fifo_resource_inc(dev_priv);
@@ -1179,7 +1261,7 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv)
struct ttm_resource_manager *man = ttm_manager_type(&dev_priv->bdev, TTM_PL_VRAM);
if (!ttm_resource_manager_used(man)) {
- vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE);
+ vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_ENABLE);
ttm_resource_manager_set_used(man, true);
}
}
@@ -1191,9 +1273,7 @@ static void __vmw_svga_enable(struct vmw_private *dev_priv)
*/
void vmw_svga_enable(struct vmw_private *dev_priv)
{
- (void) ttm_read_lock(&dev_priv->reservation_sem, false);
__vmw_svga_enable(dev_priv);
- ttm_read_unlock(&dev_priv->reservation_sem);
}
/**
@@ -1238,7 +1318,6 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
*
*/
vmw_kms_lost_device(&dev_priv->drm);
- ttm_write_lock(&dev_priv->reservation_sem, false);
if (ttm_resource_manager_used(man)) {
if (ttm_resource_manager_evict_all(&dev_priv->bdev, man))
DRM_ERROR("Failed evicting VRAM buffers.\n");
@@ -1247,7 +1326,6 @@ void vmw_svga_disable(struct vmw_private *dev_priv)
SVGA_REG_ENABLE_HIDE |
SVGA_REG_ENABLE_ENABLE);
}
- ttm_write_unlock(&dev_priv->reservation_sem);
}
static void vmw_remove(struct pci_dev *pdev)
@@ -1287,14 +1365,12 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
* Once user-space processes have been frozen, we can release
* the lock again.
*/
- ttm_suspend_lock(&dev_priv->reservation_sem);
dev_priv->suspend_locked = true;
break;
case PM_POST_HIBERNATION:
case PM_POST_RESTORE:
if (READ_ONCE(dev_priv->suspend_locked)) {
dev_priv->suspend_locked = false;
- ttm_suspend_unlock(&dev_priv->reservation_sem);
}
break;
default:
@@ -1353,20 +1429,16 @@ static int vmw_pm_freeze(struct device *kdev)
int ret;
/*
- * Unlock for vmw_kms_suspend.
* No user-space processes should be running now.
*/
- ttm_suspend_unlock(&dev_priv->reservation_sem);
ret = vmw_kms_suspend(&dev_priv->drm);
if (ret) {
- ttm_suspend_lock(&dev_priv->reservation_sem);
DRM_ERROR("Failed to freeze modesetting.\n");
return ret;
}
if (dev_priv->enable_fb)
vmw_fb_off(dev_priv);
- ttm_suspend_lock(&dev_priv->reservation_sem);
vmw_execbuf_release_pinned_bo(dev_priv);
vmw_resource_evict_all(dev_priv);
vmw_release_device_early(dev_priv);
@@ -1379,7 +1451,6 @@ static int vmw_pm_freeze(struct device *kdev)
vmw_fifo_resource_inc(dev_priv);
WARN_ON(vmw_request_device_late(dev_priv));
dev_priv->suspend_locked = false;
- ttm_suspend_unlock(&dev_priv->reservation_sem);
if (dev_priv->suspend_state)
vmw_kms_resume(dev);
if (dev_priv->enable_fb)
@@ -1401,8 +1472,7 @@ static int vmw_pm_restore(struct device *kdev)
struct vmw_private *dev_priv = vmw_priv(dev);
int ret;
- vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
- (void) vmw_read(dev_priv, SVGA_REG_ID);
+ vmw_detect_version(dev_priv);
if (dev_priv->enable_fb)
vmw_fifo_resource_inc(dev_priv);
@@ -1416,7 +1486,6 @@ static int vmw_pm_restore(struct device *kdev)
vmw_fence_fifo_up(dev_priv->fman);
dev_priv->suspend_locked = false;
- ttm_suspend_unlock(&dev_priv->reservation_sem);
if (dev_priv->suspend_state)
vmw_kms_resume(&dev_priv->drm);
@@ -1440,8 +1509,8 @@ static const struct file_operations vmwgfx_driver_fops = {
.release = drm_release,
.unlocked_ioctl = vmw_unlocked_ioctl,
.mmap = vmw_mmap,
- .poll = vmw_fops_poll,
- .read = vmw_fops_read,
+ .poll = drm_poll,
+ .read = drm_read,
#if defined(CONFIG_COMPAT)
.compat_ioctl = vmw_compat_ioctl,
#endif
@@ -1490,7 +1559,7 @@ static int vmw_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct vmw_private *vmw;
int ret;
- ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
+ ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "svgadrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index c6b1eb5952bc..d1cef3b69e9d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,7 +40,6 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_execbuf_util.h>
-#include "ttm_lock.h"
#include "ttm_object.h"
#include "vmwgfx_fence.h"
@@ -67,6 +66,7 @@
#define VMWGFX_ENABLE_SCREEN_TARGET_OTABLE 1
#define VMWGFX_PCI_ID_SVGA2 0x0405
+#define VMWGFX_PCI_ID_SVGA3 0x0406
/*
* Perhaps we should have sysfs entries for these.
@@ -285,7 +285,6 @@ struct vmw_fifo_state {
uint32_t capabilities;
struct mutex fifo_mutex;
struct rw_semaphore rwsem;
- bool dx;
};
/**
@@ -486,14 +485,14 @@ struct vmw_private {
struct drm_device drm;
struct ttm_device bdev;
- struct vmw_fifo_state fifo;
-
struct drm_vma_offset_manager vma_manager;
+ unsigned long pci_id;
u32 vmw_chipset;
resource_size_t io_start;
resource_size_t vram_start;
resource_size_t vram_size;
resource_size_t prim_bb_mem;
+ void __iomem *rmmio;
u32 *fifo_mem;
resource_size_t fifo_mem_size;
uint32_t fb_max_width;
@@ -594,11 +593,6 @@ struct vmw_private {
atomic_t num_fifo_resources;
/*
- * Replace this with an rwsem as soon as we have down_xx_interruptible()
- */
- struct ttm_lock reservation_sem;
-
- /*
* Query processing. These members
* are protected by the cmdbuf mutex.
*/
@@ -629,6 +623,7 @@ struct vmw_private {
*/
struct vmw_otable_batch otable_batch;
+ struct vmw_fifo_state *fifo;
struct vmw_cmdbuf_man *cman;
DECLARE_BITMAP(irqthread_pending, VMW_IRQTHREAD_MAX);
@@ -652,6 +647,14 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
}
/*
+ * SVGA v3 has mmio register access and lacks fifo cmds
+ */
+static inline bool vmw_is_svga_v3(const struct vmw_private *dev)
+{
+ return dev->pci_id == VMWGFX_PCI_ID_SVGA3;
+}
+
+/*
* The locking here is fine-grained, so that it is performed once
* for every read- and write operation. This is of course costly, but we
* don't perform much register access in the timing critical paths anyway.
@@ -661,10 +664,14 @@ static inline struct vmw_fpriv *vmw_fpriv(struct drm_file *file_priv)
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
- spin_lock(&dev_priv->hw_lock);
- outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
- outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock(&dev_priv->hw_lock);
+ if (vmw_is_svga_v3(dev_priv)) {
+ iowrite32(value, dev_priv->rmmio + offset);
+ } else {
+ spin_lock(&dev_priv->hw_lock);
+ outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+ outl(value, dev_priv->io_start + SVGA_VALUE_PORT);
+ spin_unlock(&dev_priv->hw_lock);
+ }
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
@@ -672,10 +679,14 @@ static inline uint32_t vmw_read(struct vmw_private *dev_priv,
{
u32 val;
- spin_lock(&dev_priv->hw_lock);
- outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
- val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
- spin_unlock(&dev_priv->hw_lock);
+ if (vmw_is_svga_v3(dev_priv)) {
+ val = ioread32(dev_priv->rmmio + offset);
+ } else {
+ spin_lock(&dev_priv->hw_lock);
+ outl(offset, dev_priv->io_start + SVGA_INDEX_PORT);
+ val = inl(dev_priv->io_start + SVGA_VALUE_PORT);
+ spin_unlock(&dev_priv->hw_lock);
+ }
return val;
}
@@ -938,19 +949,14 @@ extern int vmw_present_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern __poll_t vmw_fops_poll(struct file *filp,
- struct poll_table_struct *wait);
-extern ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset);
/**
* Fifo utilities - vmwgfx_fifo.c
*/
-extern int vmw_fifo_init(struct vmw_private *dev_priv,
- struct vmw_fifo_state *fifo);
-extern void vmw_fifo_release(struct vmw_private *dev_priv,
- struct vmw_fifo_state *fifo);
+extern struct vmw_fifo_state *vmw_fifo_create(struct vmw_private *dev_priv);
+extern void vmw_fifo_destroy(struct vmw_private *dev_priv);
+extern bool vmw_cmd_supported(struct vmw_private *vmw);
extern void *
vmw_cmd_ctx_reserve(struct vmw_private *dev_priv, uint32_t bytes, int ctx_id);
extern void vmw_cmd_commit(struct vmw_private *dev_priv, uint32_t bytes);
@@ -976,6 +982,31 @@ extern int vmw_cmd_flush(struct vmw_private *dev_priv,
#define VMW_CMD_RESERVE(__priv, __bytes) \
VMW_CMD_CTX_RESERVE(__priv, __bytes, SVGA3D_INVALID_ID)
+
+/**
+ * vmw_fifo_caps - Returns the capabilities of the FIFO command
+ * queue or 0 if fifo memory isn't present.
+ * @dev_priv: The device private context
+ */
+static inline uint32_t vmw_fifo_caps(const struct vmw_private *dev_priv)
+{
+ if (!dev_priv->fifo_mem || !dev_priv->fifo)
+ return 0;
+ return dev_priv->fifo->capabilities;
+}
+
+
+/**
+ * vmw_is_cursor_bypass3_enabled - Returns TRUE iff Cursor Bypass 3
+ * is enabled in the FIFO.
+ * @dev_priv: The device private context
+ */
+static inline bool
+vmw_is_cursor_bypass3_enabled(const struct vmw_private *dev_priv)
+{
+ return (vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_CURSOR_BYPASS_3) != 0;
+}
+
/**
* TTM glue - vmwgfx_ttm_glue.c
*/
@@ -1085,9 +1116,6 @@ bool vmw_cmd_describe(const void *buf, u32 *size, char const **cmd);
* IRQs and wating - vmwgfx_irq.c
*/
-extern int vmw_wait_seqno(struct vmw_private *dev_priv, bool lazy,
- uint32_t seqno, bool interruptible,
- unsigned long timeout);
extern int vmw_irq_install(struct drm_device *dev, int irq);
extern void vmw_irq_uninstall(struct drm_device *dev);
extern bool vmw_seqno_passed(struct vmw_private *dev_priv,
@@ -1098,8 +1126,7 @@ extern int vmw_fallback_wait(struct vmw_private *dev_priv,
uint32_t seqno,
bool interruptible,
unsigned long timeout);
-extern void vmw_update_seqno(struct vmw_private *dev_priv,
- struct vmw_fifo_state *fifo_state);
+extern void vmw_update_seqno(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_add(struct vmw_private *dev_priv);
extern void vmw_seqno_waiter_remove(struct vmw_private *dev_priv);
extern void vmw_goal_waiter_add(struct vmw_private *dev_priv);
@@ -1114,10 +1141,29 @@ extern void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
* Kernel framebuffer - vmwgfx_fb.c
*/
+#ifdef CONFIG_DRM_FBDEV_EMULATION
int vmw_fb_init(struct vmw_private *vmw_priv);
int vmw_fb_close(struct vmw_private *dev_priv);
int vmw_fb_off(struct vmw_private *vmw_priv);
int vmw_fb_on(struct vmw_private *vmw_priv);
+#else
+static inline int vmw_fb_init(struct vmw_private *vmw_priv)
+{
+ return 0;
+}
+static inline int vmw_fb_close(struct vmw_private *dev_priv)
+{
+ return 0;
+}
+static inline int vmw_fb_off(struct vmw_private *vmw_priv)
+{
+ return 0;
+}
+static inline int vmw_fb_on(struct vmw_private *vmw_priv)
+{
+ return 0;
+}
+#endif
/**
* Kernel modesetting - vmwgfx_kms.c
@@ -1452,7 +1498,7 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
/* Host messaging -vmwgfx_msg.c: */
int vmw_host_get_guestinfo(const char *guest_info_param,
char *buffer, size_t *length);
-int vmw_host_log(const char *log);
+__printf(1, 2) int vmw_host_printf(const char *fmt, ...);
int vmw_msg_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1559,6 +1605,7 @@ static inline void vmw_fifo_resource_dec(struct vmw_private *dev_priv)
*/
static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
{
+ BUG_ON(vmw_is_svga_v3(vmw));
return READ_ONCE(*(vmw->fifo_mem + fifo_reg));
}
@@ -1573,6 +1620,44 @@ static inline u32 vmw_fifo_mem_read(struct vmw_private *vmw, uint32 fifo_reg)
static inline void vmw_fifo_mem_write(struct vmw_private *vmw, u32 fifo_reg,
u32 value)
{
+ BUG_ON(vmw_is_svga_v3(vmw));
WRITE_ONCE(*(vmw->fifo_mem + fifo_reg), value);
}
+
+static inline u32 vmw_fence_read(struct vmw_private *dev_priv)
+{
+ u32 fence;
+ if (vmw_is_svga_v3(dev_priv))
+ fence = vmw_read(dev_priv, SVGA_REG_FENCE);
+ else
+ fence = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+ return fence;
+}
+
+static inline void vmw_fence_write(struct vmw_private *dev_priv,
+ u32 fence)
+{
+ BUG_ON(vmw_is_svga_v3(dev_priv));
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, fence);
+}
+
+static inline u32 vmw_irq_status_read(struct vmw_private *vmw)
+{
+ u32 status;
+ if (vmw_is_svga_v3(vmw))
+ status = vmw_read(vmw, SVGA_REG_IRQ_STATUS);
+ else
+ status = inl(vmw->io_start + SVGA_IRQSTATUS_PORT);
+ return status;
+}
+
+static inline void vmw_irq_status_write(struct vmw_private *vmw,
+ uint32 status)
+{
+ if (vmw_is_svga_v3(vmw))
+ vmw_write(vmw, SVGA_REG_IRQ_STATUS, status);
+ else
+ outl(status, vmw->io_start + SVGA_IRQSTATUS_PORT);
+}
+
#endif
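
The vmwgfx_drv.h hunks above are the core of the SVGA v3 support: every register accessor now branches on the device generation, going through the new rmmio aperture on v3 hardware and through the legacy index/value port pair under hw_lock otherwise, with vmw_fence_read() and vmw_irq_status_read() picking the matching backend. The following stand-alone C program models that dispatch pattern in user space; the structure layout, the fake in-memory backends and the PCI-id constants are illustrative stand-ins, not the driver's code (compile with cc -pthread).

/*
 * Stand-alone model of the vmw_read()/vmw_write() dispatch added above:
 * SVGA v3 devices use direct "MMIO" loads and stores, older devices go
 * through an index/value register pair that must be serialized by a lock.
 * The fake backends and PCI ids below are illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <pthread.h>

#define FAKE_PCI_ID_SVGA2 0x0405
#define FAKE_PCI_ID_SVGA3 0x0406
#define NUM_REGS 16

struct fake_device {
	unsigned long pci_id;
	uint32_t mmio[NUM_REGS];	/* stands in for the rmmio aperture */
	uint32_t regs[NUM_REGS];	/* backing store behind the port pair */
	uint32_t index_port;		/* last offset written to the index port */
	pthread_mutex_t hw_lock;	/* serializes the two-step port access */
};

static int is_svga_v3(const struct fake_device *dev)
{
	return dev->pci_id == FAKE_PCI_ID_SVGA3;
}

static void reg_write(struct fake_device *dev, unsigned int offset, uint32_t value)
{
	if (is_svga_v3(dev)) {
		dev->mmio[offset] = value;		/* ~ iowrite32(value, rmmio + offset) */
	} else {
		pthread_mutex_lock(&dev->hw_lock);
		dev->index_port = offset;		/* ~ outl(offset, INDEX_PORT) */
		dev->regs[dev->index_port] = value;	/* ~ outl(value, VALUE_PORT) */
		pthread_mutex_unlock(&dev->hw_lock);
	}
}

static uint32_t reg_read(struct fake_device *dev, unsigned int offset)
{
	uint32_t val;

	if (is_svga_v3(dev))
		return dev->mmio[offset];		/* ~ ioread32(rmmio + offset) */

	pthread_mutex_lock(&dev->hw_lock);
	dev->index_port = offset;
	val = dev->regs[dev->index_port];
	pthread_mutex_unlock(&dev->hw_lock);
	return val;
}

int main(void)
{
	struct fake_device v2 = { .pci_id = FAKE_PCI_ID_SVGA2,
				  .hw_lock = PTHREAD_MUTEX_INITIALIZER };
	struct fake_device v3 = { .pci_id = FAKE_PCI_ID_SVGA3,
				  .hw_lock = PTHREAD_MUTEX_INITIALIZER };

	reg_write(&v2, 3, 0xabcd);
	reg_write(&v3, 3, 0x1234);
	printf("v2 reg3=%#x v3 reg3=%#x\n", reg_read(&v2, 3), reg_read(&v3, 3));
	return 0;
}

The lock matters only on the legacy path because the index/value pair is a two-step access that must not interleave between threads; the v3 MMIO path is a single store or load.
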
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a24196f92c3..32a84dff3fbf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -711,7 +711,7 @@ vmw_cmd_res_check(struct vmw_private *dev_priv,
}
/**
- * vmw_rebind_dx_query - Rebind DX query associated with the context
+ * vmw_rebind_all_dx_query - Rebind DX query associated with the context
*
* @ctx_res: context the query belongs to
*
@@ -1140,7 +1140,7 @@ static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
}
/**
- * vmw_translate_mob_pointer - Prepare to translate a user-space buffer handle
+ * vmw_translate_mob_ptr - Prepare to translate a user-space buffer handle
* to a MOB id.
*
* @dev_priv: Pointer to a device private structure.
@@ -1195,7 +1195,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
}
/**
- * vmw_translate_guest_pointer - Prepare to translate a user-space buffer handle
+ * vmw_translate_guest_ptr - Prepare to translate a user-space buffer handle
* to a valid SVGAGuestPtr
*
* @dev_priv: Pointer to a device private structure.
@@ -2308,7 +2308,7 @@ static int vmw_cmd_dx_set_vertex_buffers(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_ia_set_vertex_buffers - Validate
+ * vmw_cmd_dx_set_index_buffer - Validate
* SVGA_3D_CMD_DX_IA_SET_INDEX_BUFFER command.
*
* @dev_priv: Pointer to a device private struct.
@@ -2347,7 +2347,7 @@ static int vmw_cmd_dx_set_index_buffer(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_set_rendertarget - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
+ * vmw_cmd_dx_set_rendertargets - Validate SVGA_3D_CMD_DX_SET_RENDERTARGETS
* command
*
* @dev_priv: Pointer to a device private struct.
@@ -2402,7 +2402,7 @@ static int vmw_cmd_dx_clear_rendertarget_view(struct vmw_private *dev_priv,
}
/**
- * vmw_cmd_dx_clear_rendertarget_view - Validate
+ * vmw_cmd_dx_clear_depthstencil_view - Validate
* SVGA_3D_CMD_DX_CLEAR_DEPTHSTENCIL_VIEW command
*
* @dev_priv: Pointer to a device private struct.
@@ -2513,7 +2513,7 @@ static int vmw_cmd_dx_set_so_targets(struct vmw_private *dev_priv,
binding.bi.ctx = ctx_node->ctx;
binding.bi.res = res;
- binding.bi.bt = vmw_ctx_binding_so_target,
+ binding.bi.bt = vmw_ctx_binding_so_target;
binding.offset = cmd->targets[i].offset;
binding.size = cmd->targets[i].sizeInBytes;
binding.slot = i;
@@ -2763,12 +2763,24 @@ static int vmw_cmd_dx_genmips(struct vmw_private *dev_priv,
{
VMW_DECLARE_CMD_VAR(*cmd, SVGA3dCmdDXGenMips) =
container_of(header, typeof(*cmd), header);
- struct vmw_resource *ret;
+ struct vmw_resource *view;
+ struct vmw_res_cache_entry *rcache;
- ret = vmw_view_id_val_add(sw_context, vmw_view_sr,
- cmd->body.shaderResourceViewId);
+ view = vmw_view_id_val_add(sw_context, vmw_view_sr,
+ cmd->body.shaderResourceViewId);
+ if (IS_ERR(view))
+ return PTR_ERR(view);
- return PTR_ERR_OR_ZERO(ret);
+ /*
+ * Normally the shader-resource view is not gpu-dirtying, but for
+ * this particular command it is...
+ * So mark the last looked-up surface, which is the surface
+ * the view points to, gpu-dirty.
+ */
+ rcache = &sw_context->res_cache[vmw_res_surface];
+ vmw_validation_res_set_dirty(sw_context->ctx, rcache->private,
+ VMW_RES_DIRTY_SET);
+ return 0;
}
/**
@@ -3829,7 +3841,7 @@ vmw_execbuf_copy_fence_user(struct vmw_private *dev_priv,
fence_rep.handle = fence_handle;
fence_rep.seqno = fence->base.seqno;
- vmw_update_seqno(dev_priv, &dev_priv->fifo);
+ vmw_update_seqno(dev_priv);
fence_rep.passed_seqno = dev_priv->last_read_seqno;
}
@@ -4431,10 +4443,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
goto out;
}
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = vmw_execbuf_process(file_priv, dev_priv,
(void __user *)(unsigned long)arg->commands,
NULL, arg->command_size, arg->throttle_us,
@@ -4442,7 +4450,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
(void __user *)(unsigned long)arg->fence_rep,
NULL, arg->flags);
- ttm_read_unlock(&dev_priv->reservation_sem);
if (unlikely(ret != 0))
goto out;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 33f07abfc3ae..d18c6a56e3dc 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -195,7 +195,6 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
if (!cur_fb)
goto out_unlock;
- (void) ttm_read_lock(&vmw_priv->reservation_sem, false);
(void) ttm_bo_reserve(&vbo->base, false, false, NULL);
virtual = vmw_bo_map_and_cache(vbo);
if (!virtual)
@@ -254,7 +253,6 @@ static void vmw_fb_dirty_flush(struct work_struct *work)
out_unreserve:
ttm_bo_unreserve(&vbo->base);
- ttm_read_unlock(&vmw_priv->reservation_sem);
if (w && h) {
WARN_ON_ONCE(par->set_fb->funcs->dirty(cur_fb, NULL, 0, 0,
&clip, 1));
@@ -396,8 +394,6 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
struct vmw_buffer_object *vmw_bo;
int ret;
- (void) ttm_write_lock(&vmw_priv->reservation_sem, false);
-
vmw_bo = kmalloc(sizeof(*vmw_bo), GFP_KERNEL);
if (!vmw_bo) {
ret = -ENOMEM;
@@ -412,12 +408,8 @@ static int vmw_fb_create_bo(struct vmw_private *vmw_priv,
goto err_unlock; /* init frees the buffer on failure */
*out = vmw_bo;
- ttm_write_unlock(&vmw_priv->reservation_sem);
-
- return 0;
err_unlock:
- ttm_write_unlock(&vmw_priv->reservation_sem);
return ret;
}
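
vmwgfx_fb.c is only built when CONFIG_DRM_FBDEV_EMULATION is enabled; the header hunk earlier pairs it with static inline no-op stubs so the callers of vmw_fb_init() and friends never need their own #ifdefs. A minimal stand-alone sketch of that stub pattern follows, with an invented FEATURE_FOO option and hypothetical function names.

/*
 * Compile-time stub pattern: real declarations when the feature is built,
 * 0-returning static inlines otherwise, so call sites stay unconditional.
 * FEATURE_FOO and the foo_*() names are hypothetical; built without
 * -DFEATURE_FOO this program is self-contained.
 */
#include <stdio.h>

#ifdef FEATURE_FOO
int foo_init(void);	/* real implementation would live elsewhere */
int foo_close(void);
#else
static inline int foo_init(void)  { return 0; }
static inline int foo_close(void) { return 0; }
#endif

int main(void)
{
	printf("init=%d close=%d\n", foo_init(), foo_close());
	return 0;
}
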
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index 23523eb3cac2..9fe12329a4d5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -139,12 +139,10 @@ static bool vmw_fence_enable_signaling(struct dma_fence *f)
struct vmw_fence_manager *fman = fman_from_fence(fence);
struct vmw_private *dev_priv = fman->dev_priv;
- u32 seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+ u32 seqno = vmw_fence_read(dev_priv);
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-
return true;
}
@@ -177,7 +175,6 @@ static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
if (likely(vmw_fence_obj_signaled(fence)))
return timeout;
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
vmw_seqno_waiter_add(dev_priv);
spin_lock(f->lock);
@@ -464,7 +461,7 @@ static void __vmw_fences_update(struct vmw_fence_manager *fman)
bool needs_rerun;
uint32_t seqno, new_seqno;
- seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
+ seqno = vmw_fence_read(fman->dev_priv);
rerun:
list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
@@ -486,7 +483,7 @@ rerun:
needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
if (unlikely(needs_rerun)) {
- new_seqno = vmw_fifo_mem_read(fman->dev_priv, SVGA_FIFO_FENCE);
+ new_seqno = vmw_fence_read(fman->dev_priv);
if (new_seqno != seqno) {
seqno = new_seqno;
goto rerun;
@@ -529,13 +526,6 @@ int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
return ret;
}
-void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
-{
- struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-}
-
static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
dma_fence_free(&fence->base);
@@ -992,7 +982,7 @@ static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
}
/**
- * vmw_event_fence_action_create - Post an event for sending when a fence
+ * vmw_event_fence_action_queue - Post an event for sending when a fence
* object seqno has passed.
*
* @file_priv: The file connection on which the event should be posted.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
index 50e9fdd7acf1..079ab4f3ba51 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.h
@@ -94,8 +94,6 @@ extern int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
bool lazy,
bool interruptible, unsigned long timeout);
-extern void vmw_fence_obj_flush(struct vmw_fence_obj *fence);
-
extern int vmw_fence_create(struct vmw_fence_manager *fman,
uint32_t seqno,
struct vmw_fence_obj **p_fence);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 964ddf1ca57a..c482e5298e11 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -72,7 +72,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
SVGA_REMAP_GMR2_PPN64 : SVGA_REMAP_GMR2_PPN32;
while (num_pages > 0) {
- unsigned long nr = min(num_pages, (unsigned long)VMW_PPN_PER_REMAP);
+ unsigned long nr = min_t(unsigned long, num_pages, VMW_PPN_PER_REMAP);
remap_cmd.offsetPages = remap_pos;
remap_cmd.numPages = nr;
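
The gmr.c change replaces min() plus an open-coded cast with min_t(unsigned long, ...), which forces both operands to one explicit type before comparing. A simplified stand-alone approximation of that helper is below; unlike the real macro in <linux/minmax.h> it evaluates its arguments twice, and the constant is invented for the example.

/*
 * Simplified stand-in for the kernel's min_t(): force both operands to a
 * single explicit type so signedness or width mismatches cannot skew the
 * comparison.  Unlike the real macro this evaluates its arguments twice.
 */
#include <stdio.h>

#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

#define PPN_PER_REMAP 1024	/* illustrative batch size */

int main(void)
{
	unsigned long num_pages = 5000;

	while (num_pages > 0) {
		unsigned long nr = min_t(unsigned long, num_pages, PPN_PER_REMAP);

		printf("remapping %lu pages\n", nr);
		num_pages -= nr;
	}
	return 0;
}
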
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index b36032964b2f..4fdacf9924e6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -60,15 +60,13 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value = dev_priv->capabilities2;
break;
case DRM_VMW_PARAM_FIFO_CAPS:
- param->value = dev_priv->fifo.capabilities;
+ param->value = vmw_fifo_caps(dev_priv);
break;
case DRM_VMW_PARAM_MAX_FB_SIZE:
param->value = dev_priv->prim_bb_mem;
break;
case DRM_VMW_PARAM_FIFO_HW_VERSION:
{
- const struct vmw_fifo_state *fifo = &dev_priv->fifo;
-
if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
param->value = SVGA3D_HWVERSION_WS8_B1;
break;
@@ -76,7 +74,7 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
param->value =
vmw_fifo_mem_read(dev_priv,
- ((fifo->capabilities &
+ ((vmw_fifo_caps(dev_priv) &
SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
SVGA_FIFO_3D_HWVERSION_REVISED :
SVGA_FIFO_3D_HWVERSION));
@@ -302,10 +300,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
}
vfb = vmw_framebuffer_to_vfb(fb);
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- goto out_no_ttm_lock;
-
ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
user_surface_converter,
&res);
@@ -322,8 +316,6 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
vmw_surface_unreference(&surface);
out_no_surface:
- ttm_read_unlock(&dev_priv->reservation_sem);
-out_no_ttm_lock:
drm_framebuffer_put(fb);
out_no_fb:
drm_modeset_unlock_all(dev);
@@ -391,15 +383,10 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
goto out_no_ttm_lock;
}
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- goto out_no_ttm_lock;
-
ret = vmw_kms_readback(dev_priv, file_priv,
vfb, user_fence_rep,
clips, num_clips);
- ttm_read_unlock(&dev_priv->reservation_sem);
out_no_ttm_lock:
drm_framebuffer_put(fb);
out_no_fb:
@@ -409,46 +396,3 @@ out_no_copy:
out_clips:
return ret;
}
-
-
-/**
- * vmw_fops_poll - wrapper around the drm_poll function
- *
- * @filp: See the linux fops poll documentation.
- * @wait: See the linux fops poll documentation.
- *
- * Wrapper around the drm_poll function that makes sure the device is
- * processing the fifo if drm_poll decides to wait.
- */
-__poll_t vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
-{
- struct drm_file *file_priv = filp->private_data;
- struct vmw_private *dev_priv =
- vmw_priv(file_priv->minor->dev);
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
- return drm_poll(filp, wait);
-}
-
-
-/**
- * vmw_fops_read - wrapper around the drm_read function
- *
- * @filp: See the linux fops read documentation.
- * @buffer: See the linux fops read documentation.
- * @count: See the linux fops read documentation.
- * @offset: See the linux fops read documentation.
- *
- * Wrapper around the drm_read function that makes sure the device is
- * processing the fifo if drm_read decides to wait.
- */
-ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset)
-{
- struct drm_file *file_priv = filp->private_data;
- struct vmw_private *dev_priv =
- vmw_priv(file_priv->minor->dev);
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
- return drm_read(filp, buffer, count, offset);
-}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 6c2a569f1fcb..b9a9b7ddadbd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -65,7 +65,7 @@ static irqreturn_t vmw_thread_fn(int irq, void *arg)
}
/**
- * vmw_irq_handler irq handler
+ * vmw_irq_handler: irq handler
*
* @irq: irq number
* @arg: Closure argument. Pointer to a struct drm_device cast to void *
@@ -82,11 +82,11 @@ static irqreturn_t vmw_irq_handler(int irq, void *arg)
uint32_t status, masked_status;
irqreturn_t ret = IRQ_HANDLED;
- status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ status = vmw_irq_status_read(dev_priv);
masked_status = status & READ_ONCE(dev_priv->irq_mask);
if (likely(status))
- outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_irq_status_write(dev_priv, status);
if (!status)
return IRQ_NONE;
@@ -114,10 +114,9 @@ static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
-void vmw_update_seqno(struct vmw_private *dev_priv,
- struct vmw_fifo_state *fifo_state)
+void vmw_update_seqno(struct vmw_private *dev_priv)
{
- uint32_t seqno = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_FENCE);
+ uint32_t seqno = vmw_fence_read(dev_priv);
if (dev_priv->last_read_seqno != seqno) {
dev_priv->last_read_seqno = seqno;
@@ -128,18 +127,16 @@ void vmw_update_seqno(struct vmw_private *dev_priv,
bool vmw_seqno_passed(struct vmw_private *dev_priv,
uint32_t seqno)
{
- struct vmw_fifo_state *fifo_state;
bool ret;
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
- fifo_state = &dev_priv->fifo;
- vmw_update_seqno(dev_priv, fifo_state);
+ vmw_update_seqno(dev_priv);
if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
return true;
- if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
+ if (!(vmw_fifo_caps(dev_priv) & SVGA_FIFO_CAP_FENCE) &&
vmw_fifo_idle(dev_priv, seqno))
return true;
@@ -161,7 +158,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
bool interruptible,
unsigned long timeout)
{
- struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
+ struct vmw_fifo_state *fifo_state = dev_priv->fifo;
uint32_t count = 0;
uint32_t signal_seq;
@@ -221,7 +218,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
}
finish_wait(&dev_priv->fence_queue, &__wait);
if (ret == 0 && fifo_idle)
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_FENCE, signal_seq);
+ vmw_fence_write(dev_priv, signal_seq);
wake_up_all(&dev_priv->fence_queue);
out_err:
@@ -236,7 +233,7 @@ void vmw_generic_waiter_add(struct vmw_private *dev_priv,
{
spin_lock_bh(&dev_priv->waiter_lock);
if ((*waiter_count)++ == 0) {
- outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ vmw_irq_status_write(dev_priv, flag);
dev_priv->irq_mask |= flag;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
}
@@ -278,59 +275,13 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
&dev_priv->goal_queue_waiters);
}
-int vmw_wait_seqno(struct vmw_private *dev_priv,
- bool lazy, uint32_t seqno,
- bool interruptible, unsigned long timeout)
-{
- long ret;
- struct vmw_fifo_state *fifo = &dev_priv->fifo;
-
- if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
- return 0;
-
- if (likely(vmw_seqno_passed(dev_priv, seqno)))
- return 0;
-
- vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
-
- if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
- return vmw_fallback_wait(dev_priv, lazy, true, seqno,
- interruptible, timeout);
-
- if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
- return vmw_fallback_wait(dev_priv, lazy, false, seqno,
- interruptible, timeout);
-
- vmw_seqno_waiter_add(dev_priv);
-
- if (interruptible)
- ret = wait_event_interruptible_timeout
- (dev_priv->fence_queue,
- vmw_seqno_passed(dev_priv, seqno),
- timeout);
- else
- ret = wait_event_timeout
- (dev_priv->fence_queue,
- vmw_seqno_passed(dev_priv, seqno),
- timeout);
-
- vmw_seqno_waiter_remove(dev_priv);
-
- if (unlikely(ret == 0))
- ret = -EBUSY;
- else if (likely(ret > 0))
- ret = 0;
-
- return ret;
-}
-
static void vmw_irq_preinstall(struct drm_device *dev)
{
struct vmw_private *dev_priv = vmw_priv(dev);
uint32_t status;
- status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ status = vmw_irq_status_read(dev_priv);
+ vmw_irq_status_write(dev_priv, status);
}
void vmw_irq_uninstall(struct drm_device *dev)
@@ -346,8 +297,8 @@ void vmw_irq_uninstall(struct drm_device *dev)
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
- status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
- outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
+ status = vmw_irq_status_read(dev_priv);
+ vmw_irq_status_write(dev_priv, status);
dev->irq_enabled = false;
free_irq(dev->irq, dev);
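
With vmw_wait_seqno() removed, the remaining seqno logic still rests on the same wrap-safe comparison: a sequence number has passed when the unsigned difference from the last value read back from the device is smaller than the wrap window, which stays correct across 32-bit overflow. A small stand-alone check of that comparison follows; the window constant is illustrative.

/*
 * Wrap-safe 32-bit sequence comparison: a seqno has passed if the unsigned
 * distance from the last read-back value is smaller than the wrap window,
 * even when the counter has rolled over through 0.
 */
#include <stdint.h>
#include <stdio.h>

#define FENCE_WRAP (1u << 24)	/* illustrative window */

static int seqno_passed(uint32_t last_read, uint32_t seqno)
{
	return (uint32_t)(last_read - seqno) < FENCE_WRAP;
}

int main(void)
{
	/* Plain case: 105 has passed once 110 has been read back. */
	printf("%d\n", seqno_passed(110, 105));		/* 1 */
	/* Wrapped case: counter rolled over from 0xfffffffe to 5. */
	printf("%d\n", seqno_passed(5, 0xfffffffeu));	/* 1 */
	/* Future seqno: 500 has not passed yet. */
	printf("%d\n", seqno_passed(110, 500));		/* 0 */
	return 0;
}
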
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index abbca8b0b3c5..220f9fd0d420 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -38,8 +38,10 @@
void vmw_du_cleanup(struct vmw_display_unit *du)
{
+ struct vmw_private *dev_priv = vmw_priv(du->primary.dev);
drm_plane_cleanup(&du->primary);
- drm_plane_cleanup(&du->cursor);
+ if (vmw_cmd_supported(dev_priv))
+ drm_plane_cleanup(&du->cursor);
drm_connector_unregister(&du->connector);
drm_crtc_cleanup(&du->crtc);
@@ -128,11 +130,17 @@ static void vmw_cursor_update_position(struct vmw_private *dev_priv,
uint32_t count;
spin_lock(&dev_priv->cursor_lock);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
- count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
- vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ if (vmw_is_cursor_bypass3_enabled(dev_priv)) {
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_ON, show ? 1 : 0);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_X, x);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_Y, y);
+ count = vmw_fifo_mem_read(dev_priv, SVGA_FIFO_CURSOR_COUNT);
+ vmw_fifo_mem_write(dev_priv, SVGA_FIFO_CURSOR_COUNT, ++count);
+ } else {
+ vmw_write(dev_priv, SVGA_REG_CURSOR_X, x);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_Y, y);
+ vmw_write(dev_priv, SVGA_REG_CURSOR_ON, show ? 1 : 0);
+ }
spin_unlock(&dev_priv->cursor_lock);
}
@@ -289,7 +297,7 @@ void vmw_du_primary_plane_destroy(struct drm_plane *plane)
/**
- * vmw_du_vps_unpin_surf - unpins resource associated with a framebuffer surface
+ * vmw_du_plane_unpin_surf - unpins resource associated with a framebuffer surface
*
* @vps: plane state associated with the display surface
* @unreference: true if we also want to unreference the display.
@@ -474,7 +482,7 @@ int vmw_du_primary_plane_atomic_check(struct drm_plane *plane,
* vmw_du_cursor_plane_atomic_check - check if the new state is okay
*
* @plane: cursor plane
- * @new_state: info on the new plane state
+ * @state: info on the new plane state
*
* This is a chance to fail if the new cursor state does not fit
* our requirements.
@@ -1008,12 +1016,6 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
drm_modeset_lock_all(&dev_priv->drm);
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0)) {
- drm_modeset_unlock_all(&dev_priv->drm);
- return ret;
- }
-
if (!num_clips) {
num_clips = 1;
clips = &norect;
@@ -1037,7 +1039,6 @@ static int vmw_framebuffer_bo_dirty(struct drm_framebuffer *framebuffer,
}
vmw_cmd_flush(dev_priv, false);
- ttm_read_unlock(&dev_priv->reservation_sem);
drm_modeset_unlock_all(&dev_priv->drm);
@@ -1052,7 +1053,8 @@ static int vmw_framebuffer_bo_dirty_ext(struct drm_framebuffer *framebuffer,
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
- if (dev_priv->active_display_unit == vmw_du_legacy)
+ if (dev_priv->active_display_unit == vmw_du_legacy &&
+ vmw_cmd_supported(dev_priv))
return vmw_framebuffer_bo_dirty(framebuffer, file_priv, flags,
color, clips, num_clips);
@@ -2640,7 +2642,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
}
/**
- * vmw_kms_create_implicit_placement_proparty - Set up the implicit placement
+ * vmw_kms_create_implicit_placement_property - Set up the implicit placement
* property.
*
* @dev_priv: Pointer to a device private struct.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 87e0b303d900..d85c7eab9469 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -404,19 +404,24 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
drm_plane_helper_add(primary, &vmw_ldu_primary_plane_helper_funcs);
- /* Initialize cursor plane */
- ret = drm_universal_plane_init(dev, &ldu->base.cursor,
- 0, &vmw_ldu_cursor_funcs,
- vmw_cursor_plane_formats,
- ARRAY_SIZE(vmw_cursor_plane_formats),
- NULL, DRM_PLANE_TYPE_CURSOR, NULL);
- if (ret) {
- DRM_ERROR("Failed to initialize cursor plane");
- drm_plane_cleanup(&ldu->base.primary);
- goto err_free;
- }
+ /*
+ * We're going to be using traces and software cursors
+ */
+ if (vmw_cmd_supported(dev_priv)) {
+ /* Initialize cursor plane */
+ ret = drm_universal_plane_init(dev, &ldu->base.cursor,
+ 0, &vmw_ldu_cursor_funcs,
+ vmw_cursor_plane_formats,
+ ARRAY_SIZE(vmw_cursor_plane_formats),
+ NULL, DRM_PLANE_TYPE_CURSOR, NULL);
+ if (ret) {
+ DRM_ERROR("Failed to initialize cursor plane");
+ drm_plane_cleanup(&ldu->base.primary);
+ goto err_free;
+ }
- drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
+ drm_plane_helper_add(cursor, &vmw_ldu_cursor_plane_helper_funcs);
+ }
ret = drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
DRM_MODE_CONNECTOR_VIRTUAL);
@@ -445,9 +450,10 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
goto err_free_encoder;
}
- ret = drm_crtc_init_with_planes(dev, crtc, &ldu->base.primary,
- &ldu->base.cursor,
- &vmw_legacy_crtc_funcs, NULL);
+ ret = drm_crtc_init_with_planes(
+ dev, crtc, &ldu->base.primary,
+ vmw_cmd_supported(dev_priv) ? &ldu->base.cursor : NULL,
+ &vmw_legacy_crtc_funcs, NULL);
if (ret) {
DRM_ERROR("Failed to initialize CRTC\n");
goto err_free_unregister;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
index 609269625468..3d08f5700bdb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.c
@@ -33,7 +33,8 @@
#include <asm/hypervisor.h>
#include "vmwgfx_drv.h"
-#include "vmwgfx_msg.h"
+#include "vmwgfx_msg_x86.h"
+#include "vmwgfx_msg_arm64.h"
#define MESSAGE_STATUS_SUCCESS 0x0001
#define MESSAGE_STATUS_DORECV 0x0002
@@ -473,30 +474,40 @@ out_open:
}
-
/**
- * vmw_host_log: Sends a log message to the host
+ * vmw_host_printf: Sends a log message to the host
*
- * @log: NULL terminated string
+ * @fmt: Regular printf format string and arguments
*
* Returns: 0 on success
*/
-int vmw_host_log(const char *log)
+__printf(1, 2)
+int vmw_host_printf(const char *fmt, ...)
{
+ va_list ap;
struct rpc_channel channel;
char *msg;
+ char *log;
int ret = 0;
-
if (!vmw_msg_enabled)
return -ENODEV;
- if (!log)
+ if (!fmt)
return ret;
+ va_start(ap, fmt);
+ log = kvasprintf(GFP_KERNEL, fmt, ap);
+ va_end(ap);
+ if (!log) {
+ DRM_ERROR("Cannot allocate memory for the log message.\n");
+ return -ENOMEM;
+ }
+
msg = kasprintf(GFP_KERNEL, "log %s", log);
if (!msg) {
DRM_ERROR("Cannot allocate memory for host log message.\n");
+ kfree(log);
return -ENOMEM;
}
@@ -508,6 +519,7 @@ int vmw_host_log(const char *log)
vmw_close_channel(&channel);
kfree(msg);
+ kfree(log);
return 0;
@@ -515,6 +527,7 @@ out_msg:
vmw_close_channel(&channel);
out_open:
kfree(msg);
+ kfree(log);
DRM_ERROR("Failed to send host log message.\n");
return -EINVAL;
@@ -537,7 +550,7 @@ int vmw_msg_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_vmw_msg_arg *arg =
- (struct drm_vmw_msg_arg *) data;
+ (struct drm_vmw_msg_arg *)data;
struct rpc_channel channel;
char *msg;
int length;
@@ -577,7 +590,7 @@ int vmw_msg_ioctl(struct drm_device *dev, void *data,
}
if (reply && reply_len > 0) {
if (copy_to_user((void __user *)((unsigned long)arg->receive),
- reply, reply_len)) {
+ reply, reply_len)) {
DRM_ERROR("Failed to copy message to userspace.\n");
kfree(reply);
goto out_msg;
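
vmw_host_printf() formats the caller's arguments with kvasprintf() first, then wraps the result in a "log <msg>" RPC string, and frees both buffers on every exit path. A user-space sketch of that two-stage formatting is below; the RPC send is stubbed out and all names are illustrative.

/*
 * User-space sketch of the vmw_host_printf() shape: format the caller's
 * message first, then wrap it in a "log " command string, and free both
 * allocations on every path.  send_to_host() is a stub for the channel.
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int send_to_host(const char *msg)
{
	printf("-> %s\n", msg);		/* stand-in for the real RPC channel */
	return 0;
}

__attribute__((format(printf, 1, 2)))
static int host_printf(const char *fmt, ...)
{
	va_list ap;
	char *log, *msg;
	size_t msg_len;
	int len, ret;

	if (!fmt)
		return 0;

	va_start(ap, fmt);
	len = vsnprintf(NULL, 0, fmt, ap);	/* sizing pass */
	va_end(ap);
	if (len < 0)
		return -1;

	log = malloc((size_t)len + 1);
	if (!log)
		return -1;

	va_start(ap, fmt);
	vsnprintf(log, (size_t)len + 1, fmt, ap);	/* formatting pass */
	va_end(ap);

	msg_len = strlen(log) + sizeof("log ");		/* sizeof includes the NUL */
	msg = malloc(msg_len);
	if (!msg) {
		free(log);
		return -1;
	}
	snprintf(msg, msg_len, "log %s", log);

	ret = send_to_host(msg);
	free(msg);
	free(log);
	return ret;
}

int main(void)
{
	return host_printf("resolution set to %ux%u", 1280u, 800u);
}
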
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
deleted file mode 100644
index f685c7071dec..000000000000
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_msg.h
+++ /dev/null
@@ -1,214 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
-/**************************************************************************
- *
- * Copyright 2016 VMware, Inc., Palo Alto, CA., USA
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the
- * "Software"), to deal in the Software without restriction, including
- * without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
- * permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice (including the
- * next paragraph) shall be included in all copies or substantial portions
- * of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
- * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
- * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
- * USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************
- *
- * Based on code from vmware.c and vmmouse.c.
- * Author:
- * Sinclair Yeh <syeh@vmware.com>
- */
-#ifndef _VMWGFX_MSG_H
-#define _VMWGFX_MSG_H
-
-#include <asm/vmware.h>
-
-/**
- * Hypervisor-specific bi-directional communication channel. Should never
- * execute on bare metal hardware. The caller must make sure to check for
- * supported hypervisor before using these macros.
- *
- * The last two parameters are both input and output and must be initialized.
- *
- * @cmd: [IN] Message Cmd
- * @in_ebx: [IN] Message Len, through EBX
- * @in_si: [IN] Input argument through SI, set to 0 if not used
- * @in_di: [IN] Input argument through DI, set ot 0 if not used
- * @flags: [IN] hypercall flags + [channel id]
- * @magic: [IN] hypervisor magic value
- * @eax: [OUT] value of EAX register
- * @ebx: [OUT] e.g. status from an HB message status command
- * @ecx: [OUT] e.g. status from a non-HB message status command
- * @edx: [OUT] e.g. channel id
- * @si: [OUT]
- * @di: [OUT]
- */
-#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
- flags, magic, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile (VMWARE_HYPERCALL : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(in_ebx), \
- "c"(cmd), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di) : \
- "memory"); \
-})
-
-
-/**
- * Hypervisor-specific bi-directional communication channel. Should never
- * execute on bare metal hardware. The caller must make sure to check for
- * supported hypervisor before using these macros.
- *
- * The last 3 parameters are both input and output and must be initialized.
- *
- * @cmd: [IN] Message Cmd
- * @in_ecx: [IN] Message Len, through ECX
- * @in_si: [IN] Input argument through SI, set to 0 if not used
- * @in_di: [IN] Input argument through DI, set to 0 if not used
- * @flags: [IN] hypercall flags + [channel id]
- * @magic: [IN] hypervisor magic value
- * @bp: [IN]
- * @eax: [OUT] value of EAX register
- * @ebx: [OUT] e.g. status from an HB message status command
- * @ecx: [OUT] e.g. status from a non-HB message status command
- * @edx: [OUT] e.g. channel id
- * @si: [OUT]
- * @di: [OUT]
- */
-#ifdef __x86_64__
-
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %%rbp;" \
- "mov %12, %%rbp;" \
- VMWARE_HYPERCALL_HB_OUT \
- "pop %%rbp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "r"(bp) : \
- "memory", "cc"); \
-})
-
-
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %%rbp;" \
- "mov %12, %%rbp;" \
- VMWARE_HYPERCALL_HB_IN \
- "pop %%rbp" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "r"(bp) : \
- "memory", "cc"); \
-})
-
-#else
-
-/*
- * In the 32-bit version of this macro, we store bp in a memory location
- * because we've ran out of registers.
- * Now we can't reference that memory location while we've modified
- * %esp or %ebp, so we first push it on the stack, just before we push
- * %ebp, and then when we need it we read it from the stack where we
- * just pushed it.
- */
-#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %12;" \
- "push %%ebp;" \
- "mov 0x04(%%esp), %%ebp;" \
- VMWARE_HYPERCALL_HB_OUT \
- "pop %%ebp;" \
- "add $0x04, %%esp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "m"(bp) : \
- "memory", "cc"); \
-})
-
-
-#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
- flags, magic, bp, \
- eax, ebx, ecx, edx, si, di) \
-({ \
- asm volatile ("push %12;" \
- "push %%ebp;" \
- "mov 0x04(%%esp), %%ebp;" \
- VMWARE_HYPERCALL_HB_IN \
- "pop %%ebp;" \
- "add $0x04, %%esp;" : \
- "=a"(eax), \
- "=b"(ebx), \
- "=c"(ecx), \
- "=d"(edx), \
- "=S"(si), \
- "=D"(di) : \
- "a"(magic), \
- "b"(cmd), \
- "c"(in_ecx), \
- "d"(flags), \
- "S"(in_si), \
- "D"(in_di), \
- "m"(bp) : \
- "memory", "cc"); \
-})
-#endif /* #if __x86_64__ */
-
-#endif
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
new file mode 100755
index 000000000000..4f40167ad61f
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_arm64.h
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+/*
+ * Copyright 2021 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef _VMWGFX_MSG_ARM64_H
+#define _VMWGFX_MSG_ARM64_H
+
+#if defined(__aarch64__)
+
+#define VMWARE_HYPERVISOR_PORT 0x5658
+#define VMWARE_HYPERVISOR_PORT_HB 0x5659
+
+#define VMWARE_HYPERVISOR_HB BIT(0)
+#define VMWARE_HYPERVISOR_OUT BIT(1)
+
+#define X86_IO_MAGIC 0x86
+
+#define X86_IO_W7_SIZE_SHIFT 0
+#define X86_IO_W7_SIZE_MASK (0x3 << X86_IO_W7_SIZE_SHIFT)
+#define X86_IO_W7_DIR (1 << 2)
+#define X86_IO_W7_WITH (1 << 3)
+#define X86_IO_W7_STR (1 << 4)
+#define X86_IO_W7_DF (1 << 5)
+#define X86_IO_W7_IMM_SHIFT 5
+#define X86_IO_W7_IMM_MASK (0xff << X86_IO_W7_IMM_SHIFT)
+
+static inline void vmw_port(unsigned long cmd, unsigned long in_ebx,
+ unsigned long in_si, unsigned long in_di,
+ unsigned long flags, unsigned long magic,
+ unsigned long *eax, unsigned long *ebx,
+ unsigned long *ecx, unsigned long *edx,
+ unsigned long *si, unsigned long *di)
+{
+ register u64 x0 asm("x0") = magic;
+ register u64 x1 asm("x1") = in_ebx;
+ register u64 x2 asm("x2") = cmd;
+ register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT;
+ register u64 x4 asm("x4") = in_si;
+ register u64 x5 asm("x5") = in_di;
+
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_WITH |
+ X86_IO_W7_DIR |
+ (2 << X86_IO_W7_SIZE_SHIFT);
+
+ asm volatile("mrs xzr, mdccsr_el0 \n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2),
+ "+r"(x3), "+r"(x4), "+r"(x5)
+ : "r"(x7)
+ :);
+ *eax = x0;
+ *ebx = x1;
+ *ecx = x2;
+ *edx = x3;
+ *si = x4;
+ *di = x5;
+}
+
+static inline void vmw_port_hb(unsigned long cmd, unsigned long in_ecx,
+ unsigned long in_si, unsigned long in_di,
+ unsigned long flags, unsigned long magic,
+ unsigned long bp, u32 w7dir,
+ unsigned long *eax, unsigned long *ebx,
+ unsigned long *ecx, unsigned long *edx,
+ unsigned long *si, unsigned long *di)
+{
+ register u64 x0 asm("x0") = magic;
+ register u64 x1 asm("x1") = cmd;
+ register u64 x2 asm("x2") = in_ecx;
+ register u64 x3 asm("x3") = flags | VMWARE_HYPERVISOR_PORT_HB;
+ register u64 x4 asm("x4") = in_si;
+ register u64 x5 asm("x5") = in_di;
+ register u64 x6 asm("x6") = bp;
+ register u64 x7 asm("x7") = ((u64)X86_IO_MAGIC << 32) |
+ X86_IO_W7_STR |
+ X86_IO_W7_WITH |
+ w7dir;
+
+ asm volatile("mrs xzr, mdccsr_el0 \n\t"
+ : "+r"(x0), "+r"(x1), "+r"(x2),
+ "+r"(x3), "+r"(x4), "+r"(x5)
+ : "r"(x6), "r"(x7)
+ :);
+ *eax = x0;
+ *ebx = x1;
+ *ecx = x2;
+ *edx = x3;
+ *si = x4;
+ *di = x5;
+}
+
+#define VMW_PORT(cmd, in_ebx, in_si, in_di, flags, magic, eax, ebx, ecx, edx, \
+ si, di) \
+ vmw_port(cmd, in_ebx, in_si, in_di, flags, magic, &eax, &ebx, &ecx, \
+ &edx, &si, &di)
+
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
+ ecx, edx, si, di) \
+ vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
+ 0, &eax, &ebx, &ecx, &edx, &si, &di)
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, flags, magic, bp, eax, ebx, \
+ ecx, edx, si, di) \
+ vmw_port_hb(cmd, in_ecx, in_si, in_di, flags, magic, bp, \
+ X86_IO_W7_DIR, &eax, &ebx, &ecx, &edx, &si, &di)
+
+#endif
+
+#endif /* _VMWGFX_MSG_ARM64_H */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
new file mode 100644
index 000000000000..0b74ca2dfb7b
--- /dev/null
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_msg_x86.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0+ OR MIT */
+/**************************************************************************
+ *
+ * Copyright 2016-2021 VMware, Inc., Palo Alto, CA., USA
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************
+ *
+ * Based on code from vmware.c and vmmouse.c.
+ * Author:
+ * Sinclair Yeh <syeh@vmware.com>
+ */
+#ifndef _VMWGFX_MSG_X86_H
+#define _VMWGFX_MSG_X86_H
+
+
+#if defined(__i386__) || defined(__x86_64__)
+
+#include <asm/vmware.h>
+
+/**
+ * Hypervisor-specific bi-directional communication channel. Should never
+ * execute on bare metal hardware. The caller must make sure to check for
+ * supported hypervisor before using these macros.
+ *
+ * The last two parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ebx: [IN] Message Len, through EBX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set ot 0 if not used
+ * @flags: [IN] hypercall flags + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si: [OUT]
+ * @di: [OUT]
+ */
+#define VMW_PORT(cmd, in_ebx, in_si, in_di, \
+ flags, magic, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile (VMWARE_HYPERCALL : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(in_ebx), \
+ "c"(cmd), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di) : \
+ "memory"); \
+})
+
+
+/**
+ * Hypervisor-specific bi-directional communication channel. Should never
+ * execute on bare metal hardware. The caller must make sure to check for
+ * supported hypervisor before using these macros.
+ *
+ * The last 3 parameters are both input and output and must be initialized.
+ *
+ * @cmd: [IN] Message Cmd
+ * @in_ecx: [IN] Message Len, through ECX
+ * @in_si: [IN] Input argument through SI, set to 0 if not used
+ * @in_di: [IN] Input argument through DI, set to 0 if not used
+ * @flags: [IN] hypercall flags + [channel id]
+ * @magic: [IN] hypervisor magic value
+ * @bp: [IN]
+ * @eax: [OUT] value of EAX register
+ * @ebx: [OUT] e.g. status from an HB message status command
+ * @ecx: [OUT] e.g. status from a non-HB message status command
+ * @edx: [OUT] e.g. channel id
+ * @si: [OUT]
+ * @di: [OUT]
+ */
+#ifdef __x86_64__
+
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
+ flags, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %%rbp;" \
+ "mov %12, %%rbp;" \
+ VMWARE_HYPERCALL_HB_OUT \
+ "pop %%rbp;" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "r"(bp) : \
+ "memory", "cc"); \
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
+ flags, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %%rbp;" \
+ "mov %12, %%rbp;" \
+ VMWARE_HYPERCALL_HB_IN \
+ "pop %%rbp" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "r"(bp) : \
+ "memory", "cc"); \
+})
+
+#elif defined(__i386__)
+
+/*
+ * In the 32-bit version of this macro, we store bp in a memory location
+ * because we've ran out of registers.
+ * Now we can't reference that memory location while we've modified
+ * %esp or %ebp, so we first push it on the stack, just before we push
+ * %ebp, and then when we need it we read it from the stack where we
+ * just pushed it.
+ */
+#define VMW_PORT_HB_OUT(cmd, in_ecx, in_si, in_di, \
+ flags, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
+ VMWARE_HYPERCALL_HB_OUT \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "m"(bp) : \
+ "memory", "cc"); \
+})
+
+
+#define VMW_PORT_HB_IN(cmd, in_ecx, in_si, in_di, \
+ flags, magic, bp, \
+ eax, ebx, ecx, edx, si, di) \
+({ \
+ asm volatile ("push %12;" \
+ "push %%ebp;" \
+ "mov 0x04(%%esp), %%ebp;" \
+ VMWARE_HYPERCALL_HB_IN \
+ "pop %%ebp;" \
+ "add $0x04, %%esp;" : \
+ "=a"(eax), \
+ "=b"(ebx), \
+ "=c"(ecx), \
+ "=d"(edx), \
+ "=S"(si), \
+ "=D"(di) : \
+ "a"(magic), \
+ "b"(cmd), \
+ "c"(in_ecx), \
+ "d"(flags), \
+ "S"(in_si), \
+ "D"(in_di), \
+ "m"(bp) : \
+ "memory", "cc"); \
+})
+#endif /* defined(__i386__) */
+
+#endif /* defined(__i386__) || defined(__x86_64__) */
+
+#endif /* _VMWGFX_MSG_X86_H */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index ac4a9b722279..54c5d16eb3b7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -421,7 +421,7 @@ int vmw_overlay_pause_all(struct vmw_private *dev_priv)
static bool vmw_overlay_available(const struct vmw_private *dev_priv)
{
return (dev_priv->overlay_priv != NULL &&
- ((dev_priv->fifo.capabilities & VMW_OVERLAY_CAP_MASK) ==
+ ((vmw_fifo_caps(dev_priv) & VMW_OVERLAY_CAP_MASK) ==
VMW_OVERLAY_CAP_MASK));
}
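
This overlay test and the new vmw_fifo_caps()/vmw_is_cursor_bypass3_enabled() helpers reduce to the same idiom: a feature is usable only when every required capability bit is present, and a device without FIFO memory reports zero capabilities so everything reads as unsupported. A stand-alone version of that check, with invented bit values:

/*
 * All-bits-required capability test: the overlay (or cursor-bypass) path is
 * taken only when every bit in the required mask is set.  Bit assignments
 * below are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define CAP_FENCE          (1u << 0)
#define CAP_CURSOR_BYPASS3 (1u << 1)
#define CAP_VIDEO          (1u << 2)

#define OVERLAY_CAP_MASK   (CAP_VIDEO | CAP_FENCE)

static uint32_t fifo_caps(const uint32_t *fifo_mem, uint32_t caps)
{
	/* No FIFO at all (as on SVGA v3): report no FIFO capabilities. */
	return fifo_mem ? caps : 0;
}

static int overlay_available(uint32_t caps)
{
	return (caps & OVERLAY_CAP_MASK) == OVERLAY_CAP_MASK;
}

int main(void)
{
	uint32_t mem[4] = { 0 };

	printf("%d\n", overlay_available(fifo_caps(mem, CAP_VIDEO | CAP_FENCE)));  /* 1 */
	printf("%d\n", overlay_available(fifo_caps(mem, CAP_VIDEO)));              /* 0 */
	printf("%d\n", overlay_available(fifo_caps(NULL, CAP_VIDEO | CAP_FENCE))); /* 0 */
	return 0;
}
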
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
index e99f6cdbb091..cf585dfe5669 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_reg.h
@@ -34,10 +34,6 @@
#include <linux/types.h>
-#define VMWGFX_INDEX_PORT 0x0
-#define VMWGFX_VALUE_PORT 0x1
-#define VMWGFX_IRQSTATUS_PORT 0x8
-
struct svga_guest_mem_descriptor {
u32 ppn;
u32 num_pages;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 35f02958ee2c..62ea920addc3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -280,7 +280,7 @@ out_bad_resource:
}
/**
- * vmw_user_resource_lookup_handle - lookup a struct resource from a
+ * vmw_user_resource_noref_lookup_handle - lookup a struct resource from a
* TTM user-space handle and perform basic type checks
*
* @dev_priv: Pointer to a device private struct
@@ -990,7 +990,6 @@ int vmw_resource_pin(struct vmw_resource *res, bool interruptible)
struct vmw_private *dev_priv = res->dev_priv;
int ret;
- ttm_write_lock(&dev_priv->reservation_sem, interruptible);
mutex_lock(&dev_priv->cmdbuf_mutex);
ret = vmw_resource_reserve(res, interruptible, false);
if (ret)
@@ -1029,7 +1028,6 @@ out_no_validate:
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
out_no_reserve:
mutex_unlock(&dev_priv->cmdbuf_mutex);
- ttm_write_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1047,7 +1045,6 @@ void vmw_resource_unpin(struct vmw_resource *res)
struct vmw_private *dev_priv = res->dev_priv;
int ret;
- (void) ttm_read_lock(&dev_priv->reservation_sem, false);
mutex_lock(&dev_priv->cmdbuf_mutex);
ret = vmw_resource_reserve(res, false, true);
@@ -1065,7 +1062,6 @@ void vmw_resource_unpin(struct vmw_resource *res)
vmw_resource_unreserve(res, false, false, false, NULL, 0UL);
mutex_unlock(&dev_priv->cmdbuf_mutex);
- ttm_read_unlock(&dev_priv->reservation_sem);
}
/**
@@ -1079,7 +1075,7 @@ enum vmw_res_type vmw_res_type(const struct vmw_resource *res)
}
/**
- * vmw_resource_update_dirty - Update a resource's dirty tracker with a
+ * vmw_resource_dirty_update - Update a resource's dirty tracker with a
* sequential range of touched backing store memory.
* @res: The resource.
* @start: The first page touched.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index 9bc9a0714664..145430d14219 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -1222,7 +1222,7 @@ static void vmw_sou_bo_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_kms_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
+ * vmw_kms_sou_do_bo_dirty - Dirty part of a buffer-object backed framebuffer
*
* @dev_priv: Pointer to the device private structure.
* @framebuffer: Pointer to the buffer-object backed framebuffer.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index a0db06564013..b391975871a5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -876,15 +876,9 @@ static int vmw_shader_define(struct drm_device *dev, struct drm_file *file_priv,
goto out_bad_arg;
}
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- goto out_bad_arg;
-
ret = vmw_user_shader_alloc(dev_priv, buffer, size, offset,
shader_type, num_input_sig,
num_output_sig, tfile, shader_handle);
-
- ttm_read_unlock(&dev_priv->reservation_sem);
out_bad_arg:
vmw_bo_unreference(&buffer);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
index 73e9a487e659..33b69a70cfe3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_simple_resource.c
@@ -162,13 +162,8 @@ vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
TTM_OBJ_EXTRA_SIZE;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (ret)
- return ret;
-
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
&ctx);
- ttm_read_unlock(&dev_priv->reservation_sem);
if (ret) {
if (ret != -ERESTARTSYS)
DRM_ERROR("Out of graphics memory for %s"
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
index 2877c7b43bd7..c3a8d6e8380e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_so.c
@@ -33,7 +33,7 @@
* The currently only reason we need to keep track of views is that if we
* destroy a hardware surface, all views pointing to it must also be destroyed,
* otherwise the device will error.
- * So in particuar if a surface is evicted, we must destroy all views pointing
+ * So in particular if a surface is evicted, we must destroy all views pointing
* to it, and all context bindings of that view. Similarly we must restore
* the view bindings, views and surfaces pointed to by the views when a
* context is referenced in the command stream.
@@ -90,7 +90,7 @@ static const struct vmw_res_func vmw_view_func = {
};
/**
- * struct vmw_view - view define command body stub
+ * struct vmw_view_define - view define command body stub
*
* @view_id: The device id of the view being defined
* @sid: The surface id of the view being defined
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 7b11f0285786..9e236f9f8a8a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -742,7 +742,7 @@ out_unref:
}
/**
- * vmw_stdu_surface_clip - Callback to encode a surface copy command cliprect
+ * vmw_kms_stdu_surface_clip - Callback to encode a surface copy command cliprect
*
* @dirty: The closure structure.
*
@@ -780,7 +780,7 @@ static void vmw_kms_stdu_surface_clip(struct vmw_kms_dirty *dirty)
}
/**
- * vmw_stdu_surface_fifo_commit - Callback to fill in and submit a surface
+ * vmw_kms_stdu_surface_fifo_commit - Callback to fill in and submit a surface
* copy command.
*
* @dirty: The closure structure.
@@ -1571,7 +1571,7 @@ static int vmw_stdu_plane_update_surface(struct vmw_private *dev_priv,
/**
* vmw_stdu_primary_plane_atomic_update - formally switches STDU to new plane
* @plane: display plane
- * @old_state: Only used to get crtc info
+ * @state: Only used to get crtc info
*
* Formally update stdu->display_srf to the new plane, and bind the new
* plane STDU. This function is called during the commit phase when
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index c3e55c1376eb..8ead06574850 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -680,7 +680,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
}
/**
- * vmw_user_surface_free - User visible surface TTM base object destructor
+ * vmw_user_surface_base_release - User visible surface TTM base object destructor
*
* @p_base: Pointer to a pointer to a TTM base object
* embedded in a struct vmw_user_surface.
@@ -702,7 +702,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
}
/**
- * vmw_user_surface_destroy_ioctl - Ioctl function implementing
+ * vmw_surface_destroy_ioctl - Ioctl function implementing
* the user surface destroy functionality.
*
* @dev: Pointer to a struct drm_device.
@@ -719,7 +719,7 @@ int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
}
/**
- * vmw_user_surface_define_ioctl - Ioctl function implementing
+ * vmw_surface_define_ioctl - Ioctl function implementing
* the user surface define functionality.
*
* @dev: Pointer to a struct drm_device.
@@ -779,10 +779,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
size, &ctx);
if (unlikely(ret != 0)) {
@@ -913,7 +909,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
rep->sid = user_srf->prime.base.handle;
vmw_resource_unreference(&res);
- ttm_read_unlock(&dev_priv->reservation_sem);
return 0;
out_no_copy:
kfree(srf->offsets);
@@ -924,7 +919,6 @@ out_no_sizes:
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1007,7 +1001,7 @@ out_no_lookup:
}
/**
- * vmw_user_surface_define_ioctl - Ioctl function implementing
+ * vmw_surface_reference_ioctl - Ioctl function implementing
* the user surface reference functionality.
*
* @dev: Pointer to a struct drm_device.
@@ -1061,7 +1055,7 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
}
/**
- * vmw_surface_define_encode - Encode a surface_define command.
+ * vmw_gb_surface_create - Encode a surface_define command.
*
* @res: Pointer to a struct vmw_resource embedded in a struct
* vmw_surface.
@@ -1542,10 +1536,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
if (drm_is_primary_client(file_priv))
user_srf->master = drm_master_get(file_priv->master);
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
res = &user_srf->srf.res;
if (req->base.buffer_handle != SVGA3D_INVALID_ID) {
@@ -1627,7 +1617,6 @@ vmw_gb_surface_define_internal(struct drm_device *dev,
vmw_resource_unreference(&res);
out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1804,6 +1793,19 @@ static void vmw_surface_tex_dirty_range_add(struct vmw_resource *res,
svga3dsurface_get_loc(cache, &loc2, end - 1);
svga3dsurface_inc_loc(cache, &loc2);
+ if (loc1.sheet != loc2.sheet) {
+ u32 sub_res;
+
+ /*
+ * Multiple multisample sheets. To do this in an optimized
+ * fashion, compute the dirty region for each sheet and the
+ * resulting union. Since this is not a common case, just dirty
+ * the whole surface.
+ */
+ for (sub_res = 0; sub_res < dirty->num_subres; ++sub_res)
+ vmw_subres_dirty_full(dirty, sub_res);
+ return;
+ }
if (loc1.sub_resource + 1 == loc2.sub_resource) {
/* Dirty range covers a single sub-resource */
vmw_subres_dirty_add(dirty, &loc1, &loc2);
@@ -2112,10 +2114,6 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
if (req->sizes != NULL)
return -EINVAL;
- ret = ttm_read_lock(&dev_priv->reservation_sem, true);
- if (unlikely(ret != 0))
- return ret;
-
ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
user_accounting_size, &ctx);
if (ret != 0) {
@@ -2179,13 +2177,11 @@ int vmw_gb_surface_define(struct vmw_private *dev_priv,
*/
ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
out_no_user_srf:
ttm_mem_global_free(vmw_mem_glob(dev_priv), user_accounting_size);
out_unlock:
- ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
index eb63cbe64909..5ccc35b3194c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c
@@ -28,15 +28,16 @@ static struct vmw_thp_manager *to_thp_manager(struct ttm_resource_manager *man)
static const struct ttm_resource_manager_func vmw_thp_func;
-static int vmw_thp_insert_aligned(struct drm_mm *mm, struct drm_mm_node *node,
+static int vmw_thp_insert_aligned(struct ttm_buffer_object *bo,
+ struct drm_mm *mm, struct drm_mm_node *node,
unsigned long align_pages,
const struct ttm_place *place,
struct ttm_resource *mem,
unsigned long lpfn,
enum drm_mm_insert_mode mode)
{
- if (align_pages >= mem->page_alignment &&
- (!mem->page_alignment || align_pages % mem->page_alignment == 0)) {
+ if (align_pages >= bo->page_alignment &&
+ (!bo->page_alignment || align_pages % bo->page_alignment == 0)) {
return drm_mm_insert_node_in_range(mm, node,
mem->num_pages,
align_pages, 0,
@@ -75,7 +76,7 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
if (IS_ENABLED(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)) {
align_pages = (HPAGE_PUD_SIZE >> PAGE_SHIFT);
if (mem->num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(mm, node, align_pages,
+ ret = vmw_thp_insert_aligned(bo, mm, node, align_pages,
place, mem, lpfn, mode);
if (!ret)
goto found_unlock;
@@ -84,14 +85,14 @@ static int vmw_thp_get_node(struct ttm_resource_manager *man,
align_pages = (HPAGE_PMD_SIZE >> PAGE_SHIFT);
if (mem->num_pages >= align_pages) {
- ret = vmw_thp_insert_aligned(mm, node, align_pages, place, mem,
- lpfn, mode);
+ ret = vmw_thp_insert_aligned(bo, mm, node, align_pages, place,
+ mem, lpfn, mode);
if (!ret)
goto found_unlock;
}
ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages,
- mem->page_alignment, 0,
+ bo->page_alignment, 0,
place->fpfn, lpfn, mode);
found_unlock:
spin_unlock(&rman->lock);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index 2dc031fe4a90..7bfe83c936ff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -200,7 +200,8 @@ struct vmw_ttm_tt {
const size_t vmw_tt_size = sizeof(struct vmw_ttm_tt);
/**
- * Helper functions to advance a struct vmw_piter iterator.
+ * __vmw_piter_non_sg_next: Helper functions to advance
+ * a struct vmw_piter iterator.
*
* @viter: Pointer to the iterator.
*
@@ -222,7 +223,8 @@ static bool __vmw_piter_sg_next(struct vmw_piter *viter)
/**
- * Helper functions to return a pointer to the current page.
+ * __vmw_piter_non_sg_page: Helper functions to return a pointer
+ * to the current page.
*
* @viter: Pointer to the iterator
*
@@ -236,7 +238,8 @@ static struct page *__vmw_piter_non_sg_page(struct vmw_piter *viter)
}
/**
- * Helper functions to return the DMA address of the current page.
+ * __vmw_piter_phys_addr: Helper functions to return the DMA
+ * address of the current page.
*
* @viter: Pointer to the iterator
*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
index e7570f422400..8338b1d20f2a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_validation.c
@@ -82,7 +82,7 @@ struct vmw_validation_res_node {
u32 reserved : 1;
u32 dirty : 1;
u32 dirty_set : 1;
- unsigned long private[0];
+ unsigned long private[];
};
/**
@@ -809,7 +809,7 @@ void vmw_validation_revert(struct vmw_validation_context *ctx)
}
/**
- * vmw_validation_cone - Commit validation actions after command submission
+ * vmw_validation_done - Commit validation actions after command submission
* success.
* @ctx: The validation context.
* @fence: Fence with which to fence all buffer objects taking part in the
diff --git a/drivers/gpu/drm/xlnx/zynqmp_dp.c b/drivers/gpu/drm/xlnx/zynqmp_dp.c
index 59d1fb017da0..82430ca9b913 100644
--- a/drivers/gpu/drm/xlnx/zynqmp_dp.c
+++ b/drivers/gpu/drm/xlnx/zynqmp_dp.c
@@ -713,7 +713,7 @@ static int zynqmp_dp_link_train_cr(struct zynqmp_dp *dp)
if (ret)
return ret;
- drm_dp_link_train_clock_recovery_delay(dp->dpcd);
+ drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
if (ret < 0)
return ret;
@@ -778,7 +778,7 @@ static int zynqmp_dp_link_train_ce(struct zynqmp_dp *dp)
if (ret)
return ret;
- drm_dp_link_train_channel_eq_delay(dp->dpcd);
+ drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
ret = drm_dp_dpcd_read_link_status(&dp->aux, link_status);
if (ret < 0)
return ret;
@@ -1069,6 +1069,7 @@ static int zynqmp_dp_aux_init(struct zynqmp_dp *dp)
dp->aux.name = "ZynqMP DP AUX";
dp->aux.dev = dp->dev;
+ dp->aux.drm_dev = dp->drm;
dp->aux.transfer = zynqmp_dp_aux_transfer;
return drm_dp_aux_register(&dp->aux);
diff --git a/drivers/gpu/drm/zte/Kconfig b/drivers/gpu/drm/zte/Kconfig
index 90ebaedc11fd..aa8594190b50 100644
--- a/drivers/gpu/drm/zte/Kconfig
+++ b/drivers/gpu/drm/zte/Kconfig
@@ -3,7 +3,6 @@ config DRM_ZTE
tristate "DRM Support for ZTE SoCs"
depends on DRM && ARCH_ZX
select DRM_KMS_CMA_HELPER
- select DRM_KMS_FB_HELPER
select DRM_KMS_HELPER
select SND_SOC_HDMI_CODEC if SND_SOC
select VIDEOMODE_HELPERS
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 4f02db65dede..5e1ad0c92bfc 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -2192,7 +2192,7 @@ config FB_HYPERV
config FB_SIMPLE
bool "Simple framebuffer support"
- depends on (FB = y)
+ depends on (FB = y) && !DRM_SIMPLEDRM
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index 7f8debd2da06..ad598257ab38 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -992,7 +992,7 @@ static int imxfb_probe(struct platform_device *pdev)
info->screen_buffer = dma_alloc_wc(&pdev->dev, fbi->map_size,
&fbi->map_dma, GFP_KERNEL);
if (!info->screen_buffer) {
- dev_err(&pdev->dev, "Failed to allocate video RAM: %d\n", ret);
+ dev_err(&pdev->dev, "Failed to allocate video RAM\n");
ret = -ENOMEM;
goto failed_map;
}
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index ea34ca146b82..6a5716655619 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -153,6 +153,8 @@ struct dw_hdmi_plat_data {
const struct dw_hdmi_phy_config *phy_config;
int (*configure_phy)(struct dw_hdmi *hdmi, void *data,
unsigned long mpixelclock);
+
+ unsigned int disable_cec : 1;
};
struct dw_hdmi *dw_hdmi_probe(struct platform_device *pdev,
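/*
 * Illustrative sketch, not part of the patch above: with the new disable_cec
 * flag in struct dw_hdmi_plat_data, a hypothetical platform glue driver could
 * keep dw-hdmi from registering its CEC sub-device on SoCs where CEC is not
 * wired up. All "foo" names are placeholders.
 */
#include <linux/err.h>
#include <linux/platform_device.h>
#include <drm/bridge/dw_hdmi.h>

static struct dw_hdmi_plat_data foo_hdmi_plat_data = {
	.disable_cec = 1,	/* this SoC has no usable CEC pin */
};

static int foo_hdmi_probe(struct platform_device *pdev)
{
	struct dw_hdmi *hdmi = dw_hdmi_probe(pdev, &foo_hdmi_plat_data);

	return PTR_ERR_OR_ZERO(hdmi);
}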
diff --git a/include/drm/drm_agpsupport.h b/include/drm/drm_agpsupport.h
deleted file mode 100644
index f3136750c490..000000000000
--- a/include/drm/drm_agpsupport.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _DRM_AGPSUPPORT_H_
-#define _DRM_AGPSUPPORT_H_
-
-#include <linux/agp_backend.h>
-#include <linux/kernel.h>
-#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/types.h>
-#include <uapi/drm/drm.h>
-
-struct drm_device;
-struct drm_file;
-
-struct drm_agp_head {
- struct agp_kern_info agp_info;
- struct list_head memory;
- unsigned long mode;
- struct agp_bridge_data *bridge;
- int enabled;
- int acquired;
- unsigned long base;
- int agp_mtrr;
- int cant_use_aperture;
- unsigned long page_mask;
-};
-
-#if IS_ENABLED(CONFIG_AGP)
-
-struct drm_agp_head *drm_agp_init(struct drm_device *dev);
-void drm_legacy_agp_clear(struct drm_device *dev);
-int drm_agp_acquire(struct drm_device *dev);
-int drm_agp_acquire_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_release(struct drm_device *dev);
-int drm_agp_release_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
-int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_info(struct drm_device *dev, struct drm_agp_info *info);
-int drm_agp_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_agp_alloc_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
-int drm_agp_free_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_agp_unbind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
-int drm_agp_bind_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-#else /* CONFIG_AGP */
-
-static inline struct drm_agp_head *drm_agp_init(struct drm_device *dev)
-{
- return NULL;
-}
-
-static inline void drm_legacy_agp_clear(struct drm_device *dev)
-{
-}
-
-static inline int drm_agp_acquire(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_release(struct drm_device *dev)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_enable(struct drm_device *dev,
- struct drm_agp_mode mode)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_info(struct drm_device *dev,
- struct drm_agp_info *info)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_alloc(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_free(struct drm_device *dev,
- struct drm_agp_buffer *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_unbind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-static inline int drm_agp_bind(struct drm_device *dev,
- struct drm_agp_binding *request)
-{
- return -ENODEV;
-}
-
-#endif /* CONFIG_AGP */
-
-#endif /* _DRM_AGPSUPPORT_H_ */
diff --git a/include/drm/drm_aperture.h b/include/drm/drm_aperture.h
new file mode 100644
index 000000000000..6c148078780c
--- /dev/null
+++ b/include/drm/drm_aperture.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: MIT */
+
+#ifndef _DRM_APERTURE_H_
+#define _DRM_APERTURE_H_
+
+#include <linux/types.h>
+
+struct drm_device;
+struct pci_dev;
+
+int devm_aperture_acquire_from_firmware(struct drm_device *dev, resource_size_t base,
+ resource_size_t size);
+
+int drm_aperture_remove_conflicting_framebuffers(resource_size_t base, resource_size_t size,
+ bool primary, const char *name);
+
+int drm_aperture_remove_conflicting_pci_framebuffers(struct pci_dev *pdev, const char *name);
+
+/**
+ * drm_aperture_remove_framebuffers - remove all existing framebuffers
+ * @primary: also kick vga16fb if present
+ * @name: requesting driver name
+ *
+ * This function removes all graphics device drivers. Use this function on systems
+ * that can have their framebuffer located anywhere in memory.
+ *
+ * Returns:
+ * 0 on success, or a negative errno code otherwise
+ */
+static inline int drm_aperture_remove_framebuffers(bool primary, const char *name)
+{
+ return drm_aperture_remove_conflicting_framebuffers(0, (resource_size_t)-1, primary, name);
+}
+
+#endif
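/*
 * Illustrative sketch, not part of the patch: a hypothetical PCI DRM driver
 * would call the new aperture helper early in probe to evict firmware
 * framebuffer drivers (efifb, vesafb, ...) sitting on its BARs before taking
 * over the device. "foo" names are placeholders.
 */
#include <linux/pci.h>
#include <drm/drm_aperture.h>

static int foo_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int ret;

	/* Remove conflicting framebuffers covering any of pdev's BARs. */
	ret = drm_aperture_remove_conflicting_pci_framebuffers(pdev, "foodrmfb");
	if (ret)
		return ret;

	/* ... allocate the drm_device and initialize the hardware ... */
	return 0;
}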
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 1922b278ffad..714d1a01c065 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -1671,6 +1671,10 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
+int drm_connector_attach_colorspace_property(struct drm_connector *connector);
+int drm_connector_attach_hdr_output_metadata_property(struct drm_connector *connector);
+bool drm_connector_atomic_hdr_metadata_equal(struct drm_connector_state *old_state,
+ struct drm_connector_state *new_state);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector);
int drm_mode_create_dp_colorspace_property(struct drm_connector *connector);
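/*
 * Illustrative sketch, not part of the patch: a hypothetical HDMI connector
 * attaching the new colorspace and HDR output metadata properties during
 * connector initialization. foo_connector_init() is a placeholder.
 */
#include <drm/drm_connector.h>

static int foo_connector_init(struct drm_connector *connector)
{
	int ret;

	/* Create the HDMI flavour of the Colorspace property, then attach it. */
	ret = drm_mode_create_hdmi_colorspace_property(connector);
	if (ret)
		return ret;
	drm_connector_attach_colorspace_property(connector);

	/* Expose HDR_OUTPUT_METADATA; atomic drivers can additionally use
	 * drm_connector_atomic_hdr_metadata_equal() in their atomic checks. */
	drm_connector_attach_hdr_output_metadata_property(connector);

	return 0;
}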
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index d647223e8390..f588f967bb14 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -276,12 +276,6 @@ struct drm_device {
*/
spinlock_t event_lock;
- /** @agp: AGP data */
- struct drm_agp_head *agp;
-
- /** @pdev: PCI device structure */
- struct pci_dev *pdev;
-
/** @num_crtcs: Number of CRTCs on this device */
unsigned int num_crtcs;
@@ -329,6 +323,9 @@ struct drm_device {
struct pci_controller *hose;
#endif
+ /* AGP data */
+ struct drm_agp_head *agp;
+
/* Context handle management - linked list of context handles */
struct list_head ctxlist;
diff --git a/include/drm/drm_dp_dual_mode_helper.h b/include/drm/drm_dp_dual_mode_helper.h
index 4c42db81fcb4..7ee482265087 100644
--- a/include/drm/drm_dp_dual_mode_helper.h
+++ b/include/drm/drm_dp_dual_mode_helper.h
@@ -62,6 +62,7 @@
#define DP_DUAL_MODE_LSPCON_CURRENT_MODE 0x41
#define DP_DUAL_MODE_LSPCON_MODE_PCON 0x1
+struct drm_device;
struct i2c_adapter;
ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter,
@@ -103,17 +104,18 @@ enum drm_dp_dual_mode_type {
DRM_DP_DUAL_MODE_LSPCON,
};
-enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(struct i2c_adapter *adapter);
-int drm_dp_dual_mode_max_tmds_clock(enum drm_dp_dual_mode_type type,
+enum drm_dp_dual_mode_type
+drm_dp_dual_mode_detect(const struct drm_device *dev, struct i2c_adapter *adapter);
+int drm_dp_dual_mode_max_tmds_clock(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter);
-int drm_dp_dual_mode_get_tmds_output(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_get_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool *enabled);
-int drm_dp_dual_mode_set_tmds_output(enum drm_dp_dual_mode_type type,
+int drm_dp_dual_mode_set_tmds_output(const struct drm_device *dev, enum drm_dp_dual_mode_type type,
struct i2c_adapter *adapter, bool enable);
const char *drm_dp_get_dual_mode_type_name(enum drm_dp_dual_mode_type type);
-int drm_lspcon_get_mode(struct i2c_adapter *adapter,
+int drm_lspcon_get_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode *current_mode);
-int drm_lspcon_set_mode(struct i2c_adapter *adapter,
+int drm_lspcon_set_mode(const struct drm_device *dev, struct i2c_adapter *adapter,
enum drm_lspcon_mode reqd_mode);
#endif
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 1e85c2021f2f..e932b2c40095 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -29,6 +29,7 @@
#include <drm/drm_connector.h>
struct drm_device;
+struct drm_dp_aux;
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
@@ -1482,10 +1483,13 @@ u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZ
#define DP_LTTPR_COMMON_CAP_SIZE 8
#define DP_LTTPR_PHY_CAP_SIZE 3
-void drm_dp_link_train_clock_recovery_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_link_train_clock_recovery_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
void drm_dp_lttpr_link_train_clock_recovery_delay(void);
-void drm_dp_link_train_channel_eq_delay(const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
-void drm_dp_lttpr_link_train_channel_eq_delay(const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
+void drm_dp_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 dpcd[DP_RECEIVER_CAP_SIZE]);
+void drm_dp_lttpr_link_train_channel_eq_delay(const struct drm_dp_aux *aux,
+ const u8 caps[DP_LTTPR_PHY_CAP_SIZE]);
u8 drm_dp_link_rate_to_bw_code(int link_rate);
int drm_dp_bw_code_to_link_rate(u8 link_bw);
@@ -1840,6 +1844,8 @@ struct drm_dp_aux_cec {
* @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
* @ddc: I2C adapter that can be used for I2C-over-AUX communication
* @dev: pointer to struct device that is the parent for this AUX channel
+ * @drm_dev: pointer to the &drm_device that owns this AUX channel. Beware, this
+ * may be %NULL before drm_dp_aux_register() has been called.
* @crtc: backpointer to the crtc that is currently using this AUX channel
* @hw_mutex: internal mutex used for locking transfers
* @crc_work: worker that captures CRCs for each frame
@@ -1847,7 +1853,11 @@ struct drm_dp_aux_cec {
* @transfer: transfers a message representing a single AUX transaction
*
* The @dev field should be set to a pointer to the device that implements the
- * AUX channel.
+ * AUX channel. As well, the @drm_dev field should be set to the &drm_device
+ * that will be using this AUX channel as early as possible. For many graphics
+ * drivers this should happen before drm_dp_aux_init(), however it's perfectly
+ * fine to set this field later so long as it's assigned before calling
+ * drm_dp_aux_register().
*
* The @name field may be used to specify the name of the I2C adapter. If set to
* %NULL, dev_name() of @dev will be used.
@@ -1879,6 +1889,7 @@ struct drm_dp_aux {
const char *name;
struct i2c_adapter ddc;
struct device *dev;
+ struct drm_device *drm_dev;
struct drm_crtc *crtc;
struct mutex hw_mutex;
struct work_struct crc_work;
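/*
 * Illustrative sketch, not part of the patch: with the new drm_dev member in
 * struct drm_dp_aux, a hypothetical DP encoder driver wires up its AUX channel
 * and uses the updated link-training delay helpers roughly as below.
 * struct foo_dp and foo_dp_aux_transfer() are placeholders.
 */
#include <drm/drm_dp_helper.h>

struct foo_dp {
	struct device *dev;
	struct drm_device *drm;
	struct drm_dp_aux aux;
	u8 dpcd[DP_RECEIVER_CAP_SIZE];
};

static ssize_t foo_dp_aux_transfer(struct drm_dp_aux *aux,
				   struct drm_dp_aux_msg *msg);

static int foo_dp_aux_init(struct foo_dp *dp)
{
	dp->aux.name = "foo DP AUX";
	dp->aux.dev = dp->dev;		/* parent struct device */
	dp->aux.drm_dev = dp->drm;	/* must be set before drm_dp_aux_register() */
	dp->aux.transfer = foo_dp_aux_transfer;

	return drm_dp_aux_register(&dp->aux);
}

static void foo_dp_training_delays(struct foo_dp *dp)
{
	/* Both helpers now take the AUX channel, so warnings about bogus
	 * DPCD delay values can name the owning DRM device. */
	drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd);
	drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd);
}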
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index bd1c39907b92..c87a829b6498 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -594,6 +594,14 @@ struct drm_dp_mst_topology_mgr {
*/
int max_payloads;
/**
+ * @max_lane_count: maximum number of lanes the GPU can drive.
+ */
+ u8 max_lane_count;
+ /**
+ * @max_link_rate: maximum link rate per lane GPU can output.
+ */
+ u8 max_link_rate;
+ /**
* @conn_base_id: DRM connector ID this mgr is connected to. Only used
* to build the MST connector path value.
*/
@@ -765,7 +773,9 @@ struct drm_dp_mst_topology_mgr {
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
struct drm_device *dev, struct drm_dp_aux *aux,
int max_dpcd_transaction_bytes,
- int max_payloads, int conn_base_id);
+ int max_payloads,
+ u8 max_lane_count, u8 max_link_rate,
+ int conn_base_id);
void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr);
@@ -783,7 +793,8 @@ drm_dp_mst_detect_port(struct drm_connector *connector,
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-int drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count);
+int drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
+ int link_rate, int link_lane_count);
int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
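/*
 * Illustrative sketch, not part of the patch: drm_dp_mst_topology_mgr_init()
 * now also takes the maximum lane count and maximum per-lane link rate the
 * source can drive. This example assumes the rate is passed as a DPCD
 * bandwidth code, as produced by drm_dp_link_rate_to_bw_code(); the numeric
 * limits and "foo" names are placeholders.
 */
#include <drm/drm_dp_mst_helper.h>

static int foo_mst_init(struct drm_dp_mst_topology_mgr *mgr,
			struct drm_device *dev, struct drm_dp_aux *aux,
			int conn_base_id)
{
	return drm_dp_mst_topology_mgr_init(mgr, dev, aux,
					    16,	/* max_dpcd_transaction_bytes */
					    4,	/* max_payloads */
					    4,	/* max_lane_count */
					    drm_dp_link_rate_to_bw_code(540000),
					    conn_base_id);
}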
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 3b273f9ca39a..3af4624368d8 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -36,7 +36,6 @@ struct drm_fb_helper;
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <linux/kgdb.h>
-#include <linux/vgaarb.h>
enum mode_set_atomic {
LEAVE_ATOMIC_MODE_SET,
@@ -451,54 +450,4 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
#endif
-/**
- * drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
- * @a: memory range, users of which are to be removed
- * @name: requesting driver name
- * @primary: also kick vga16fb if present
- *
- * This function removes framebuffer devices (initialized by firmware/bootloader)
- * which use memory range described by @a. If @a is NULL all such devices are
- * removed.
- */
-static inline int
-drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
- const char *name, bool primary)
-{
-#if IS_REACHABLE(CONFIG_FB)
- return remove_conflicting_framebuffers(a, name, primary);
-#else
- return 0;
-#endif
-}
-
-/**
- * drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
- * @pdev: PCI device
- * @name: requesting driver name
- *
- * This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for any of @pdev's memory bars.
- *
- * The function assumes that PCI device with shadowed ROM drives a primary
- * display and so kicks out vga16fb.
- */
-static inline int
-drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- const char *name)
-{
- int ret = 0;
-
- /*
- * WARNING: Apparently we must kick fbdev drivers before vgacon,
- * otherwise the vga fbdev driver falls over.
- */
-#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, name);
-#endif
- if (ret == 0)
- ret = vga_remove_vgacon(pdev);
- return ret;
-}
-
#endif
diff --git a/include/drm/drm_format_helper.h b/include/drm/drm_format_helper.h
index 5f9e37032468..4e0258a61311 100644
--- a/include/drm/drm_format_helper.h
+++ b/include/drm/drm_format_helper.h
@@ -11,7 +11,7 @@ struct drm_rect;
void drm_fb_memcpy(void *dst, void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip);
-void drm_fb_memcpy_dstclip(void __iomem *dst, void *vaddr,
+void drm_fb_memcpy_dstclip(void __iomem *dst, unsigned int dst_pitch, void *vaddr,
struct drm_framebuffer *fb,
struct drm_rect *clip);
void drm_fb_swab(void *dst, void *src, struct drm_framebuffer *fb,
@@ -28,4 +28,12 @@ void drm_fb_xrgb8888_to_rgb888_dstclip(void __iomem *dst, unsigned int dst_pitch
void drm_fb_xrgb8888_to_gray8(u8 *dst, void *vaddr, struct drm_framebuffer *fb,
struct drm_rect *clip);
+int drm_fb_blit_rect_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ uint32_t dst_format, void *vmap,
+ struct drm_framebuffer *fb,
+ struct drm_rect *rect);
+int drm_fb_blit_dstclip(void __iomem *dst, unsigned int dst_pitch,
+ uint32_t dst_format, void *vmap,
+ struct drm_framebuffer *fb);
+
#endif /* __LINUX_DRM_FORMAT_HELPER_H */
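/*
 * Illustrative sketch, not part of the patch: a hypothetical driver with a
 * vmapped shadow buffer could use the new drm_fb_blit_rect_dstclip() helper
 * to copy a damage rectangle into its hardware framebuffer, leaving the
 * conversion to @hw_format to the helper where supported. All "foo"/"hw_*"
 * names are placeholders.
 */
#include <drm/drm_format_helper.h>

static int foo_flush_rect(void __iomem *hw_base, unsigned int hw_pitch,
			  uint32_t hw_format, void *shadow,
			  struct drm_framebuffer *fb, struct drm_rect *damage)
{
	/* Copies only the @damage region of @fb into the destination. */
	return drm_fb_blit_rect_dstclip(hw_base, hw_pitch, hw_format,
					shadow, fb, damage);
}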
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
index 7c6d874910b8..c1aa02bd4c89 100644
--- a/include/drm/drm_gem_ttm_helper.h
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -5,8 +5,8 @@
#include <linux/kernel.h>
-#include <drm/drm_gem.h>
#include <drm/drm_device.h>
+#include <drm/drm_gem.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
@@ -24,4 +24,7 @@ void drm_gem_ttm_vunmap(struct drm_gem_object *gem,
int drm_gem_ttm_mmap(struct drm_gem_object *gem,
struct vm_area_struct *vma);
+int drm_gem_ttm_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
+
#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 288055d397d9..27ed7e9243b9 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -5,6 +5,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_gem_ttm_helper.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_modes.h>
#include <drm/ttm/ttm_bo_api.h>
@@ -93,7 +94,6 @@ struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
size_t size,
unsigned long pg_align);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
-u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
int drm_gem_vram_pin(struct drm_gem_vram_object *gbo, unsigned long pl_flag);
int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
@@ -113,9 +113,6 @@ int drm_gem_vram_fill_create_dumb(struct drm_file *file,
int drm_gem_vram_driver_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset);
/*
* Helpers for struct drm_plane_helper_funcs
@@ -149,7 +146,7 @@ void drm_gem_vram_simple_display_pipe_cleanup_fb(
#define DRM_GEM_VRAM_DRIVER \
.debugfs_init = drm_vram_mm_debugfs_init, \
.dumb_create = drm_gem_vram_driver_dumb_create, \
- .dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \
+ .dumb_map_offset = drm_gem_ttm_dumb_map_offset, \
.gem_prime_mmap = drm_gem_prime_mmap
/*
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 8ed04e9be997..b17e79e12bc2 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -33,6 +33,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/agp_backend.h>
+
#include <drm/drm.h>
#include <drm/drm_auth.h>
#include <drm/drm_hashtab.h>
@@ -194,10 +196,6 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock);
#ifdef CONFIG_PCI
-struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
- size_t align);
-void drm_pci_free(struct drm_device *dev, struct drm_dma_handle *dmah);
-
int drm_legacy_pci_init(const struct drm_driver *driver,
struct pci_driver *pdriver);
void drm_legacy_pci_exit(const struct drm_driver *driver,
@@ -229,6 +227,86 @@ static inline void drm_legacy_pci_exit(const struct drm_driver *driver,
#endif
+/*
+ * AGP Support
+ */
+
+struct drm_agp_head {
+ struct agp_kern_info agp_info;
+ struct list_head memory;
+ unsigned long mode;
+ struct agp_bridge_data *bridge;
+ int enabled;
+ int acquired;
+ unsigned long base;
+ int agp_mtrr;
+ int cant_use_aperture;
+ unsigned long page_mask;
+};
+
+#if IS_ENABLED(CONFIG_DRM_LEGACY) && IS_ENABLED(CONFIG_AGP)
+struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev);
+int drm_legacy_agp_acquire(struct drm_device *dev);
+int drm_legacy_agp_release(struct drm_device *dev);
+int drm_legacy_agp_enable(struct drm_device *dev, struct drm_agp_mode mode);
+int drm_legacy_agp_info(struct drm_device *dev, struct drm_agp_info *info);
+int drm_legacy_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_legacy_agp_free(struct drm_device *dev, struct drm_agp_buffer *request);
+int drm_legacy_agp_unbind(struct drm_device *dev, struct drm_agp_binding *request);
+int drm_legacy_agp_bind(struct drm_device *dev, struct drm_agp_binding *request);
+#else
+static inline struct drm_agp_head *drm_legacy_agp_init(struct drm_device *dev)
+{
+ return NULL;
+}
+
+static inline int drm_legacy_agp_acquire(struct drm_device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_release(struct drm_device *dev)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_enable(struct drm_device *dev,
+ struct drm_agp_mode mode)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_info(struct drm_device *dev,
+ struct drm_agp_info *info)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_alloc(struct drm_device *dev,
+ struct drm_agp_buffer *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_free(struct drm_device *dev,
+ struct drm_agp_buffer *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_unbind(struct drm_device *dev,
+ struct drm_agp_binding *request)
+{
+ return -ENODEV;
+}
+
+static inline int drm_legacy_agp_bind(struct drm_device *dev,
+ struct drm_agp_binding *request)
+{
+ return -ENODEV;
+}
+#endif
+
/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
void drm_legacy_ioremap_wc(struct drm_local_map *map, struct drm_device *dev);
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index ab424ddd7665..1ddf7783fdf7 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -909,6 +909,8 @@ struct drm_mode_config {
* @allow_fb_modifiers:
*
* Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call.
+ * Note that drivers should not set this directly, it is automatically
+ * set in drm_universal_plane_init().
*
* IMPORTANT:
*
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index a3c58c941bdc..9b66be54dd16 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -443,25 +443,25 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
#define drm_dbg_core(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_CORE, fmt, ##__VA_ARGS__)
#define drm_dbg(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define drm_dbg_kms(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__)
#define drm_dbg_prime(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
#define drm_dbg_atomic(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
#define drm_dbg_vbl(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_VBL, fmt, ##__VA_ARGS__)
#define drm_dbg_state(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_STATE, fmt, ##__VA_ARGS__)
#define drm_dbg_lease(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
#define drm_dbg_dp(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DP, fmt, ##__VA_ARGS__)
#define drm_dbg_drmres(drm, fmt, ...) \
- drm_dev_dbg((drm)->dev, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
+ drm_dev_dbg((drm) ? (drm)->dev : NULL, DRM_UT_DRMRES, fmt, ##__VA_ARGS__)
/*
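/*
 * Illustrative sketch, not part of the patch: the drm_dbg_*() macros now
 * tolerate a NULL drm device, so code that runs before (or without) a
 * struct drm_device can share the same logging calls. foo_early_setup() is
 * a placeholder.
 */
#include <drm/drm_print.h>

static void foo_early_setup(struct drm_device *drm)
{
	/* @drm may be NULL here; the message is then logged without a
	 * device prefix instead of dereferencing a NULL pointer. */
	drm_dbg_kms(drm, "configuring hardware before DRM device registration\n");
}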
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 2155e2e38aec..639521880c29 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -86,6 +86,7 @@ struct ttm_tt;
* @base: drm_gem_object superclass data.
* @bdev: Pointer to the buffer object device structure.
* @type: The bo type.
+ * @page_alignment: Page alignment.
* @destroy: Destruction function. If NULL, kfree is used.
* @num_pages: Actual number of pages.
* @kref: Reference count of this buffer object. When this refcount reaches
@@ -123,6 +124,7 @@ struct ttm_buffer_object {
struct ttm_device *bdev;
enum ttm_bo_type type;
+ uint32_t page_alignment;
void (*destroy) (struct ttm_buffer_object *);
/**
@@ -563,25 +565,6 @@ int ttm_bo_swapout(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx,
gfp_t gfp_flags);
/**
- * ttm_bo_uses_embedded_gem_object - check if the given bo uses the
- * embedded drm_gem_object.
- *
- * Most ttm drivers are using gem too, so the embedded
- * ttm_buffer_object.base will be initialized by the driver (before
- * calling ttm_bo_init). It is also possible to use ttm without gem
- * though (vmwgfx does that).
- *
- * This helper will figure whenever a given ttm bo is a gem object too
- * or not.
- *
- * @bo: The bo to check.
- */
-static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
-{
- return bo->base.dev != NULL;
-}
-
-/**
* ttm_bo_pin - Pin the buffer object.
* @bo: The buffer object to pin
*
diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h
index 6164ccf4f308..890b9d369519 100644
--- a/include/drm/ttm/ttm_resource.h
+++ b/include/drm/ttm/ttm_resource.h
@@ -161,7 +161,6 @@ struct ttm_bus_placement {
* @mm_node: Memory manager node.
* @size: Requested size of memory region.
* @num_pages: Actual size of memory region in pages.
- * @page_alignment: Page alignment.
* @placement: Placement flags.
* @bus: Placement on io bus accessible to the CPU
*
@@ -172,7 +171,6 @@ struct ttm_resource {
void *mm_node;
unsigned long start;
unsigned long num_pages;
- uint32_t page_alignment;
uint32_t mem_type;
uint32_t placement;
struct ttm_bus_placement bus;
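/*
 * Illustrative sketch, not part of the patch: with page_alignment moved from
 * struct ttm_resource to struct ttm_buffer_object, a hypothetical resource
 * manager reads the alignment from the BO when placing a range node,
 * mirroring the vmwgfx THP manager change above. foo_mgr_place() is a
 * placeholder.
 */
#include <drm/drm_mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_resource.h>

static int foo_mgr_place(struct ttm_resource_manager *man, struct drm_mm *mm,
			 struct drm_mm_node *node, struct ttm_buffer_object *bo,
			 const struct ttm_place *place, struct ttm_resource *mem)
{
	unsigned long lpfn = place->lpfn ? place->lpfn : man->size;

	/* Alignment now comes from the buffer object, not the resource. */
	return drm_mm_insert_node_in_range(mm, node, mem->num_pages,
					   bo->page_alignment, 0,
					   place->fpfn, lpfn,
					   DRM_MM_INSERT_BEST);
}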
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index a5e76aa06ad5..9b6722d45f36 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -413,9 +413,10 @@ enum drm_mode_subconnector {
*
* **Force-probing a connector**
*
- * If the @count_modes field is set to zero, the kernel will perform a forced
- * probe on the connector to refresh the connector status, modes and EDID.
- * A forced-probe can be slow, might cause flickering and the ioctl will block.
+ * If the @count_modes field is set to zero and the DRM client is the current
+ * DRM master, the kernel will perform a forced probe on the connector to
+ * refresh the connector status, modes and EDID. A forced-probe can be slow,
+ * might cause flickering and the ioctl will block.
*
* User-space needs to force-probe connectors to ensure their metadata is
* up-to-date at startup and after receiving a hot-plug event. User-space