Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 166
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c | 116
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c | 106
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 133
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 129
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mes_v12_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 54
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 135
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h | 24
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_spl_translate.c | 102
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_spl_translate.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 19
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c | 68
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c | 588
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl.c | 1363
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h | 15
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c | 351
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h | 35
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c | 1726
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h | 38
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c | 91
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h | 55
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters_old.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h | 72
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c | 151
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h | 29
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_debug.h | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c | 497
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h | 525
-rw-r--r--  drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 40
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h | 75
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c | 4
-rw-r--r--  drivers/gpu/drm/arm/display/komeda/komeda_kms.c | 10
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/display/Kconfig | 24
-rw-r--r--  drivers/gpu/drm/display/Makefile | 2
-rw-r--r--  drivers/gpu/drm/display/drm_bridge_connector.c (renamed from drivers/gpu/drm/drm_bridge_connector.c) | 13
-rw-r--r--  drivers/gpu/drm/drm_fbdev_dma.c | 83
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_scaler.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/g4x_dp.c | 152
-rw-r--r--  drivers/gpu/drm/i915/display/intel_alpm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 34
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_driver.c | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_irq.c | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power_well.c | 15
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_reset.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.c | 130
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 105
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_tunnel.c | 77
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_tunnel.h | 11
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll.c | 9
-rw-r--r--  drivers/gpu/drm/i915/display/intel_frontbuffer.c | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c | 67
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 500
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug_irq.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pch_display.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 563
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.h | 10
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 848
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c | 68
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 37
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_regs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/sysfs_engines.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 5
-rw-r--r--  drivers/gpu/drm/i915/i915_driver.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h | 8
-rw-r--r--  drivers/gpu/drm/i915/i915_hwmon.c | 88
-rw-r--r--  drivers/gpu/drm/i915/i915_module.c | 9
-rw-r--r--  drivers/gpu/drm/i915/i915_params.c | 3
-rw-r--r--  drivers/gpu/drm/i915/i915_sw_fence.c | 8
-rw-r--r--  drivers/gpu/drm/i915/selftests/mock_gem_device.c | 2
-rw-r--r--  drivers/gpu/drm/imagination/pvr_vm.c | 4
-rw-r--r--  drivers/gpu/drm/imx/dcss/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/imx/lcdc/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/ingenic/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/kmb/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/mediatek/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_crtc.c | 82
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 36
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c | 5
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 13
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_dsi.c | 106
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_ethdr.c | 13
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_plane.c | 29
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_plane.h | 3
-rw-r--r--  drivers/gpu/drm/meson/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 1
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_catalog.c | 11
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 14
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 16
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c | 30
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_catalog.c | 141
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 21
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 89
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c | 46
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h | 2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 15
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 51
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 6
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 41
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h | 7
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c | 10
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c | 12
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h | 8
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy.c | 5
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c | 779
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 1
-rw-r--r--  drivers/gpu/drm/msm/registers/adreno/a6xx.xml | 1118
-rw-r--r--  drivers/gpu/drm/msm/registers/display/hdmi.xml | 89
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/panel/panel-newvision-nv3052c.c | 2
-rw-r--r--  drivers/gpu/drm/panthor/panthor_drv.c | 23
-rw-r--r--  drivers/gpu/drm/panthor/panthor_fw.c | 8
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.c | 21
-rw-r--r--  drivers/gpu/drm/panthor/panthor_mmu.h | 1
-rw-r--r--  drivers/gpu/drm/panthor/panthor_sched.c | 2
-rw-r--r--  drivers/gpu/drm/renesas/rcar-du/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/renesas/rz-du/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/renesas/shmobile/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig | 4
-rw-r--r--  drivers/gpu/drm/tegra/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/tidss/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/v3d/v3d_sched.c | 6
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 114
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 13
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_bo.h | 3
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_drv.h | 4
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 6
-rw-r--r--  drivers/gpu/drm/xe/Makefile | 1
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h | 13
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h | 8
-rw-r--r--  drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h | 7
-rw-r--r--  drivers/gpu/drm/xe/display/intel_fbdev_fb.c | 1
-rw-r--r--  drivers/gpu/drm/xe/display/xe_display.c | 10
-rw-r--r--  drivers/gpu/drm/xe/display/xe_dsb_buffer.c | 1
-rw-r--r--  drivers/gpu/drm/xe/display/xe_fb_pin.c | 1
-rw-r--r--  drivers/gpu/drm/xe/display/xe_hdcp_gsc.c | 1
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_gsc_regs.h | 4
-rw-r--r--  drivers/gpu/drm/xe/tests/xe_dma_buf.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_assert.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_bo.c | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_device.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_device.h | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_device_types.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_drm_client.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_exec.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_exec_queue.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist.c | 23
-rw-r--r--  drivers/gpu/drm/xe/xe_execlist_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc.c | 61
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_debugfs.c | 71
-rw-r--r--  drivers/gpu/drm/xe/xe_gsc_debugfs.h | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_gt.c | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_pagefault.c | 8
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c | 103
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c | 1260
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h | 107
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_gt_types.h | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_pc.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine.c | 17
-rw-r--r--  drivers/gpu/drm/xe/xe_hw_engine_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_hwmon.c | 97
-rw-r--r--  drivers/gpu/drm/xe/xe_migrate.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_oa.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_oa_types.h | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_observation.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_pat.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_pci.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_pcode.c | 104
-rw-r--r--  drivers/gpu/drm/xe/xe_pcode.h | 16
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.c | 11
-rw-r--r--  drivers/gpu/drm/xe/xe_pm.h | 1
-rw-r--r--  drivers/gpu/drm/xe/xe_pt.c | 117
-rw-r--r--  drivers/gpu/drm/xe/xe_query.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_rtp.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_sched_job.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_tile.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_uc_debugfs.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_uc_fw.c | 13
-rw-r--r--  drivers/gpu/drm/xe/xe_uc_fw.h | 9
-rw-r--r--  drivers/gpu/drm/xe/xe_vm.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_vram_freq.c | 6
-rw-r--r--  drivers/gpu/drm/xe/xe_wait_user_fence.c | 2
-rw-r--r--  drivers/gpu/drm/xlnx/Kconfig | 1
264 files changed, 12621 insertions(+), 3152 deletions(-)
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index c62339b89d46..784229d4504d 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -129,7 +129,6 @@ obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
drm_kms_helper-y := \
drm_atomic_helper.o \
drm_atomic_state_helper.o \
- drm_bridge_connector.o \
drm_crtc_helper.o \
drm_damage_helper.o \
drm_encoder_slave.o \
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 7fe41a3c2541..e095572458cd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1164,6 +1164,7 @@ struct amdgpu_device {
bool debug_disable_soft_recovery;
bool debug_use_vram_fw_buf;
bool debug_enable_ras_aca;
+ bool debug_exp_resets;
bool enforce_isolation[MAX_XCP];
/* Added this mutex for cleaner shader isolation between GFX and compute processes */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index c63528a4e894..1254a43ec96b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -1151,6 +1151,10 @@ uint64_t kgd_gfx_v9_hqd_get_pq_addr(struct amdgpu_device *adev,
uint32_t low, high;
uint64_t queue_addr = 0;
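+ /* per-queue reset is still experimental on compute-only (no gfx ring)
+ * parts; without the debug option, report no PQ address
+ */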
+ if (!adev->debug_exp_resets &&
+ !adev->gfx.num_gfx_rings)
+ return 0;
+
kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst);
amdgpu_gfx_rlc_enter_safe_mode(adev, inst);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
index cf2b4dd4d865..5ac59b62020c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.c
@@ -28,8 +28,8 @@
#include "atom.h"
#ifndef CONFIG_DEV_COREDUMP
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
{
}
#else
@@ -315,7 +315,9 @@ amdgpu_devcoredump_read(char *buffer, loff_t offset, size_t count,
}
}
- if (coredump->reset_vram_lost)
+ if (coredump->skip_vram_check)
+ drm_printf(&p, "VRAM lost check is skipped!\n");
+ else if (coredump->reset_vram_lost)
drm_printf(&p, "VRAM is lost due to GPU reset!\n");
return count - iter.remain;
@@ -326,12 +328,11 @@ static void amdgpu_devcoredump_free(void *data)
kfree(data);
}
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context)
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job)
{
- struct amdgpu_coredump_info *coredump;
struct drm_device *dev = adev_to_drm(adev);
- struct amdgpu_job *job = reset_context->job;
+ struct amdgpu_coredump_info *coredump;
struct drm_sched_job *s_job;
coredump = kzalloc(sizeof(*coredump), GFP_NOWAIT);
@@ -341,11 +342,12 @@ void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
return;
}
+ coredump->skip_vram_check = skip_vram_check;
coredump->reset_vram_lost = vram_lost;
- if (reset_context->job && reset_context->job->vm) {
+ if (job && job->vm) {
+ struct amdgpu_vm *vm = job->vm;
struct amdgpu_task_info *ti;
- struct amdgpu_vm *vm = reset_context->job->vm;
ti = amdgpu_vm_get_task_info_vm(vm);
if (ti) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
index 52459512cb2b..ef9772c6bcc9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dev_coredump.h
@@ -26,7 +26,6 @@
#define __AMDGPU_DEV_COREDUMP_H__
#include "amdgpu.h"
-#include "amdgpu_reset.h"
#ifdef CONFIG_DEV_COREDUMP
@@ -36,12 +35,12 @@ struct amdgpu_coredump_info {
struct amdgpu_device *adev;
struct amdgpu_task_info reset_task_info;
struct timespec64 reset_time;
+ bool skip_vram_check;
bool reset_vram_lost;
struct amdgpu_ring *ring;
};
#endif
-void amdgpu_coredump(struct amdgpu_device *adev, bool vram_lost,
- struct amdgpu_reset_context *reset_context);
-
+void amdgpu_coredump(struct amdgpu_device *adev, bool skip_vram_check,
+ bool vram_lost, struct amdgpu_job *job);
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 0aad05b4f32e..e479ff33a0a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4531,6 +4531,9 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
{
dev_info(adev->dev, "amdgpu: finishing device.\n");
flush_delayed_work(&adev->delayed_init_work);
+
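+ /* flush any pending TTM buffer-object release work before shutdown */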
+ if (adev->mman.initialized)
+ drain_workqueue(adev->mman.bdev.wq);
adev->shutdown = true;
/* make sure IB test finished before entering exclusive mode
@@ -4551,9 +4554,6 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
}
amdgpu_fence_driver_hw_fini(adev);
- if (adev->mman.initialized)
- drain_workqueue(adev->mman.bdev.wq);
-
if (adev->pm.sysfs_initialized)
amdgpu_pm_sysfs_fini(adev);
if (adev->ucode_sysfs_en)
@@ -5489,7 +5489,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
if (!test_bit(AMDGPU_SKIP_COREDUMP, &reset_context->flags))
- amdgpu_coredump(tmp_adev, vram_lost, reset_context);
+ amdgpu_coredump(tmp_adev, false, vram_lost, reset_context->job);
if (vram_lost) {
DRM_INFO("VRAM is lost due to GPU reset!\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 5dd39e6c6223..8dee7c62c801 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -131,6 +131,7 @@ enum AMDGPU_DEBUG_MASK {
AMDGPU_DEBUG_DISABLE_GPU_SOFT_RECOVERY = BIT(2),
AMDGPU_DEBUG_USE_VRAM_FW_BUF = BIT(3),
AMDGPU_DEBUG_ENABLE_RAS_ACA = BIT(4),
+ AMDGPU_DEBUG_ENABLE_EXP_RESETS = BIT(5),
};
unsigned int amdgpu_vram_limit = UINT_MAX;
@@ -2199,6 +2200,11 @@ static void amdgpu_init_debug_options(struct amdgpu_device *adev)
pr_info("debug: enable RAS ACA\n");
adev->debug_enable_ras_aca = true;
}
+
+ if (amdgpu_debug_mask & AMDGPU_DEBUG_ENABLE_EXP_RESETS) {
+ pr_info("debug: enable experimental reset features\n");
+ adev->debug_exp_resets = true;
+ }
}
static unsigned long amdgpu_fix_asic_type(struct pci_dev *pdev, unsigned long flags)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index aad2027e5c7c..0e617dff8765 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -348,6 +348,9 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
return -EINVAL;
}
+ /* always clear VRAM */
+ flags |= AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
/* create a gem object to contain this object in */
if (args->in.domains & (AMDGPU_GEM_DOMAIN_GDS |
AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index b4efeef848de..b779d47a546a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -660,7 +660,7 @@ int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id)
uint64_t queue_mask = 0;
int r, i, j;
- if (adev->enable_mes)
+ if (adev->mes.enable_legacy_queue_map)
return amdgpu_gfx_mes_enable_kcq(adev, xcc_id);
if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
@@ -722,7 +722,7 @@ int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id)
amdgpu_device_flush_hdp(adev, NULL);
- if (adev->enable_mes) {
+ if (adev->mes.enable_legacy_queue_map) {
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
j = i + xcc_id * adev->gfx.num_gfx_rings;
r = amdgpu_mes_map_legacy_queue(adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 3bb9d1ca74b8..570820edcfda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -30,6 +30,60 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_reset.h"
+#include "amdgpu_dev_coredump.h"
+#include "amdgpu_xgmi.h"
+
+static void amdgpu_job_do_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ int i;
+
+ dev_info(adev->dev, "Dumping IP State\n");
+ for (i = 0; i < adev->num_ip_blocks; i++)
+ if (adev->ip_blocks[i].version->funcs->dump_ip_state)
+ adev->ip_blocks[i].version->funcs
+ ->dump_ip_state((void *)adev);
+ dev_info(adev->dev, "Dumping IP State Completed\n");
+
+ amdgpu_coredump(adev, true, false, job);
+}
+
+static void amdgpu_job_core_dump(struct amdgpu_device *adev,
+ struct amdgpu_job *job)
+{
+ struct list_head device_list, *device_list_handle = NULL;
+ struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_hive_info *hive = NULL;
+
+ if (!amdgpu_sriov_vf(adev))
+ hive = amdgpu_get_xgmi_hive(adev);
+ if (hive)
+ mutex_lock(&hive->hive_lock);
+ /*
+ * Reuse the logic in amdgpu_device_gpu_recover() to build the list of
+ * devices for core dump
+ */
+ INIT_LIST_HEAD(&device_list);
+ if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1) && hive) {
+ list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+ list_add_tail(&tmp_adev->reset_list, &device_list);
+ if (!list_is_first(&adev->reset_list, &device_list))
+ list_rotate_to_front(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ } else {
+ list_add_tail(&adev->reset_list, &device_list);
+ device_list_handle = &device_list;
+ }
+
+ /* Do the coredump for each device */
+ list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+ amdgpu_job_do_core_dump(tmp_adev, job);
+
+ if (hive) {
+ mutex_unlock(&hive->hive_lock);
+ amdgpu_put_xgmi_hive(hive);
+ }
+}
static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
{
@@ -48,9 +102,14 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
return DRM_GPU_SCHED_STAT_ENODEV;
}
-
adev->job_hang = true;
+ /*
+ * Do the coredump immediately after a job timeout to get a very
+ * close dump/snapshot/representation of the GPU's current error status
+ */
+ amdgpu_job_core_dump(adev, job);
+
if (amdgpu_gpu_recovery &&
amdgpu_ring_soft_recovery(ring, job->vmid, s_job->s_fence->parent)) {
dev_err(adev->dev, "ring %s timeout, but soft recovered\n",
@@ -101,6 +160,12 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job)
reset_context.src = AMDGPU_RESET_SRC_JOB;
clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
+ /*
+ * Avoid an unnecessary extra coredump, as we have already
+ * got a very close representation of the GPU's error status.
+ */
+ set_bit(AMDGPU_SKIP_COREDUMP, &reset_context.flags);
+
r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context);
if (r)
dev_err(adev->dev, "GPU Recovery Failed: %d\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
index 44c74a08987d..f7d5d4f08a53 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -826,6 +826,24 @@ int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id)
return 0;
}
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+ int me_id, int pipe_id, int queue_id, int vmid)
+{
+ struct mes_reset_queue_input queue_input;
+ int r;
+
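+ /* bypass the MES scheduler and reset the queue directly through MMIO */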
+ queue_input.use_mmio = true;
+ queue_input.me_id = me_id;
+ queue_input.pipe_id = pipe_id;
+ queue_input.queue_id = queue_id;
+ queue_input.vmid = vmid;
+ r = adev->mes.funcs->reset_hw_queue(&adev->mes, &queue_input);
+ if (r)
+ DRM_ERROR("failed to reset hardware queue by mmio, queue id = %d\n",
+ queue_id);
+ return r;
+}
+
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring)
{
@@ -873,7 +891,8 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
- unsigned int vmid)
+ unsigned int vmid,
+ bool use_mmio)
{
struct mes_reset_legacy_queue_input queue_input;
int r;
@@ -882,11 +901,13 @@ int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
queue_input.queue_type = ring->funcs->type;
queue_input.doorbell_offset = ring->doorbell_index;
+ queue_input.me_id = ring->me;
queue_input.pipe_id = ring->pipe;
queue_input.queue_id = ring->queue;
queue_input.mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj);
queue_input.wptr_addr = ring->wptr_gpu_addr;
queue_input.vmid = vmid;
+ queue_input.use_mmio = use_mmio;
r = adev->mes.funcs->reset_legacy_queue(&adev->mes, &queue_input);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
index a5b1ea60cac8..96788c0f42f1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -75,6 +75,7 @@ struct amdgpu_mes {
uint32_t sched_version;
uint32_t kiq_version;
+ bool enable_legacy_queue_map;
uint32_t total_max_queue;
uint32_t max_doorbell_slices;
@@ -251,6 +252,13 @@ struct mes_remove_queue_input {
struct mes_reset_queue_input {
uint32_t doorbell_offset;
uint64_t gang_context_addr;
+ bool use_mmio;
+ uint32_t queue_type;
+ uint32_t me_id;
+ uint32_t pipe_id;
+ uint32_t queue_id;
+ uint32_t xcc_id;
+ uint32_t vmid;
};
struct mes_map_legacy_queue_input {
@@ -287,6 +295,8 @@ struct mes_resume_gang_input {
struct mes_reset_legacy_queue_input {
uint32_t queue_type;
uint32_t doorbell_offset;
+ bool use_mmio;
+ uint32_t me_id;
uint32_t pipe_id;
uint32_t queue_id;
uint64_t mqd_addr;
@@ -396,6 +406,8 @@ int amdgpu_mes_add_hw_queue(struct amdgpu_device *adev, int gang_id,
int *queue_id);
int amdgpu_mes_remove_hw_queue(struct amdgpu_device *adev, int queue_id);
int amdgpu_mes_reset_hw_queue(struct amdgpu_device *adev, int queue_id);
+int amdgpu_mes_reset_hw_queue_mmio(struct amdgpu_device *adev, int queue_type,
+ int me_id, int pipe_id, int queue_id, int vmid);
int amdgpu_mes_map_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
@@ -405,7 +417,8 @@ int amdgpu_mes_unmap_legacy_queue(struct amdgpu_device *adev,
u64 gpu_addr, u64 seq);
int amdgpu_mes_reset_legacy_queue(struct amdgpu_device *adev,
struct amdgpu_ring *ring,
- unsigned int vmid);
+ unsigned int vmid,
+ bool use_mmio);
uint32_t amdgpu_mes_rreg(struct amdgpu_device *adev, uint32_t reg);
int amdgpu_mes_wreg(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index ca983a014ba0..45ed97038df0 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -6692,13 +6692,13 @@ static int gfx_v10_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v10_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v10_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -6750,7 +6750,7 @@ static int gfx_v10_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v10_0_gfx_init_queue(ring);
+ r = gfx_v10_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -7030,13 +7030,13 @@ static int gfx_v10_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring, bool restore)
{
struct amdgpu_device *adev = ring->adev;
struct v10_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!restore && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -7098,7 +7098,7 @@ static int gfx_v10_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v10_0_kcq_init_queue(ring);
+ r = gfx_v10_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -8949,7 +8949,9 @@ static void gfx_v10_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, mmSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -9416,6 +9418,156 @@ static void gfx_v10_ring_insert_nop(struct amdgpu_ring *ring, uint32_t num_nop)
amdgpu_ring_write(ring, ring->funcs->nop);
}
+static int gfx_v10_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ u32 tmp;
+ u64 addr;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, 5 + 7 + 7 + kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
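+ /* request a per-VMID reset of the queue, then wait for the MQD's
+ * cp_gfx_hqd_active flag and the CP_VMID_RESET register to clear
+ * before remapping the queue
+ */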
+ addr = amdgpu_bo_gpu_offset(ring->mqd_obj) +
+ offsetof(struct v10_gfx_mqd, cp_gfx_hqd_active);
+ tmp = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (ring->pipe == 0)
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE0_QUEUES, 1 << ring->queue);
+ else
+ tmp = REG_SET_FIELD(tmp, CP_VMID_RESET, PIPE1_QUEUES, 1 << ring->queue);
+
+ gfx_v10_0_ring_emit_wreg(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), tmp);
+ gfx_v10_0_wait_reg_mem(kiq_ring, 0, 1, 0,
+ lower_32_bits(addr), upper_32_bits(addr),
+ 0, 1, 0x20);
+ gfx_v10_0_ring_emit_reg_wait(kiq_ring,
+ SOC15_REG_OFFSET(GC, 0, mmCP_VMID_RESET), 0, 0xffffffff);
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v10_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
+static int gfx_v10_0_reset_kcq(struct amdgpu_ring *ring,
+ unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq[0];
+ struct amdgpu_ring *kiq_ring = &kiq->ring;
+ unsigned long flags;
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
+ return -EINVAL;
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+
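+ /* have the KIQ unmap the queue with RESET_QUEUES to tear down the stuck HQD */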
+ kiq->pmf->kiq_unmap_queues(kiq_ring, ring, RESET_QUEUES,
+ 0, 0);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ /* make sure dequeue is complete */
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ nv_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ nv_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v10_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size)) {
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+ return -ENOMEM;
+ }
+ kiq->pmf->kiq_map_queues(kiq_ring, ring);
+ amdgpu_ring_commit(kiq_ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_ring_test_ring(kiq_ring);
+ if (r)
+ return r;
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static void gfx_v10_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -9619,6 +9771,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
+ .reset = gfx_v10_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
@@ -9655,6 +9808,7 @@ static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_compute = {
.emit_reg_write_reg_wait = gfx_v10_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v10_0_ring_soft_recovery,
.emit_mem_sync = gfx_v10_0_emit_mem_sync,
+ .reset = gfx_v10_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v10_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
index ee8604722467..d3e8be82a172 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
@@ -3984,13 +3984,13 @@ static int gfx_v11_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v11_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v11_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -4026,7 +4026,7 @@ static int gfx_v11_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v11_0_gfx_init_queue(ring);
+ r = gfx_v11_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -4321,13 +4321,13 @@ static int gfx_v11_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v11_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v11_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -4391,7 +4391,7 @@ static int gfx_v11_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v11_0_kcq_init_queue(ring);
+ r = gfx_v11_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -4781,7 +4781,7 @@ static int gfx_v11_0_soft_reset(void *handle)
int r, i, j, k;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- gfx_v11_0_set_safe_mode(adev, 0);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
tmp = RREG32_SOC15(GC, 0, regCP_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, CMP_BUSY_INT_ENABLE, 0);
@@ -4900,7 +4900,7 @@ static int gfx_v11_0_soft_reset(void *handle)
tmp = REG_SET_FIELD(tmp, CP_INT_CNTL, GFX_IDLE_INT_ENABLE, 1);
WREG32_SOC15(GC, 0, regCP_INT_CNTL, tmp);
- gfx_v11_0_unset_safe_mode(adev, 0);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
return gfx_v11_0_cp_resume(adev);
}
@@ -5923,6 +5923,9 @@ static int gfx_v11_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
+ if (adev->enable_mes)
+ return -EINVAL;
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -6088,7 +6091,9 @@ static void gfx_v11_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -6541,6 +6546,99 @@ static void gfx_v11_0_emit_mem_sync(struct amdgpu_ring *ring)
amdgpu_ring_write(ring, gcr_cntl); /* GCR_CNTL */
}
+static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ if (r)
+ return r;
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v11_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
+static int gfx_v11_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int i, r = 0;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
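+ /* issue an HQD dequeue request, then reset the queue through SPI */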
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ /* make sure dequeue is complete */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ if (r) {
+ dev_err(adev->dev, "fail to wait on hqd deactivate\n");
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v11_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ dev_err(adev->dev, "fail to unresv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kcq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static void gfx_v11_ip_print(void *handle, struct drm_printer *p)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -6742,6 +6840,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
+ .reset = gfx_v11_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
@@ -6779,6 +6878,7 @@ static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_compute = {
.emit_reg_write_reg_wait = gfx_v11_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v11_0_ring_soft_recovery,
.emit_mem_sync = gfx_v11_0_emit_mem_sync,
+ .reset = gfx_v11_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v11_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
index 54059cbcfc08..d1357c01eb39 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c
@@ -2916,13 +2916,13 @@ static int gfx_v12_0_gfx_mqd_init(struct amdgpu_device *adev, void *m,
return 0;
}
-static int gfx_v12_0_gfx_init_queue(struct amdgpu_ring *ring)
+static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v12_gfx_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.gfx_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -2958,7 +2958,7 @@ static int gfx_v12_0_cp_async_gfx_ring_resume(struct amdgpu_device *adev)
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v12_0_gfx_init_queue(ring);
+ r = gfx_v12_0_kgq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -3262,13 +3262,13 @@ static int gfx_v12_0_kiq_init_queue(struct amdgpu_ring *ring)
return 0;
}
-static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring)
+static int gfx_v12_0_kcq_init_queue(struct amdgpu_ring *ring, bool reset)
{
struct amdgpu_device *adev = ring->adev;
struct v12_compute_mqd *mqd = ring->mqd_ptr;
int mqd_idx = ring - &adev->gfx.compute_ring[0];
- if (!amdgpu_in_reset(adev) && !adev->in_suspend) {
+ if (!reset && !amdgpu_in_reset(adev) && !adev->in_suspend) {
memset((void *)mqd, 0, sizeof(*mqd));
mutex_lock(&adev->srbm_mutex);
soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
@@ -3332,7 +3332,7 @@ static int gfx_v12_0_kcq_resume(struct amdgpu_device *adev)
goto done;
r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
if (!r) {
- r = gfx_v12_0_kcq_init_queue(ring);
+ r = gfx_v12_0_kcq_init_queue(ring, false);
amdgpu_bo_kunmap(ring->mqd_obj);
ring->mqd_ptr = NULL;
}
@@ -4501,6 +4501,9 @@ static int gfx_v12_0_ring_preempt_ib(struct amdgpu_ring *ring)
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
+ if (adev->enable_mes)
+ return -EINVAL;
+
if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
return -EINVAL;
@@ -4617,7 +4620,9 @@ static void gfx_v12_0_ring_soft_recovery(struct amdgpu_ring *ring,
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
value = REG_SET_FIELD(value, SQ_CMD, VM_ID, vmid);
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
WREG32_SOC15(GC, 0, regSQ_CMD, value);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
}
static void
@@ -5155,6 +5160,93 @@ static void gfx_v12_ip_dump(void *handle)
amdgpu_gfx_off_ctrl(adev, true);
}
+static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false);
+ if (r) {
+ dev_err(adev->dev, "reset via MES failed %d\n", r);
+ return r;
+ }
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ dev_err(adev->dev, "fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v12_0_kgq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kgq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
+static int gfx_v12_0_reset_kcq(struct amdgpu_ring *ring, unsigned int vmid)
+{
+ struct amdgpu_device *adev = ring->adev;
+ int r, i;
+
+ if (amdgpu_sriov_vf(adev))
+ return -EINVAL;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+ mutex_lock(&adev->srbm_mutex);
+ soc24_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
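+ /* issue an HQD dequeue request, then reset the queue through SPI */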
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ soc24_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+
+ r = amdgpu_bo_reserve(ring->mqd_obj, false);
+ if (unlikely(r != 0)) {
+ DRM_ERROR("fail to resv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&ring->mqd_ptr);
+ if (!r) {
+ r = gfx_v12_0_kcq_init_queue(ring, true);
+ amdgpu_bo_kunmap(ring->mqd_obj);
+ ring->mqd_ptr = NULL;
+ }
+ amdgpu_bo_unreserve(ring->mqd_obj);
+ if (r) {
+ DRM_ERROR("fail to unresv mqd_obj\n");
+ return r;
+ }
+ r = amdgpu_mes_map_legacy_queue(adev, ring);
+ if (r) {
+ dev_err(adev->dev, "failed to remap kcq\n");
+ return r;
+ }
+
+ return amdgpu_ring_test_ring(ring);
+}
+
static const struct amd_ip_funcs gfx_v12_0_ip_funcs = {
.name = "gfx_v12_0",
.early_init = gfx_v12_0_early_init,
@@ -5217,6 +5309,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_gfx = {
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
+ .reset = gfx_v12_0_reset_kgq,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
@@ -5251,6 +5344,7 @@ static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_compute = {
.emit_reg_write_reg_wait = gfx_v12_0_ring_emit_reg_write_reg_wait,
.soft_recovery = gfx_v12_0_ring_soft_recovery,
.emit_mem_sync = gfx_v12_0_emit_mem_sync,
+ .reset = gfx_v12_0_reset_kcq,
};
static const struct amdgpu_ring_funcs gfx_v12_0_ring_funcs_kiq = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 21089aadbb7b..8cf5d7925b51 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -7233,6 +7233,10 @@ static int gfx_v9_0_reset_kcq(struct amdgpu_ring *ring,
unsigned long flags;
int i, r;
+ if (!adev->debug_exp_resets &&
+ !adev->gfx.num_gfx_rings)
+ return -EINVAL;
+
if (amdgpu_sriov_vf(adev))
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
index 2067f26d3a9d..408e5600bb61 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c
@@ -3052,6 +3052,9 @@ static void gfx_v9_4_3_ring_soft_recovery(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
uint32_t value = 0;
+ if (!adev->debug_exp_resets)
+ return;
+
value = REG_SET_FIELD(value, SQ_CMD, CMD, 0x03);
value = REG_SET_FIELD(value, SQ_CMD, MODE, 0x01);
value = REG_SET_FIELD(value, SQ_CMD, CHECK_VMID, 1);
@@ -3466,6 +3469,98 @@ static void gfx_v9_4_3_emit_wave_limit(struct amdgpu_ring *ring, bool enable)
}
}
+static int gfx_v9_4_3_unmap_done(struct amdgpu_device *adev, uint32_t me,
+ uint32_t pipe, uint32_t queue,
+ uint32_t xcc_id)
+{
+ int i, r;
+ /* make sure dequeue is complete */
+ gfx_v9_4_3_xcc_set_safe_mode(adev, xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc15_grbm_select(adev, me, pipe, queue, 0, GET_INST(GC, xcc_id));
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, GET_INST(GC, xcc_id), regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout)
+ r = -ETIMEDOUT;
+ else
+ r = 0;
+ soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, xcc_id));
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v9_4_3_xcc_unset_safe_mode(adev, xcc_id);
+
+ return r;
+}
+
+static bool gfx_v9_4_3_pipe_reset_support(struct amdgpu_device *adev)
+{
+ /* TODO: check whether the gfx9.4.4 MEC firmware supports pipe reset as well. */
+ if (amdgpu_ip_version(adev, GC_HWIP, 0) == IP_VERSION(9, 4, 3) &&
+ adev->gfx.mec_fw_version >= 0x0000009b)
+ return true;
+ else
+ dev_warn_once(adev->dev, "Please use the latest MEC firmware to check whether pipe reset is supported\n");
+
+ return false;
+}
+
+static int gfx_v9_4_3_reset_hw_pipe(struct amdgpu_ring *ring)
+{
+ struct amdgpu_device *adev = ring->adev;
+ uint32_t reset_pipe, clean_pipe;
+ int r;
+
+ if (!gfx_v9_4_3_pipe_reset_support(adev))
+ return -EINVAL;
+
+ gfx_v9_4_3_xcc_set_safe_mode(adev, ring->xcc_id);
+ mutex_lock(&adev->srbm_mutex);
+
+ reset_pipe = RREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL);
+ clean_pipe = reset_pipe;
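+ /* reset_pipe asserts the MEC pipe reset; clean_pipe restores CP_MEC_CNTL to release it */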
+
+ if (ring->me == 1) {
+ switch (ring->pipe) {
+ case 0:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE0_RESET, 1);
+ break;
+ case 1:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE1_RESET, 1);
+ break;
+ case 2:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE2_RESET, 1);
+ break;
+ case 3:
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME1_PIPE3_RESET, 1);
+ break;
+ default:
+ break;
+ }
+ } else {
+ if (ring->pipe)
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE1_RESET, 1);
+ else
+ reset_pipe = REG_SET_FIELD(reset_pipe, CP_MEC_CNTL,
+ MEC_ME2_PIPE0_RESET, 1);
+ }
+
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, reset_pipe);
+ WREG32_SOC15(GC, GET_INST(GC, ring->xcc_id), regCP_MEC_CNTL, clean_pipe);
+ mutex_unlock(&adev->srbm_mutex);
+ gfx_v9_4_3_xcc_unset_safe_mode(adev, ring->xcc_id);
+
+ r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
+ return r;
+}
+
static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
unsigned int vmid)
{
@@ -3473,7 +3568,10 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
struct amdgpu_kiq *kiq = &adev->gfx.kiq[ring->xcc_id];
struct amdgpu_ring *kiq_ring = &kiq->ring;
unsigned long flags;
- int r, i;
+ int r;
+
+ if (!adev->debug_exp_resets)
+ return -EINVAL;
if (amdgpu_sriov_vf(adev))
return -EINVAL;
@@ -3495,26 +3593,23 @@ static int gfx_v9_4_3_reset_kcq(struct amdgpu_ring *ring,
spin_unlock_irqrestore(&kiq->ring_lock, flags);
r = amdgpu_ring_test_ring(kiq_ring);
+ if (r) {
+ dev_err(adev->dev, "kiq ring test failed after ring: %s queue reset\n",
+ ring->name);
+ goto pipe_reset;
+ }
+
+ r = gfx_v9_4_3_unmap_done(adev, ring->me, ring->pipe, ring->queue, ring->xcc_id);
if (r)
- return r;
+ dev_err(adev->dev, "fail to wait on hqd deactive and will try pipe reset\n");
- /* make sure dequeue is complete*/
- amdgpu_gfx_rlc_enter_safe_mode(adev, ring->xcc_id);
- mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0, GET_INST(GC, ring->xcc_id));
- for (i = 0; i < adev->usec_timeout; i++) {
- if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
- break;
- udelay(1);
- }
- if (i >= adev->usec_timeout)
- r = -ETIMEDOUT;
- soc15_grbm_select(adev, 0, 0, 0, 0, GET_INST(GC, ring->xcc_id));
- mutex_unlock(&adev->srbm_mutex);
- amdgpu_gfx_rlc_exit_safe_mode(adev, ring->xcc_id);
- if (r) {
- dev_err(adev->dev, "fail to wait on hqd deactive\n");
- return r;
+pipe_reset:
+ if (r) {
+ r = gfx_v9_4_3_reset_hw_pipe(ring);
+ dev_info(adev->dev, "ring: %s pipe reset %s\n", ring->name,
+ r ? "failed" : "succeeded");
+ if (r)
+ return r;
}
r = amdgpu_bo_reserve(ring->mqd_obj, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
index 8edcd85a1261..0f055d1b1da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "soc15_common.h"
#include "soc21.h"
+#include "gfx_v11_0.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "gc/gc_11_0_0_default.h"
@@ -360,9 +361,83 @@ static int mes_v11_0_remove_hw_queue(struct amdgpu_mes *mes,
offsetof(union MESAPI__REMOVE_QUEUE, api_status));
}
+static int mes_v11_0_reset_queue_mmio(struct amdgpu_mes *mes, uint32_t queue_type,
+ uint32_t me_id, uint32_t pipe_id,
+ uint32_t queue_id, uint32_t vmid)
+{
+ struct amdgpu_device *adev = mes->adev;
+ uint32_t value;
+ int i, r = 0;
+
+ amdgpu_gfx_rlc_enter_safe_mode(adev, 0);
+
+ if (queue_type == AMDGPU_RING_TYPE_GFX) {
+ dev_info(adev->dev, "reset gfx queue (%d:%d:%d: vmid:%d)\n",
+ me_id, pipe_id, queue_id, vmid);
+
+ mutex_lock(&adev->gfx.reset_sem_mutex);
+ gfx_v11_0_request_gfx_index_mutex(adev, true);
+ /* all se allow writes */
+ WREG32_SOC15(GC, 0, regGRBM_GFX_INDEX,
+ (uint32_t)(0x1 << GRBM_GFX_INDEX__SE_BROADCAST_WRITES__SHIFT));
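+ /* request a reset of this vmid's queue on the selected gfx pipe */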
+ value = REG_SET_FIELD(0, CP_VMID_RESET, RESET_REQUEST, 1 << vmid);
+ if (pipe_id == 0)
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE0_QUEUES, 1 << queue_id);
+ else
+ value = REG_SET_FIELD(value, CP_VMID_RESET, PIPE1_QUEUES, 1 << queue_id);
+ WREG32_SOC15(GC, 0, regCP_VMID_RESET, value);
+ gfx_v11_0_request_gfx_index_mutex(adev, false);
+ mutex_unlock(&adev->gfx.reset_sem_mutex);
+
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_GFX_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on gfx hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ } else if (queue_type == AMDGPU_RING_TYPE_COMPUTE) {
+ dev_info(adev->dev, "reset compute queue (%d:%d:%d)\n",
+ me_id, pipe_id, queue_id);
+ mutex_lock(&adev->srbm_mutex);
+ soc21_grbm_select(adev, me_id, pipe_id, queue_id, 0);
+ WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 0x2);
+ WREG32_SOC15(GC, 0, regSPI_COMPUTE_QUEUE_RESET, 0x1);
+
+ /* wait till dequeue takes effect */
+ for (i = 0; i < adev->usec_timeout; i++) {
+ if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1))
+ break;
+ udelay(1);
+ }
+ if (i >= adev->usec_timeout) {
+ dev_err(adev->dev, "failed to wait on hqd deactivate\n");
+ r = -ETIMEDOUT;
+ }
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+ }
+
+ amdgpu_gfx_rlc_exit_safe_mode(adev, 0);
+ return r;
+}
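For context, a hedged sketch of how a caller might exercise the MMIO path added above; the field values, and the assumption that reset_hw_queue is reachable through adev->mes.funcs as the naming suggests, are illustrative and not taken from this hunk:

/* Illustrative only: request a direct-register (MMIO) queue reset.
 * The me/pipe/queue/vmid values are placeholders.
 */
struct mes_reset_queue_input input = {
	.queue_type = AMDGPU_RING_TYPE_COMPUTE,
	.me_id      = 1,
	.pipe_id    = 0,
	.queue_id   = 2,
	.vmid       = 8,
	.use_mmio   = true,	/* bypass the MES scheduler packet */
};
int r = adev->mes.funcs->reset_hw_queue(&adev->mes, &input);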
+
static int mes_v11_0_reset_hw_queue(struct amdgpu_mes *mes,
struct mes_reset_queue_input *input)
{
+ if (input->use_mmio)
+ return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
union MESAPI__RESET mes_reset_queue_pkt;
memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
@@ -648,6 +723,11 @@ static int mes_v11_0_reset_legacy_queue(struct amdgpu_mes *mes,
{
union MESAPI__RESET mes_reset_queue_pkt;
+ if (input->use_mmio)
+ return mes_v11_0_reset_queue_mmio(mes, input->queue_type,
+ input->me_id, input->pipe_id,
+ input->queue_id, input->vmid);
+
memset(&mes_reset_queue_pkt, 0, sizeof(mes_reset_queue_pkt));
mes_reset_queue_pkt.header.type = MES_API_TYPE_SCHEDULER;
@@ -775,6 +855,28 @@ static void mes_v11_0_free_ucode_buffers(struct amdgpu_device *adev,
(void **)&adev->mes.ucode_fw_ptr[pipe]);
}
+static void mes_v11_0_get_fw_version(struct amdgpu_device *adev)
+{
+ int pipe;
+
+ /* get MES scheduler/KIQ versions */
+ mutex_lock(&adev->srbm_mutex);
+
+ for (pipe = 0; pipe < AMDGPU_MAX_MES_PIPES; pipe++) {
+ soc21_grbm_select(adev, 3, pipe, 0, 0);
+
+ if (pipe == AMDGPU_MES_SCHED_PIPE)
+ adev->mes.sched_version =
+ RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
+ adev->mes.kiq_version =
+ RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
+ }
+
+ soc21_grbm_select(adev, 0, 0, 0, 0);
+ mutex_unlock(&adev->srbm_mutex);
+}
+
static void mes_v11_0_enable(struct amdgpu_device *adev, bool enable)
{
uint64_t ucode_addr;
@@ -1144,18 +1246,6 @@ static int mes_v11_0_queue_init(struct amdgpu_device *adev,
mes_v11_0_queue_init_register(ring);
}
- /* get MES scheduler/KIQ versions */
- mutex_lock(&adev->srbm_mutex);
- soc21_grbm_select(adev, 3, pipe, 0, 0);
-
- if (pipe == AMDGPU_MES_SCHED_PIPE)
- adev->mes.sched_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
- else if (pipe == AMDGPU_MES_KIQ_PIPE && adev->enable_mes_kiq)
- adev->mes.kiq_version = RREG32_SOC15(GC, 0, regCP_MES_GP3_LO);
-
- soc21_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-
return 0;
}
@@ -1402,15 +1492,24 @@ static int mes_v11_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v11_0_enable(adev, true);
+ mes_v11_0_get_fw_version(adev);
+
mes_v11_0_kiq_setting(&adev->gfx.kiq[0].ring);
r = mes_v11_0_queue_init(adev, AMDGPU_MES_KIQ_PIPE);
if (r)
goto failure;
- r = mes_v11_0_hw_init(adev);
- if (r)
- goto failure;
+ if ((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 0x47)
+ adev->mes.enable_legacy_queue_map = true;
+ else
+ adev->mes.enable_legacy_queue_map = false;
+
+ if (adev->mes.enable_legacy_queue_map) {
+ r = mes_v11_0_hw_init(adev);
+ if (r)
+ goto failure;
+ }
return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 47a73f6ae4da..e499b2857a01 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -1332,6 +1332,7 @@ static int mes_v12_0_sw_init(void *handle)
adev->mes.funcs = &mes_v12_0_funcs;
adev->mes.kiq_hw_init = &mes_v12_0_kiq_hw_init;
adev->mes.kiq_hw_fini = &mes_v12_0_kiq_hw_fini;
+ adev->mes.enable_legacy_queue_map = true;
adev->mes.event_log_size = AMDGPU_MES_LOG_BUFFER_SIZE;
@@ -1488,9 +1489,11 @@ static int mes_v12_0_kiq_hw_init(struct amdgpu_device *adev)
mes_v12_0_set_hw_resources_1(&adev->mes, AMDGPU_MES_KIQ_PIPE);
}
- r = mes_v12_0_hw_init(adev);
- if (r)
- goto failure;
+ if (adev->mes.enable_legacy_queue_map) {
+ r = mes_v12_0_hw_init(adev);
+ if (r)
+ goto failure;
+ }
return r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index ce2a5d9f90d3..40c94c4cdd96 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2464,11 +2464,14 @@ svm_range_unmap_from_cpu(struct mm_struct *mm, struct svm_range *prange,
adev = pdd->dev->adev;
/* Check and drain ih1 ring if cam not available */
- ih = &adev->irq.ih1;
- checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
- if (ih->rptr != checkpoint_wptr) {
- svms->checkpoint_ts[i] = amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
- continue;
+ if (adev->irq.ih1.ring_size) {
+ ih = &adev->irq.ih1;
+ checkpoint_wptr = amdgpu_ih_get_wptr(adev, ih);
+ if (ih->rptr != checkpoint_wptr) {
+ svms->checkpoint_ts[i] =
+ amdgpu_ih_decode_iv_ts(adev, ih, checkpoint_wptr, -1);
+ continue;
+ }
}
/* check if dev->irq.ih_soft is not empty */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5f7b178ba870..a8d0d1b71723 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1754,6 +1754,30 @@ static struct dml2_soc_bb *dm_dmub_get_vbios_bounding_box(struct amdgpu_device *
return bb;
}
+static enum dmub_ips_disable_type dm_get_default_ips_mode(
+ struct amdgpu_device *adev)
+{
+ /*
+ * On DCN35 systems with Z8 enabled, it's possible for IPS2 + Z8 to
+ * cause a hard hang. A fix exists for newer PMFW.
+ *
+ * As a workaround, for non-fixed PMFW, force IPS1+RCG as the deepest
+ * IPS state in all cases, except for s0ix and all displays off (DPMS),
+ * where IPS2 is allowed.
+ *
+ * When checking pmfw version, use the major and minor only.
+ */
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) == IP_VERSION(3, 5, 0) &&
+ (adev->pm.fw_version & 0x00FFFF00) < 0x005D6300)
+ return DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 5, 0))
+ return DMUB_IPS_ENABLE;
+
+ /* ASICs older than DCN35 do not support IPS */
+ return DMUB_IPS_DISABLE_ALL;
+}
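A small self-contained sketch of the version masking used above, with made-up firmware numbers to show which side of the comparison they land on:

#include <stdint.h>
#include <stdbool.h>

/* Hedged sketch: 0x00FFFF00 keeps only the PMFW major/minor bytes,
 * dropping the revision byte, before comparing against 0x005D6300.
 */
static bool pmfw_lacks_ips2_fix(uint32_t fw_version)
{
	return (fw_version & 0x00FFFF00) < 0x005D6300;
}

/* Example (illustrative values):
 *   0x005D62FF -> masked 0x005D6200 -> true  (force IPS1+RCG)
 *   0x005D6301 -> masked 0x005D6300 -> false (IPS2 allowed)
 */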
+
static int amdgpu_dm_init(struct amdgpu_device *adev)
{
struct dc_init_data init_data;
@@ -1864,8 +1888,14 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
- else
+ else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS_DYNAMIC)
+ init_data.flags.disable_ips = DMUB_IPS_DISABLE_DYNAMIC;
+ else if (amdgpu_dc_debug_mask & DC_DISABLE_IPS2_DYNAMIC)
+ init_data.flags.disable_ips = DMUB_IPS_RCG_IN_ACTIVE_IPS2_IN_OFF;
+ else if (amdgpu_dc_debug_mask & DC_FORCE_IPS_ENABLE)
init_data.flags.disable_ips = DMUB_IPS_ENABLE;
+ else
+ init_data.flags.disable_ips = dm_get_default_ips_mode(adev);
init_data.flags.disable_ips_in_vpb = 0;
@@ -4507,7 +4537,7 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
struct amdgpu_dm_backlight_caps caps;
struct dc_link *link;
u32 brightness;
- bool rc;
+ bool rc, reallow_idle = false;
amdgpu_dm_update_backlight_caps(dm, bl_idx);
caps = dm->backlight_caps[bl_idx];
@@ -4520,6 +4550,12 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
link = (struct dc_link *)dm->backlight_link[bl_idx];
/* Change brightness based on AUX property */
+ mutex_lock(&dm->dc_lock);
+ if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) {
+ dc_allow_idle_optimizations(dm->dc, false);
+ reallow_idle = true;
+ }
+
if (caps.aux_support) {
rc = dc_link_set_backlight_level_nits(link, true, brightness,
AUX_BL_DEFAULT_TRANSITION_TIME_MS);
@@ -4531,6 +4567,11 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm,
DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx);
}
+ if (dm->dc->caps.ips_support && reallow_idle)
+ dc_allow_idle_optimizations(dm->dc, true);
+
+ mutex_unlock(&dm->dc_lock);
+
if (rc)
dm->actual_brightness[bl_idx] = user_brightness;
}
@@ -6441,7 +6482,8 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name);
+ DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from SST RX\n",
+ __func__, drm_connector->name);
}
} else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
timing_bw_in_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
@@ -6460,7 +6502,7 @@ static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector,
dc_link_get_highest_encoding_format(aconnector->dc_link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n",
+ DRM_DEBUG_DRIVER("%s: SST_DSC [%s] DSC is selected from DP-HDMI PCON\n",
__func__, drm_connector->name);
}
}
@@ -11637,7 +11679,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
if (dc_resource_is_dsc_encoding_supported(dc)) {
ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
if (ret) {
- drm_dbg_atomic(dev, "compute_mst_dsc_configs_for_state() failed\n");
+ drm_dbg_atomic(dev, "MST_DSC compute_mst_dsc_configs_for_state() failed\n");
ret = -EINVAL;
goto fail;
}
@@ -11658,7 +11700,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
*/
ret = drm_dp_mst_atomic_check(state);
if (ret) {
- drm_dbg_atomic(dev, "drm_dp_mst_atomic_check() failed\n");
+ drm_dbg_atomic(dev, "MST drm_dp_mst_atomic_check() failed\n");
goto fail;
}
status = dc_validate_global_state(dc, dm_state->context, true);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 165e010fe69c..50109d13d967 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -759,7 +759,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
uint8_t ret = 0;
drm_dbg_dp(aux->drm_dev,
- "Configure DSC to non-virtual dpcd synaptics\n");
+ "MST_DSC Configure DSC to non-virtual dpcd synaptics\n");
if (enable) {
/* When DSC is enabled on previous boot and reboot with the hub,
@@ -772,7 +772,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
apply_synaptics_fifo_reset_wa(aux);
ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
- DRM_INFO("Send DSC enable to synaptics\n");
+ DRM_INFO("MST_DSC Send DSC enable to synaptics\n");
} else {
/* Synaptics hub not support virtual dpcd,
@@ -781,7 +781,7 @@ static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst(
*/
if (!stream->link->link_status.link_active) {
ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1);
- DRM_INFO("Send DSC disable to synaptics\n");
+ DRM_INFO("MST_DSC Send DSC disable to synaptics\n");
}
}
@@ -823,14 +823,14 @@ bool dm_helpers_dp_write_dsc_enable(
DP_DSC_ENABLE,
&enable_passthrough, 1);
drm_dbg_dp(dev,
- "Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
+ "MST_DSC Sent DSC pass-through enable to virtual dpcd port, ret = %u\n",
ret);
}
ret = drm_dp_dpcd_write(aconnector->dsc_aux,
DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Sent DSC decoding enable to %s port, ret = %u\n",
+ "MST_DSC Sent DSC decoding enable to %s port, ret = %u\n",
(port->passthrough_aux) ? "remote RX" :
"virtual dpcd",
ret);
@@ -838,7 +838,7 @@ bool dm_helpers_dp_write_dsc_enable(
ret = drm_dp_dpcd_write(aconnector->dsc_aux,
DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Sent DSC decoding disable to %s port, ret = %u\n",
+ "MST_DSC Sent DSC decoding disable to %s port, ret = %u\n",
(port->passthrough_aux) ? "remote RX" :
"virtual dpcd",
ret);
@@ -848,7 +848,7 @@ bool dm_helpers_dp_write_dsc_enable(
DP_DSC_ENABLE,
&enable_passthrough, 1);
drm_dbg_dp(dev,
- "Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
+ "MST_DSC Sent DSC pass-through disable to virtual dpcd port, ret = %u\n",
ret);
}
}
@@ -858,12 +858,12 @@ bool dm_helpers_dp_write_dsc_enable(
if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Send DSC %s to SST RX\n",
+ "SST_DSC Send DSC %s to SST RX\n",
enable_dsc ? "enable" : "disable");
} else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) {
ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1);
drm_dbg_dp(dev,
- "Send DSC %s to DP-HDMI PCON\n",
+ "SST_DSC Send DSC %s to DP-HDMI PCON\n",
enable_dsc ? "enable" : "disable");
}
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 2e9f6da1acdc..6b5eed37532b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -253,7 +253,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
/* synaptics cascaded MST hub case */
- if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+ if (is_synaptics_cascaded_panamera(aconnector->dc_link, port))
aconnector->dsc_aux = port->mgr->aux;
if (!aconnector->dsc_aux)
@@ -578,6 +578,8 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
if (!aconnector)
return NULL;
+ DRM_DEBUG_DRIVER("%s: Create aconnector 0x%p for port 0x%p\n", __func__, aconnector, port);
+
connector = &aconnector->base;
aconnector->mst_output_port = port;
aconnector->mst_root = master;
@@ -872,11 +874,11 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
if (params[i].sink) {
if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL &&
params[i].sink->sink_signal != SIGNAL_TYPE_NONE)
- DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i,
+ DRM_DEBUG_DRIVER("MST_DSC %s i=%d dispname=%s\n", __func__, i,
params[i].sink->edid_caps.display_name);
}
- DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n",
+ DRM_DEBUG_DRIVER("MST_DSC dsc=%d bits_per_pixel=%d pbn=%d\n",
params[i].timing->flags.DSC,
params[i].timing->dsc_cfg.bits_per_pixel,
vars[i + k].pbn);
@@ -1054,6 +1056,7 @@ static int try_disable_dsc(struct drm_atomic_state *state,
if (next_index == -1)
break;
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, try no compression\n", next_index);
vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
@@ -1064,10 +1067,12 @@ static int try_disable_dsc(struct drm_atomic_state *state,
ret = drm_dp_mst_atomic_check(state);
if (ret == 0) {
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, greedily disable dsc\n", next_index);
vars[next_index].dsc_enabled = false;
vars[next_index].bpp_x16 = 0;
} else {
- vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
+ DRM_DEBUG_DRIVER("MST_DSC index #%d, restore minimum compression\n", next_index);
+ vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000);
ret = drm_dp_atomic_find_time_slots(state,
params[next_index].port->mgr,
params[next_index].port,
@@ -1082,6 +1087,15 @@ static int try_disable_dsc(struct drm_atomic_state *state,
return 0;
}
+static void log_dsc_params(int count, struct dsc_mst_fairness_vars *vars, int k)
+{
+ int i;
+
+ for (i = 0; i < count; i++)
+ DRM_DEBUG_DRIVER("MST_DSC DSC params: stream #%d --- dsc_enabled = %d, bpp_x16 = %d, pbn = %d\n",
+ i, vars[i + k].dsc_enabled, vars[i + k].bpp_x16, vars[i + k].pbn);
+}
+
static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
struct dc_state *dc_state,
struct dc_link *dc_link,
@@ -1104,6 +1118,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return PTR_ERR(mst_state);
/* Set up params */
+ DRM_DEBUG_DRIVER("%s: MST_DSC Set up params for %d streams\n", __func__, dc_state->stream_count);
for (i = 0; i < dc_state->stream_count; i++) {
struct dc_dsc_policy dsc_policy = {0};
@@ -1145,6 +1160,9 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing,
dc_link_get_highest_encoding_format(dc_link));
+ DRM_DEBUG_DRIVER("MST_DSC #%d stream 0x%p - max_kbps = %u, min_kbps = %u, uncompressed_kbps = %u\n",
+ count, stream, params[count].bw_range.max_kbps, params[count].bw_range.min_kbps,
+ params[count].bw_range.stream_kbps);
count++;
}
@@ -1159,6 +1177,7 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
*link_vars_start_index += count;
/* Try no compression */
+ DRM_DEBUG_DRIVER("MST_DSC Try no compression\n");
for (i = 0; i < count; i++) {
vars[i + k].aconnector = params[i].aconnector;
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000);
@@ -1177,7 +1196,10 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
return ret;
}
+ log_dsc_params(count, vars, k);
+
/* Try max compression */
+ DRM_DEBUG_DRIVER("MST_DSC Try max compression\n");
for (i = 0; i < count; i++) {
if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) {
vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000);
@@ -1201,14 +1223,26 @@ static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
if (ret != 0)
return ret;
+ log_dsc_params(count, vars, k);
+
/* Optimize degree of compression */
+ DRM_DEBUG_DRIVER("MST_DSC Try optimize compression\n");
ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("MST_DSC Failed to optimize compression\n");
return ret;
+ }
+ log_dsc_params(count, vars, k);
+
+ DRM_DEBUG_DRIVER("MST_DSC Try disable compression\n");
ret = try_disable_dsc(state, dc_link, params, vars, count, k);
- if (ret < 0)
+ if (ret < 0) {
+ DRM_DEBUG_DRIVER("MST_DSC Failed to disable compression\n");
return ret;
+ }
+
+ log_dsc_params(count, vars, k);
set_dsc_configs_from_fairness_vars(params, vars, count, k);
@@ -1230,17 +1264,19 @@ static bool is_dsc_need_re_compute(
/* only check phy used by dsc mst branch */
if (dc_link->type != dc_connection_mst_branch)
- return false;
+ goto out;
/* add a check for older MST DSC with no virtual DPCDs */
if (needs_dsc_aux_workaround(dc_link) &&
(!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT ||
dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)))
- return false;
+ goto out;
for (i = 0; i < MAX_PIPES; i++)
stream_on_link[i] = NULL;
+ DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in new dc_state\n", __func__, dc_state->stream_count);
+
/* check if there is mode change in new request */
for (i = 0; i < dc_state->stream_count; i++) {
struct drm_crtc_state *new_crtc_state;
@@ -1250,6 +1286,8 @@ static bool is_dsc_need_re_compute(
if (!stream)
continue;
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC checking #%d stream 0x%p\n", __func__, __LINE__, i, stream);
+
/* check if stream using the same link for mst */
if (stream->link != dc_link)
continue;
@@ -1262,8 +1300,11 @@ static bool is_dsc_need_re_compute(
new_stream_on_link_num++;
new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base);
- if (!new_conn_state)
+ if (!new_conn_state) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_conn_state for stream 0x%p, aconnector 0x%p\n",
+ __func__, __LINE__, stream, aconnector);
continue;
+ }
if (IS_ERR(new_conn_state))
continue;
@@ -1272,21 +1313,36 @@ static bool is_dsc_need_re_compute(
continue;
new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc);
- if (!new_crtc_state)
+ if (!new_crtc_state) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC no new_crtc_state for crtc of stream 0x%p, aconnector 0x%p\n",
+ __func__, __LINE__, stream, aconnector);
continue;
+ }
if (IS_ERR(new_crtc_state))
continue;
if (new_crtc_state->enable && new_crtc_state->active) {
if (new_crtc_state->mode_changed || new_crtc_state->active_changed ||
- new_crtc_state->connectors_changed)
- return true;
+ new_crtc_state->connectors_changed) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompte required."
+ "stream 0x%p in new dc_state\n",
+ __func__, __LINE__, stream);
+ is_dsc_need_re_compute = true;
+ goto out;
+ }
}
}
- if (new_stream_on_link_num == 0)
- return false;
+ if (new_stream_on_link_num == 0) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC no mode change request for streams in new dc_state\n",
+ __func__, __LINE__);
+ is_dsc_need_re_compute = false;
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("%s: MST_DSC check on %d streams in current dc_state\n",
+ __func__, dc->current_state->stream_count);
/* check current_state if there stream on link but it is not in
* new request state
@@ -1310,11 +1366,18 @@ static bool is_dsc_need_re_compute(
if (j == new_stream_on_link_num) {
/* not in new state */
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC dsc recompute required."
+ "stream 0x%p in current dc_state but not in new dc_state\n",
+ __func__, __LINE__, stream);
is_dsc_need_re_compute = true;
break;
}
}
+out:
+ DRM_DEBUG_DRIVER("%s: MST_DSC dsc recompute %s\n",
+ __func__, is_dsc_need_re_compute ? "required" : "not required");
+
return is_dsc_need_re_compute;
}
@@ -1343,6 +1406,9 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ DRM_DEBUG_DRIVER("%s: MST_DSC compute mst dsc configs for stream 0x%p, aconnector 0x%p\n",
+ __func__, stream, aconnector);
+
if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
@@ -1375,8 +1441,11 @@ int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
stream = dc_state->streams[i];
if (stream->timing.flags.DSC == 1)
- if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK)
+ if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) {
+ DRM_DEBUG_DRIVER("%s:%d MST_DSC Failed to request dsc hw resource for stream 0x%p\n",
+ __func__, __LINE__, stream);
return -EINVAL;
+ }
}
return ret;
@@ -1405,6 +1474,9 @@ static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+ DRM_DEBUG_DRIVER("MST_DSC pre compute mst dsc configs for #%d stream 0x%p, aconnector 0x%p\n",
+ i, stream, aconnector);
+
if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port)
continue;
@@ -1494,12 +1566,12 @@ int pre_validate_dsc(struct drm_atomic_state *state,
int ret = 0;
if (!is_dsc_precompute_needed(state)) {
- DRM_INFO_ONCE("DSC precompute is not needed.\n");
+ DRM_INFO_ONCE("%s:%d MST_DSC dsc precompute is not needed\n", __func__, __LINE__);
return 0;
}
ret = dm_atomic_get_state(state, dm_state_ptr);
if (ret != 0) {
- DRM_INFO_ONCE("dm_atomic_get_state() failed\n");
+ DRM_INFO_ONCE("%s:%d MST_DSC dm_atomic_get_state() failed\n", __func__, __LINE__);
return ret;
}
dm_state = *dm_state_ptr;
@@ -1553,7 +1625,8 @@ int pre_validate_dsc(struct drm_atomic_state *state,
ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars);
if (ret != 0) {
- DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n");
+ DRM_INFO_ONCE("%s:%d MST_DSC dsc pre_compute_mst_dsc_configs_for_state() failed\n",
+ __func__, __LINE__);
ret = -EINVAL;
goto clean_exit;
}
@@ -1567,12 +1640,15 @@ int pre_validate_dsc(struct drm_atomic_state *state,
if (local_dc_state->streams[i] &&
dc_is_timing_changed(stream, local_dc_state->streams[i])) {
- DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i);
+ DRM_INFO_ONCE("%s:%d MST_DSC crtc[%d] needs mode_change\n", __func__, __LINE__, i);
} else {
int ind = find_crtc_index_in_state_by_stream(state, stream);
- if (ind >= 0)
+ if (ind >= 0) {
+ DRM_INFO_ONCE("%s:%d MST_DSC no mode changed for stream 0x%p\n",
+ __func__, __LINE__, stream);
state->crtcs[ind].new_state->mode_changed = 0;
+ }
}
}
clean_exit:
@@ -1697,7 +1773,7 @@ enum dc_status dm_dp_mst_is_port_support_mode(
end_to_end_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
if (stream_kbps <= end_to_end_bw_in_kbps) {
- DRM_DEBUG_DRIVER("No DSC needed. End-to-end bw sufficient.");
+ DRM_DEBUG_DRIVER("MST_DSC no dsc required. End-to-end bw sufficient\n");
return DC_OK;
}
@@ -1710,7 +1786,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
/*capable of dsc passthough. dsc bitstream along the entire path*/
if (aconnector->mst_output_port->passthrough_aux) {
if (bw_range.min_kbps > end_to_end_bw_in_kbps) {
- DRM_DEBUG_DRIVER("DSC passthrough. Max dsc compression can't fit into end-to-end bw\n");
+ DRM_DEBUG_DRIVER("MST_DSC dsc passthrough and decode at endpoint"
+ "Max dsc compression bw can't fit into end-to-end bw\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
@@ -1721,7 +1798,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
/*Get last DP link BW capability*/
if (dp_get_link_current_set_bw(&aconnector->mst_output_port->aux, &end_link_bw)) {
if (stream_kbps > end_link_bw) {
- DRM_DEBUG_DRIVER("DSC decode at last link. Mode required bw can't fit into available bw\n");
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Mode required bw can't fit into last link\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
@@ -1734,7 +1812,8 @@ enum dc_status dm_dp_mst_is_port_support_mode(
virtual_channel_bw_in_kbps = kbps_from_pbn(immediate_upstream_port->full_pbn);
virtual_channel_bw_in_kbps = min(root_link_bw_in_kbps, virtual_channel_bw_in_kbps);
if (bw_range.min_kbps > virtual_channel_bw_in_kbps) {
- DRM_DEBUG_DRIVER("DSC decode at last link. Max dsc compression can't fit into MST available bw\n");
+ DRM_DEBUG_DRIVER("MST_DSC dsc decode at last link."
+ "Max dsc compression can't fit into MST available bw\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
}
@@ -1751,9 +1830,9 @@ enum dc_status dm_dp_mst_is_port_support_mode(
dc_link_get_highest_encoding_format(stream->link),
&stream->timing.dsc_cfg)) {
stream->timing.flags.DSC = 1;
- DRM_DEBUG_DRIVER("Require dsc and dsc config found\n");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc and dsc config found\n");
} else {
- DRM_DEBUG_DRIVER("Require dsc but can't find appropriate dsc config\n");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find appropriate dsc config\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
@@ -1775,11 +1854,11 @@ enum dc_status dm_dp_mst_is_port_support_mode(
if (branch_max_throughput_mps != 0 &&
((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) {
- DRM_DEBUG_DRIVER("DSC is required but max throughput mps fails");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc but max throughput mps fails\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
} else {
- DRM_DEBUG_DRIVER("DSC is required but can't find common dsc config.");
+ DRM_DEBUG_DRIVER("MST_DSC require dsc but can't find common dsc config\n");
return DC_FAIL_BANDWIDTH_VALIDATE;
}
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index a573a6639898..25f63b2e7a8e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1283,6 +1283,7 @@ int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc
struct dc_cursor_position *position)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
int x, y;
int xorigin = 0, yorigin = 0;
@@ -1314,12 +1315,14 @@ int amdgpu_dm_plane_get_cursor_position(struct drm_plane *plane, struct drm_crtc
y = 0;
}
position->enable = true;
- position->translate_by_source = true;
position->x = x;
position->y = y;
position->x_hotspot = xorigin;
position->y_hotspot = yorigin;
+ if (amdgpu_ip_version(adev, DCE_HWIP, 0) < IP_VERSION(4, 0, 1))
+ position->translate_by_source = true;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 4254bdfefe38..7d18f372ce7a 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -227,7 +227,7 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev = 0;
- if (!BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev))
+ if (!BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) && (bp->base.ctx->dc->ctx->dce_version <= DCN_VERSION_2_0))
BREAK_TO_DEBUGGER();
switch (crev) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index 0ce9b40dfc68..97164b5585a8 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -305,9 +305,6 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (new_clocks->dtbclk_en && !new_clocks->ref_dtbclk_khz)
new_clocks->ref_dtbclk_khz = 600000;
- if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz)
- new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz;
-
/*
* if it is safe to lower, but we are already in the lower state, we don't have to do anything
* also if safe to lower is false, we just go in the higher state
@@ -385,6 +382,9 @@ void dcn35_update_clocks(struct clk_mgr *clk_mgr_base,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
+ if (dc->debug.min_disp_clk_khz > 0 && new_clocks->dispclk_khz < dc->debug.min_disp_clk_khz)
+ new_clocks->dispclk_khz = dc->debug.min_disp_clk_khz;
+
clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
dcn35_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
dcn35_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
@@ -1100,7 +1100,7 @@ void dcn35_clk_mgr_construct(
clk_mgr->smu_wm_set.wm_set = (struct dcn35_watermarks *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
- DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_GART,
sizeof(struct dcn35_watermarks),
&clk_mgr->smu_wm_set.mc_address.quad_part);
@@ -1112,7 +1112,7 @@ void dcn35_clk_mgr_construct(
smu_dpm_clks.dpm_clks = (DpmClocks_t_dcn35 *)dm_helpers_allocate_gpu_mem(
clk_mgr->base.base.ctx,
- DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ DC_MEM_ALLOC_TYPE_GART,
sizeof(DpmClocks_t_dcn35),
&smu_dpm_clks.mc_address.quad_part);
@@ -1209,7 +1209,7 @@ void dcn35_clk_mgr_construct(
}
if (smu_dpm_clks.dpm_clks && smu_dpm_clks.mc_address.quad_part != 0)
- dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_FRAME_BUFFER,
+ dm_helpers_free_gpu_mem(clk_mgr->base.base.ctx, DC_MEM_ALLOC_TYPE_GART,
smu_dpm_clks.dpm_clks);
if (ctx->dc->config.disable_ips != DMUB_IPS_DISABLE_ALL) {
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
index 01ea3a31e54d..8cfc5f435937 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn401/dcn401_clk_mgr.c
@@ -1366,9 +1366,6 @@ static void dcn401_update_clocks(struct clk_mgr *clk_mgr_base,
unsigned int num_steps = 0;
- if (dc->work_arounds.skip_clock_update)
- return;
-
if (dc->debug.enable_legacy_clock_update) {
dcn401_update_clocks_legacy(clk_mgr_base, context, safe_to_lower);
return;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index c8dabb081b3d..ae788154896c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -2690,6 +2690,9 @@ static enum surface_update_type check_update_surfaces_for_stream(
stream_update->vrr_active_variable || stream_update->vrr_active_fixed))
su_flags->bits.fams_changed = 1;
+ if (stream_update->scaler_sharpener_update)
+ su_flags->bits.scaler_sharpener = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
@@ -3022,6 +3025,8 @@ static void copy_stream_update_to_stream(struct dc *dc,
update->dsc_config = NULL;
}
}
+ if (update->scaler_sharpener_update)
+ stream->scaler_sharpener_update = *update->scaler_sharpener_update;
}
static void backup_planes_and_stream_state(
@@ -4713,7 +4718,8 @@ static bool full_update_required(struct dc *dc,
stream_update->func_shaper ||
stream_update->lut3d_func ||
stream_update->pending_test_pattern ||
- stream_update->crtc_timing_adjust))
+ stream_update->crtc_timing_adjust ||
+ stream_update->scaler_sharpener_update))
return true;
if (stream) {
@@ -5161,6 +5167,8 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
dc_z10_restore(dc);
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
+
dc->hwss.init_hw(dc);
if (dc->hwss.init_sys_ctx != NULL &&
@@ -5172,6 +5180,8 @@ void dc_set_power_state(struct dc *dc, enum dc_acpi_cm_power_state power_state)
default:
ASSERT(dc->current_state->stream_count == 0);
+ dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv, power_state);
+
dc_state_destruct(dc->current_state);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index ef585a89847b..c7599c40d4be 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -1506,8 +1506,6 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
pipe_ctx->plane_res.scl_data.lb_params.depth = LB_PIXEL_DEPTH_30BPP;
pipe_ctx->plane_res.scl_data.lb_params.alpha_en = plane_state->per_pixel_alpha;
- spl_out->scl_data.h_active = pipe_ctx->plane_res.scl_data.h_active;
- spl_out->scl_data.v_active = pipe_ctx->plane_res.scl_data.v_active;
// Convert pipe_ctx to respective input params for SPL
translate_SPL_in_params_from_pipe_ctx(pipe_ctx, spl_in);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 5bbc7d2daca6..4c94dd38be4b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -55,7 +55,7 @@ struct aux_payload;
struct set_config_cmd_payload;
struct dmub_notification;
-#define DC_VER "3.2.297"
+#define DC_VER "3.2.299"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -761,7 +761,8 @@ union dpia_debug_options {
uint32_t extend_aux_rd_interval:1; /* bit 2 */
uint32_t disable_mst_dsc_work_around:1; /* bit 3 */
uint32_t enable_force_tbt3_work_around:1; /* bit 4 */
- uint32_t reserved:27;
+ uint32_t disable_usb4_pm_support:1; /* bit 5 */
+ uint32_t reserved:26;
} bits;
uint32_t raw;
};
@@ -1051,6 +1052,7 @@ struct dc_debug_options {
unsigned int disable_spl;
unsigned int force_easf;
unsigned int force_sharpness;
+ unsigned int force_sharpness_level;
unsigned int force_lls;
bool notify_dpia_hr_bw;
bool enable_ips_visual_confirm;
@@ -1347,7 +1349,7 @@ struct dc_plane_state {
enum mpcc_movable_cm_location mcm_location;
struct dc_csc_transform cursor_csc_color_matrix;
bool adaptive_sharpness_en;
- unsigned int sharpnessX1000;
+ int sharpness_level;
enum linear_light_scaling linear_light_scaling;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
index b1265124608b..1e7de0f03290 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.c
@@ -1476,7 +1476,7 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
ips2_exit_count);
}
-void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState)
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state)
{
struct dmub_srv *dmub;
@@ -1485,12 +1485,38 @@ void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_c
dmub = dc_dmub_srv->dmub;
- if (powerState == DC_ACPI_CM_POWER_STATE_D0)
+ if (power_state == DC_ACPI_CM_POWER_STATE_D0)
dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D0);
else
dmub_srv_set_power_state(dmub, DMUB_POWER_STATE_D3);
}
+void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
+ enum dc_acpi_cm_power_state power_state)
+{
+ union dmub_rb_cmd cmd;
+
+ if (!dc_dmub_srv)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+
+ cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
+ cmd.idle_opt_set_dc_power_state.header.sub_type = DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
+ cmd.idle_opt_set_dc_power_state.header.payload_bytes =
+ sizeof(cmd.idle_opt_set_dc_power_state) - sizeof(cmd.idle_opt_set_dc_power_state.header);
+
+ if (power_state == DC_ACPI_CM_POWER_STATE_D0) {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D0;
+ } else if (power_state == DC_ACPI_CM_POWER_STATE_D3) {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_D3;
+ } else {
+ cmd.idle_opt_set_dc_power_state.data.power_state = DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN;
+ }
+
+ dc_wake_and_execute_dmub_cmd(dc_dmub_srv->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+}
+
bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv)
{
volatile const struct dmub_shared_state_ips_fw *ips_fw;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
index 580940222777..42f0cb672d8b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dmub_srv.h
@@ -109,7 +109,29 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait);
void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle);
-void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state powerState);
+/**
+ * dc_dmub_srv_set_power_state() - Sets the power state for DMUB service.
+ *
+ * Controls whether messaging the DMCUB or interfacing with it via HW register
+ * access is permitted.
+ *
+ * @dc_dmub_srv - The DC DMUB service pointer
+ * @power_state - the DC power state
+ */
+void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state);
+
+/**
+ * dc_dmub_srv_notify_fw_dc_power_state() - Notifies firmware of the DC power state.
+ *
+ * Differs from dc_dmub_srv_set_power_state() in that it needs HW access to
+ * notify DMCUB of the state transition. It should be called after D0 exit
+ * and before the D3 set power state.
+ *
+ * @dc_dmub_srv - The DC DMUB service pointer
+ * @power_state - the DC power state
+ */
+void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv,
+ enum dc_acpi_cm_power_state power_state);
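A minimal usage sketch of the two entry points documented above, assuming a suspend path with a valid dc pointer; the wrapper function name is illustrative:

/* Hedged sketch: per the kernel-doc, notify firmware while HW access
 * is still allowed, then flip the DMUB service's own power state.
 */
static void example_suspend_to_d3(struct dc *dc)
{
	/* Sends a DMUB command, so it must run before HW access is gated. */
	dc_dmub_srv_notify_fw_dc_power_state(dc->ctx->dmub_srv,
					     DC_ACPI_CM_POWER_STATE_D3);

	/* From here on, the DMUB service treats the domain as D3. */
	dc_dmub_srv_set_power_state(dc->ctx->dmub_srv,
				    DC_ACPI_CM_POWER_STATE_D3);
}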
/**
* @dc_dmub_srv_should_detect() - Checks if link detection is required.
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
index 8f85a1db5eba..cd6de93eb91c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.c
@@ -42,26 +42,26 @@ static void populate_spltaps_from_taps(struct spl_taps *spl_scaling_quality,
static void populate_taps_from_spltaps(struct scaling_taps *scaling_quality,
const struct spl_taps *spl_scaling_quality)
{
- scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c;
- scaling_quality->h_taps = spl_scaling_quality->h_taps;
- scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c;
- scaling_quality->v_taps = spl_scaling_quality->v_taps;
+ scaling_quality->h_taps_c = spl_scaling_quality->h_taps_c + 1;
+ scaling_quality->h_taps = spl_scaling_quality->h_taps + 1;
+ scaling_quality->v_taps_c = spl_scaling_quality->v_taps_c + 1;
+ scaling_quality->v_taps = spl_scaling_quality->v_taps + 1;
}
static void populate_ratios_from_splratios(struct scaling_ratios *ratios,
- const struct spl_ratios *spl_ratios)
+ const struct ratio *spl_ratios)
{
- ratios->horz = spl_ratios->horz;
- ratios->vert = spl_ratios->vert;
- ratios->horz_c = spl_ratios->horz_c;
- ratios->vert_c = spl_ratios->vert_c;
+ ratios->horz = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio >> 5, 3, 19);
+ ratios->vert = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio >> 5, 3, 19);
+ ratios->horz_c = dc_fixpt_from_ux_dy(spl_ratios->h_scale_ratio_c >> 5, 3, 19);
+ ratios->vert_c = dc_fixpt_from_ux_dy(spl_ratios->v_scale_ratio_c >> 5, 3, 19);
}
static void populate_inits_from_splinits(struct scl_inits *inits,
- const struct spl_inits *spl_inits)
+ const struct init *spl_inits)
{
- inits->h = spl_inits->h;
- inits->v = spl_inits->v;
- inits->h_c = spl_inits->h_c;
- inits->v_c = spl_inits->v_c;
+ inits->h = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int, spl_inits->h_filter_init_frac >> 5, 0, 19);
+ inits->v = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int, spl_inits->v_filter_init_frac >> 5, 0, 19);
+ inits->h_c = dc_fixpt_from_int_dy(spl_inits->h_filter_init_int_c, spl_inits->h_filter_init_frac_c >> 5, 0, 19);
+ inits->v_c = dc_fixpt_from_int_dy(spl_inits->v_filter_init_int_c, spl_inits->v_filter_init_frac_c >> 5, 0, 19);
}
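A brief worked example of the fixed-point plumbing above; the u3.19 reading of the dc_fixpt_from_ux_dy() arguments is inferred from the call sites, not from its definition:

/* Hedged arithmetic sketch. If SPL keeps scale ratios with 24
 * fractional bits, a 2x downscale is 2 << 24 = 0x02000000.
 * Shifting right by 5 leaves 19 fractional bits:
 *
 *   raw          = 0x02000000;                    // 2.0, 24 frac bits
 *   v            = raw >> 5;                      // 2.0, 19 frac bits
 *   ratios->horz = dc_fixpt_from_ux_dy(v, 3, 19); // widen to 31.32
 *
 * The init path is analogous, splitting integer and fractional parts
 * via dc_fixpt_from_int_dy() with the fraction also trimmed by 5 bits.
 */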
/// @brief Translate SPL input parameters from pipe context
/// @param pipe_ctx
@@ -139,24 +139,36 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
else if (pipe_ctx->stream->ctx->dc->debug.force_easf == 2)
spl_in->disable_easf = true;
/* Translate adaptive sharpening preference */
- if (pipe_ctx->stream->ctx->dc->debug.force_sharpness > 0) {
- spl_in->adaptive_sharpness.enable = (pipe_ctx->stream->ctx->dc->debug.force_sharpness > 1) ? true : false;
- if (pipe_ctx->stream->ctx->dc->debug.force_sharpness == 2)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_LOW;
- else if (pipe_ctx->stream->ctx->dc->debug.force_sharpness == 3)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_MID;
- else if (pipe_ctx->stream->ctx->dc->debug.force_sharpness >= 4)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_HIGH;
- } else {
- spl_in->adaptive_sharpness.enable = plane_state->adaptive_sharpness_en;
- if (plane_state->sharpnessX1000 == 0)
+ unsigned int sharpness_setting = pipe_ctx->stream->ctx->dc->debug.force_sharpness;
+ unsigned int force_sharpness_level = pipe_ctx->stream->ctx->dc->debug.force_sharpness_level;
+ if (sharpness_setting == SHARPNESS_HW_OFF)
+ spl_in->adaptive_sharpness.enable = false;
+ else if (sharpness_setting == SHARPNESS_ZERO) {
+ spl_in->adaptive_sharpness.enable = true;
+ spl_in->adaptive_sharpness.sharpness_level = 0;
+ } else if (sharpness_setting == SHARPNESS_CUSTOM) {
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_min = 0;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_max = 1750;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_rgb_mid = 750;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_min = 0;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_max = 3500;
+ spl_in->adaptive_sharpness.sharpness_range.sdr_yuv_mid = 1500;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_min = 0;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_max = 2750;
+ spl_in->adaptive_sharpness.sharpness_range.hdr_rgb_mid = 1500;
+
+ if (force_sharpness_level > 0) {
+ if (force_sharpness_level > 10)
+ force_sharpness_level = 10;
+ spl_in->adaptive_sharpness.enable = true;
+ spl_in->adaptive_sharpness.sharpness_level = force_sharpness_level;
+ } else if (!plane_state->adaptive_sharpness_en) {
spl_in->adaptive_sharpness.enable = false;
- else if (plane_state->sharpnessX1000 < 999)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_LOW;
- else if (plane_state->sharpnessX1000 < 1999)
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_MID;
- else // Any other value is high sharpness
- spl_in->adaptive_sharpness.sharpness = SHARPNESS_HIGH;
+ spl_in->adaptive_sharpness.sharpness_level = 0;
+ } else {
+ spl_in->adaptive_sharpness.enable = true;
+ spl_in->adaptive_sharpness.sharpness_level = plane_state->sharpness_level;
+ }
}
// Translate linear light scaling preference
if (pipe_ctx->stream->ctx->dc->debug.force_lls > 0)
@@ -171,6 +183,22 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
/* Translate transfer function */
spl_in->basic_in.tf_type = (enum spl_transfer_func_type) plane_state->in_transfer_func.type;
spl_in->basic_in.tf_predefined_type = (enum spl_transfer_func_predefined) plane_state->in_transfer_func.tf;
+
+ spl_in->h_active = pipe_ctx->plane_res.scl_data.h_active;
+ spl_in->v_active = pipe_ctx->plane_res.scl_data.v_active;
+ /* Check if the stream is in fullscreen and if it is HDR.
+ * Use this to determine the sharpness levels
+ */
+ spl_in->is_fullscreen = dm_helpers_is_fullscreen(pipe_ctx->stream->ctx, pipe_ctx->stream);
+ spl_in->is_hdr_on = dm_helpers_is_hdr_on(pipe_ctx->stream->ctx, pipe_ctx->stream);
+ spl_in->hdr_multx100 = 0;
+ if (spl_in->is_hdr_on) {
+ spl_in->hdr_multx100 = (uint32_t)dc_fixpt_floor(dc_fixpt_mul(plane_state->hdr_mult,
+ dc_fixpt_from_int(100)));
+ /* Disable sharpness for HDR Mult > 6.0 */
+ if (spl_in->hdr_multx100 > 600)
+ spl_in->adaptive_sharpness.enable = false;
+ }
}
/// @brief Translate SPL output parameters to pipe context
@@ -179,15 +207,15 @@ void translate_SPL_in_params_from_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl
void translate_SPL_out_params_to_pipe_ctx(struct pipe_ctx *pipe_ctx, struct spl_out *spl_out)
{
// Make scaler data recout point to spl output field recout
- populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->scl_data.recout);
+ populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.recout, &spl_out->dscl_prog_data->recout);
// Make scaler data ratios point to spl output field ratios
- populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->scl_data.ratios);
+ populate_ratios_from_splratios(&pipe_ctx->plane_res.scl_data.ratios, &spl_out->dscl_prog_data->ratios);
// Make scaler data viewport point to spl output field viewport
- populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->scl_data.viewport);
+ populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport, &spl_out->dscl_prog_data->viewport);
// Make scaler data viewport_c point to spl output field viewport_c
- populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->scl_data.viewport_c);
+ populate_rect_from_splrect(&pipe_ctx->plane_res.scl_data.viewport_c, &spl_out->dscl_prog_data->viewport_c);
// Make scaler data taps point to spl output field scaling taps
- populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->scl_data.taps);
+ populate_taps_from_spltaps(&pipe_ctx->plane_res.scl_data.taps, &spl_out->dscl_prog_data->taps);
// Make scaler data init point to spl output field init
- populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->scl_data.inits);
+ populate_inits_from_splinits(&pipe_ctx->plane_res.scl_data.inits, &spl_out->dscl_prog_data->init);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h
index c73d640c3632..eaa5c5373b28 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_spl_translate.h
@@ -6,6 +6,7 @@
#define __DC_SPL_TRANSLATE_H__
#include "dc.h"
#include "resource.h"
+#include "dm_helpers.h"
/* Map SPL input parameters to pipe context
* @pipe_ctx: pipe context
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index de9bd72ca514..14ea47eda0c8 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -142,6 +142,7 @@ union stream_update_flags {
uint32_t mst_bw : 1;
uint32_t crtc_timing_adjust : 1;
uint32_t fams_changed : 1;
+ uint32_t scaler_sharpener : 1;
} bits;
uint32_t raw;
@@ -308,6 +309,7 @@ struct dc_stream_state {
bool is_phantom;
struct luminance_data lumin_data;
+ bool scaler_sharpener_update;
};
#define ABM_LEVEL_IMMEDIATE_DISABLE 255
@@ -353,6 +355,7 @@ struct dc_stream_update {
struct dc_cursor_attributes *cursor_attributes;
struct dc_cursor_position *cursor_position;
bool *hw_cursor_req;
+ bool *scaler_sharpener_update;
};
bool dc_is_stream_unchanged(
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 97279b080f3e..fd6dca735714 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1050,6 +1050,23 @@ union replay_error_status {
unsigned char raw;
};
+union replay_low_refresh_rate_enable_options {
+ struct {
+ //BIT[0-3]: Replay Low Hz Support control
+ unsigned int ENABLE_LOW_RR_SUPPORT :1;
+ unsigned int RESERVED_1_3 :3;
+ //BIT[4-15]: Replay Low Hz Enable Scenarios
+ unsigned int ENABLE_STATIC_SCREEN :1;
+ unsigned int ENABLE_FULL_SCREEN_VIDEO :1;
+ unsigned int ENABLE_GENERAL_UI :1;
+ unsigned int RESERVED_7_15 :9;
+ //BIT[16-31]: Replay Low Hz Enable Check
+ unsigned int ENABLE_STATIC_FLICKER_CHECK :1;
+ unsigned int RESERVED_17_31 :15;
+ } bits;
+ unsigned int raw;
+};
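A hedged sketch of how a bits/raw union like the one above is typically populated; the chosen flags are illustrative:

/* Illustrative only: enable low-refresh-rate Replay for fullscreen
 * video with the static flicker check, then inspect the raw word.
 */
union replay_low_refresh_rate_enable_options opts = { .raw = 0 };

opts.bits.ENABLE_LOW_RR_SUPPORT = 1;        /* bit 0  */
opts.bits.ENABLE_FULL_SCREEN_VIDEO = 1;     /* bit 5  */
opts.bits.ENABLE_STATIC_FLICKER_CHECK = 1;  /* bit 16 */

/* opts.raw == 0x00010021 given the layout documented above */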
+
struct replay_config {
/* Replay feature is supported */
bool replay_supported;
@@ -1073,6 +1090,8 @@ struct replay_config {
bool replay_support_fast_resync_in_ultra_sleep_mode;
/* Replay error status */
union replay_error_status replay_error_status;
+ /* Replay Low Hz enable Options */
+ union replay_low_refresh_rate_enable_options low_rr_enable_options;
};
/* Replay feature flags*/
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index 6ac2bd86c4db..160c299419b7 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -328,6 +328,17 @@
type DPSTREAMCLK1_GATE_DISABLE;\
type DPSTREAMCLK2_GATE_DISABLE;\
type DPSTREAMCLK3_GATE_DISABLE;\
+ type SYMCLKA_FE_GATE_DISABLE;\
+ type SYMCLKB_FE_GATE_DISABLE;\
+ type SYMCLKC_FE_GATE_DISABLE;\
+ type SYMCLKD_FE_GATE_DISABLE;\
+ type SYMCLKE_FE_GATE_DISABLE;\
+ type SYMCLKA_GATE_DISABLE;\
+ type SYMCLKB_GATE_DISABLE;\
+ type SYMCLKC_GATE_DISABLE;\
+ type SYMCLKD_GATE_DISABLE;\
+ type SYMCLKE_GATE_DISABLE;\
+
#define DCCG401_REG_FIELD_LIST(type) \
type OTG0_TMDS_PIXEL_RATE_DIV;\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
index 7f91e48902e2..ee02b78e290f 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn35/dcn35_dccg.c
@@ -24,6 +24,7 @@
#include "reg_helper.h"
#include "core_types.h"
+#include "resource.h"
#include "dcn35_dccg.h"
#define TO_DCN_DCCG(dccg)\
@@ -136,7 +137,7 @@ static void dccg35_set_dsc_clk_rcg(struct dccg *dccg, int inst, bool enable)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dsc && enable)
return;
switch (inst) {
@@ -165,7 +166,7 @@ static void dccg35_set_symclk32_se_rcg(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se && enable)
return;
/* SYMCLK32_ROOT_SE#_GATE_DISABLE will clock gate in DCCG */
@@ -204,7 +205,7 @@ static void dccg35_set_symclk32_le_rcg(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le && enable)
return;
switch (inst) {
@@ -231,7 +232,7 @@ static void dccg35_set_physymclk_rcg(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk && enable)
return;
switch (inst) {
@@ -262,35 +263,45 @@ static void dccg35_set_physymclk_rcg(
}
static void dccg35_set_symclk_fe_rcg(
- struct dccg *dccg,
- int inst,
- bool enable)
+ struct dccg *dccg,
+ int inst,
+ bool enable)
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk_fe && enable)
return;
switch (inst) {
case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKA_FE_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
- SYMCLKA_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ SYMCLKA_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKB_FE_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
- SYMCLKB_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ SYMCLKB_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKC_FE_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
- SYMCLKC_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ SYMCLKC_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKD_FE_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
- SYMCLKD_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ SYMCLKD_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 4:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKE_FE_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
- SYMCLKE_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
+ SYMCLKE_FE_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
default:
BREAK_TO_DEBUGGER();
@@ -307,27 +318,37 @@ static void dccg35_set_symclk_be_rcg(
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
/* TBD add symclk_be in rcg control bits */
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk_fe && enable)
return;
switch (inst) {
case 0:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKA_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
SYMCLKA_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 1:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKB_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
SYMCLKB_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 2:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKC_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
SYMCLKC_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 3:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKD_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
SYMCLKD_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
case 4:
+ REG_UPDATE(DCCG_GATE_DISABLE_CNTL2,
+ SYMCLKE_GATE_DISABLE, enable ? 0 : 1);
REG_UPDATE(DCCG_GATE_DISABLE_CNTL5,
SYMCLKE_ROOT_GATE_DISABLE, enable ? 0 : 1);
break;
@@ -342,7 +363,7 @@ static void dccg35_set_dtbclk_p_rcg(struct dccg *dccg, int inst, bool enable)
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
return;
switch (inst) {
@@ -370,7 +391,7 @@ static void dccg35_set_dppclk_rcg(struct dccg *dccg,
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpp && enable)
return;
switch (inst) {
@@ -399,7 +420,7 @@ static void dccg35_set_dpstreamclk_rcg(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream && enable)
return;
switch (inst) {
@@ -436,7 +457,7 @@ static void dccg35_set_smclk32_se_rcg(
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
- if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se)
+ if (!dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se && enable)
return;
switch (inst) {
@@ -1082,7 +1103,8 @@ static void dccg35_trigger_dio_fifo_resync(struct dccg *dccg)
uint32_t dispclk_rdivider_value = 0;
REG_GET(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_RDIVIDER, &dispclk_rdivider_value);
- REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
+ if (dispclk_rdivider_value != 0)
+ REG_UPDATE(DENTIST_DISPCLK_CNTL, DENTIST_DISPCLK_WDIVIDER, dispclk_rdivider_value);
}
static void dcn35_set_dppclk_enable(struct dccg *dccg,
@@ -1692,6 +1714,12 @@ static void dccg35_disable_symclk32_se(
}
}
+static void dccg35_init_cb(struct dccg *dccg)
+{
+ (void)dccg;
+ /* Any RCG should be done when the driver enters low power mode */
+}
+
void dccg35_init(struct dccg *dccg)
{
int otg_inst;
@@ -2042,8 +2070,6 @@ static void dccg35_set_dpstreamclk_cb(
enum dtbclk_source dtb_clk_src;
enum dp_stream_clk_source dp_stream_clk_src;
- ASSERT(otg_inst >= DP_STREAM_DTBCLK_P5);
-
switch (src) {
case REFCLK:
dtb_clk_src = DTBCLK_REFCLK;
@@ -2098,6 +2124,13 @@ static void dccg35_update_dpp_dto_cb(struct dccg *dccg, int dpp_inst,
{
struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg);
+ if (dccg->dpp_clock_gated[dpp_inst]) {
+ /*
+ * Do not update the DPPCLK DTO if the clock is stopped.
+ */
+ return;
+ }
+
if (dccg->ref_dppclk && req_dppclk) {
int ref_dppclk = dccg->ref_dppclk;
int modulo, phase;
@@ -2125,19 +2158,20 @@ static void dccg35_update_dpp_dto_cb(struct dccg *dccg, int dpp_inst,
}
static void dccg35_dpp_root_clock_control_cb(
- struct dccg *dccg,
- unsigned int dpp_inst,
- bool power_on)
+ struct dccg *dccg,
+ unsigned int dpp_inst,
+ bool power_on)
{
+ if (dccg->dpp_clock_gated[dpp_inst] == power_on)
+ return;
	/* power_on set indicates we need to ungate.
	 * Currently called from optimize_bandwidth and prepare_bandwidth calls.
	 * Since the clock source is not passed, restore to refclock on ungate.
	 * Redundant as gating when enabled is achieved through update_dpp_dto.
	 */
- if (power_on)
- dccg35_enable_dpp_clk_new(dccg, dpp_inst, DPP_REFCLK);
- else
- dccg35_disable_dpp_clk_new(dccg, dpp_inst);
+ dccg35_set_dppclk_rcg(dccg, dpp_inst, !power_on);
+
+ dccg->dpp_clock_gated[dpp_inst] = !power_on;
}
static void dccg35_enable_symclk32_se_cb(
@@ -2321,7 +2355,7 @@ static const struct dccg_funcs dccg35_funcs_new = {
.update_dpp_dto = dccg35_update_dpp_dto_cb,
.dpp_root_clock_control = dccg35_dpp_root_clock_control_cb,
.get_dccg_ref_freq = dccg31_get_dccg_ref_freq,
- .dccg_init = dccg35_init,
+ .dccg_init = dccg35_init_cb,
.set_dpstreamclk = dccg35_set_dpstreamclk_cb,
.set_dpstreamclk_root_clock_gating = dccg35_set_dpstreamclk_root_clock_gating_cb,
.enable_symclk32_se = dccg35_enable_symclk32_se_cb,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 14f935961672..c31e4f26a305 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -12,6 +12,8 @@
#define MAX_PIPES 6
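+/* Number of attempts for the replay residency GPINT command before giving up */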
+#define GPINT_RETRY_NUM 20
+
static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3};
static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5};
@@ -222,6 +224,7 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
uint32_t *residency, const bool is_start, enum pr_residency_mode mode)
{
uint16_t param = (uint16_t)(panel_inst << 8);
+ uint32_t i = 0;
switch (mode) {
case PR_RESIDENCY_MODE_PHY:
@@ -249,10 +252,17 @@ static void dmub_replay_residency(struct dmub_replay *dmub, uint8_t panel_inst,
if (is_start)
param |= REPLAY_RESIDENCY_ENABLE;
- // Send gpint command and wait for ack
- if (!dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__REPLAY_RESIDENCY, param,
- residency, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
- *residency = 0;
+ for (i = 0; i < GPINT_RETRY_NUM; i++) {
+ // Send gpint command and wait for ack
+ if (dc_wake_and_execute_gpint(dmub->ctx, DMUB_GPINT__REPLAY_RESIDENCY, param,
+ residency, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY))
+ return;
+
+ udelay(100);
+ }
+
+	// GPINT command failed even after all retries; report zero residency
+ *residency = 0;
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
index 710a25dcfef0..b0d9aed0f265 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/dml21_translation_helper.c
@@ -777,6 +777,14 @@ static void populate_dml21_plane_config_from_plane_state(struct dml2_context *dm
	 * certain cases. Hence take corrective action and disable scaling.
*/
plane->composition.scaler_info.enabled = false;
+ } else if ((plane_state->ctx->dc->config.use_spl == true) &&
+ (plane->composition.scaler_info.enabled == false)) {
+		/* To enable the sharpener for 1:1 scaling, the scaler must be enabled. If use_spl is set,
+		 * allow the case where the ratio is 1 but taps > 1
+ */
+ if ((scaler_data->taps.h_taps > 1) || (scaler_data->taps.v_taps > 1) ||
+ (scaler_data->taps.h_taps_c > 1) || (scaler_data->taps.v_taps_c > 1))
+ plane->composition.scaler_info.enabled = true;
}
/* always_scale is only used for debug purposes not used in production but has to be
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
index 1c773bbb9992..eeb96c455658 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/inc/dml_top_types.h
@@ -5,7 +5,6 @@
#ifndef __DML_TOP_TYPES_H__
#define __DML_TOP_TYPES_H__
-#include "dml_top_types.h"
#include "dml_top_display_cfg_types.h"
#include "dml_top_soc_parameter_types.h"
#include "dml_top_policy_types.h"
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
index 805fd783131f..3ea54fd52e46 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_dcn4_calcs.c
@@ -2085,7 +2085,11 @@ static void CalculateDCCConfiguration(
unsigned int full_swath_bytes_vert_wc_l;
unsigned int full_swath_bytes_vert_wc_c;
- yuv420 = dml_is_420(SourcePixelFormat);
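+	/* Use an explicit 0/1 flag instead of assigning the bool result directly */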
+ if (dml_is_420(SourcePixelFormat))
+ yuv420 = 1;
+ else
+ yuv420 = 0;
+
horz_div_l = 1;
horz_div_c = 1;
vert_div_l = 1;
@@ -2553,8 +2557,11 @@ static void calculate_mcache_setting(
l->luma_time_factor = (double)l->mvmpg_width_c / l->mvmpg_width_l * 2;
// The algorithm starts with computing a non-integer, avg_mcache_element_size_l/c:
- l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
- if (l->is_dual_plane) {
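+	// Guard against a divide-by-zero when no mcaches are assigned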
+ if (*p->num_mcaches_l) {
+ l->avg_mcache_element_size_l = l->meta_row_width_l / *p->num_mcaches_l;
+ }
+
+ if (l->is_dual_plane && *p->num_mcaches_c) {
l->avg_mcache_element_size_c = l->meta_row_width_c / *p->num_mcaches_c;
if (!p->imall_enable || (*p->mall_comb_mcache_l == *p->mall_comb_mcache_c)) {
@@ -2683,9 +2690,9 @@ static double dml_get_return_bandwidth_available(
double ideal_fabric_bandwidth = fclk_mhz * (double)soc->fabric_datapath_to_dcn_data_return_bytes;
double ideal_dram_bandwidth = dram_bw_mbps; //dram_speed_mts * soc->clk_table.dram_config.channel_count * soc->clk_table.dram_config.channel_width_bytes;
- double derate_sdp_factor = 1;
- double derate_fabric_factor = 1;
- double derate_dram_factor = 1;
+ double derate_sdp_factor;
+ double derate_fabric_factor;
+ double derate_dram_factor;
double derate_sdp_bandwidth;
double derate_fabric_bandwidth;
@@ -5056,6 +5063,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->trip_to_mem = 0.0;
*p->Tvm_trips = 0.0;
*p->Tr0_trips = 0.0;
+ s->Tvm_no_trip_oto = 0.0;
+ s->Tr0_no_trip_oto = 0.0;
s->Tvm_trips_rounded = 0.0;
s->Tr0_trips_rounded = 0.0;
s->max_Tsw = 0.0;
@@ -5293,31 +5302,38 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
s->Lsw_oto = math_ceil2(4.0 * math_max2(s->prefetch_sw_bytes / s->prefetch_bw_oto / s->LineTime, s->min_Lsw_oto), 1.0) / 4.0;
if (p->display_cfg->gpuvm_enable == true) {
- s->Tvm_oto = math_max3(
- *p->Tvm_trips,
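+			/* Track the oto time without trip latency; it feeds the no-trip vblank counts used for prefetch bandwidth */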
+ s->Tvm_no_trip_oto = math_max2(
*p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto,
s->LineTime / 4.0);
+ s->Tvm_oto = math_max2(
+ *p->Tvm_trips,
+ s->Tvm_no_trip_oto);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tvm_oto max0 = %f\n", __func__, *p->Tvm_trips);
dml2_printf("DML::%s: Tvm_oto max1 = %f\n", __func__, *p->Tno_bw + vm_bytes * p->HostVMInefficiencyFactor / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tvm_oto max2 = %f\n", __func__, s->LineTime / 4.0);
#endif
} else {
+ s->Tvm_no_trip_oto = s->Tvm_trips_rounded;
s->Tvm_oto = s->Tvm_trips_rounded;
}
if ((p->display_cfg->gpuvm_enable == true || p->setup_for_tdlut || dcc_mrq_enable)) {
- s->Tr0_oto = math_max3(
- *p->Tr0_trips,
+ s->Tr0_no_trip_oto = math_max2(
(p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto,
s->LineTime / 4.0);
+ s->Tr0_oto = math_max2(
+ *p->Tr0_trips,
+ s->Tr0_no_trip_oto);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Tr0_oto max0 = %f\n", __func__, *p->Tr0_trips);
dml2_printf("DML::%s: Tr0_oto max1 = %f\n", __func__, (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + p->meta_row_bytes + tdlut_row_bytes) / s->prefetch_bw_oto);
dml2_printf("DML::%s: Tr0_oto max2 = %f\n", __func__, s->LineTime / 4);
#endif
- } else
- s->Tr0_oto = (s->LineTime - s->Tvm_oto) / 4.0;
+ } else {
+ s->Tr0_no_trip_oto = (s->LineTime - s->Tvm_oto) / 4.0;
+ s->Tr0_oto = s->Tr0_no_trip_oto;
+ }
s->Tvm_oto_lines = math_ceil2(4.0 * s->Tvm_oto / s->LineTime, 1) / 4.0;
s->Tr0_oto_lines = math_ceil2(4.0 * s->Tr0_oto / s->LineTime, 1) / 4.0;
@@ -5595,6 +5611,9 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
dml2_printf("DML::%s: Tvm_equ = %f\n", __func__, s->Tvm_equ);
dml2_printf("DML::%s: Tr0_equ = %f\n", __func__, s->Tr0_equ);
#endif
+ // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
+ s->Lsw_equ = s->dst_y_prefetch_equ - math_ceil2(4.0 * (s->Tvm_equ + 2 * s->Tr0_equ) / s->LineTime, 1.0) / 4.0;
+
// Use the more stressful prefetch schedule
if (s->dst_y_prefetch_oto < s->dst_y_prefetch_equ) {
*p->dst_y_prefetch = s->dst_y_prefetch_oto;
@@ -5603,25 +5622,28 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
*p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
*p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ s->dst_y_per_vm_no_trip_vblank = math_ceil2(4.0 * s->Tvm_no_trip_oto / s->LineTime, 1.0) / 4.0;
+ s->dst_y_per_row_no_trip_vblank = math_ceil2(4.0 * s->Tr0_no_trip_oto / s->LineTime, 1.0) / 4.0;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using oto scheduling for prefetch\n", __func__);
#endif
-
} else {
*p->dst_y_prefetch = s->dst_y_prefetch_equ;
s->TimeForFetchingVM = s->Tvm_equ;
s->TimeForFetchingRowInVBlank = s->Tr0_equ;
- *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
- *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_vm_vblank = math_ceil2(4.0 * s->TimeForFetchingVM / s->LineTime, 1.0) / 4.0;
+ *p->dst_y_per_row_vblank = math_ceil2(4.0 * s->TimeForFetchingRowInVBlank / s->LineTime, 1.0) / 4.0;
+ s->dst_y_per_vm_no_trip_vblank = *p->dst_y_per_vm_vblank;
+ s->dst_y_per_row_no_trip_vblank = *p->dst_y_per_row_vblank;
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: Using equ bw scheduling for prefetch\n", __func__);
#endif
}
- // Lsw = dst_y_prefetch - (dst_y_per_vm_vblank + 2*dst_y_per_row_vblank)
- s->LinesToRequestPrefetchPixelData = *p->dst_y_prefetch - *p->dst_y_per_vm_vblank - 2 * *p->dst_y_per_row_vblank; // Lsw
+ /* take worst case Lsw to calculate bandwidth requirement regardless of schedule */
+ s->LinesToRequestPrefetchPixelData = math_min2(s->Lsw_equ, s->Lsw_oto); // Lsw
s->cursor_prefetch_bytes = (unsigned int)math_max2(p->cursor_bytes_per_chunk, 4 * p->cursor_bytes_per_line);
*p->prefetch_cursor_bw = p->num_cursors * s->cursor_prefetch_bytes / (s->LinesToRequestPrefetchPixelData * s->LineTime);
@@ -5741,13 +5763,13 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (vm_bytes == 0) {
prefetch_vm_bw = 0;
- } else if (*p->dst_y_per_vm_vblank > 0) {
+ } else if (s->dst_y_per_vm_no_trip_vblank > 0) {
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: HostVMInefficiencyFactor = %f\n", __func__, p->HostVMInefficiencyFactor);
dml2_printf("DML::%s: dst_y_per_vm_vblank = %f\n", __func__, *p->dst_y_per_vm_vblank);
dml2_printf("DML::%s: LineTime = %f\n", __func__, s->LineTime);
#endif
- prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (*p->dst_y_per_vm_vblank * s->LineTime);
+ prefetch_vm_bw = vm_bytes * p->HostVMInefficiencyFactor / (s->dst_y_per_vm_no_trip_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: prefetch_vm_bw = %f\n", __func__, prefetch_vm_bw);
#endif
@@ -5759,8 +5781,8 @@ static bool CalculatePrefetchSchedule(struct dml2_core_internal_scratch *scratch
if (p->PixelPTEBytesPerRow == 0 && tdlut_row_bytes == 0) {
prefetch_row_bw = 0;
- } else if (*p->dst_y_per_row_vblank > 0) {
- prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (*p->dst_y_per_row_vblank * s->LineTime);
+ } else if (s->dst_y_per_row_no_trip_vblank > 0) {
+ prefetch_row_bw = (p->PixelPTEBytesPerRow * p->HostVMInefficiencyFactor + tdlut_row_bytes) / (s->dst_y_per_row_no_trip_vblank * s->LineTime);
#ifdef __DML_VBA_DEBUG__
dml2_printf("DML::%s: PixelPTEBytesPerRow = %u\n", __func__, p->PixelPTEBytesPerRow);
@@ -7194,7 +7216,7 @@ static bool dml_core_mode_support(struct dml2_core_calcs_mode_support_ex *in_out
mode_lib->ms.support.WritebackLatencySupport = true;
for (k = 0; k <= mode_lib->ms.num_active_planes - 1; k++) {
if (display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].writeback.enable == true &&
- (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / mode_lib->soc.qos_parameters.writeback.base_latency_us)) {
+ (mode_lib->ms.WriteBandwidth[k] > mode_lib->ip.writeback_interface_buffer_size_kbytes * 1024 / ((double)mode_lib->soc.qos_parameters.writeback.base_latency_us))) {
mode_lib->ms.support.WritebackLatencySupport = false;
}
}
@@ -10739,7 +10761,7 @@ static bool dml_core_mode_programming(struct dml2_core_calcs_mode_programming_ex
mode_lib->mp.UrgentLatency,
mode_lib->mp.TripToMemory,
!dml_is_phantom_pipe(&display_cfg->plane_descriptors[k]) && display_cfg->stream_descriptors[display_cfg->plane_descriptors[k].stream_index].timing.drr_config.enabled ?
- get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->ms.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
+ get_g6_temp_read_blackout_us(&mode_lib->soc, (unsigned int)(mode_lib->mp.uclk_freq_mhz * 1000), in_out_params->min_clk_index) : 0.0);
myPipe->Dppclk = mode_lib->mp.Dppclk[k];
myPipe->Dispclk = mode_lib->mp.Dispclk;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
index 13961c2eb634..cbdfbd5a0bde 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_core/dml2_core_shared_types.h
@@ -1187,11 +1187,15 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double prefetch_bw_oto;
double Tvm_oto;
double Tr0_oto;
+ double Tvm_no_trip_oto;
+ double Tr0_no_trip_oto;
double Tvm_oto_lines;
double Tr0_oto_lines;
double dst_y_prefetch_oto;
double TimeForFetchingVM;
double TimeForFetchingRowInVBlank;
+ double dst_y_per_vm_no_trip_vblank;
+ double dst_y_per_row_no_trip_vblank;
double LinesToRequestPrefetchPixelData;
unsigned int HostVMDynamicLevelsTrips;
double trip_to_mem;
@@ -1199,6 +1203,7 @@ struct dml2_core_calcs_CalculatePrefetchSchedule_locals {
double Tr0_trips_rounded;
double max_Tsw;
double Lsw_oto;
+ double Lsw_equ;
double Tpre_rounded;
double prefetch_bw_equ;
double Tvm_equ;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
index 8e68a8094658..a31db5742675 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn3.c
@@ -497,7 +497,6 @@ bool pmo_dcn3_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_out *in
in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
result = false;
} else {
- free_pipes -= planes_on_stream;
break;
}
} else {
@@ -666,7 +665,7 @@ bool pmo_dcn3_optimize_for_pstate_support(struct dml2_pmo_optimize_for_pstate_su
struct dml2_pmo_instance *pmo = in_out->instance;
unsigned int stream_index;
bool success = false;
- bool reached_end = true;
+ bool reached_end;
memcpy(in_out->optimized_display_config, in_out->base_display_config, sizeof(struct display_configuation_with_meta));
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
index 30767f330fd4..d63558ee3135 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_pmo/dml2_pmo_dcn4_fams2.c
@@ -334,7 +334,6 @@ bool pmo_dcn4_fams2_optimize_dcc_mcache(struct dml2_pmo_optimize_dcc_mcache_in_o
in_out->cfg_support_info->plane_support_info[i].dpps_used)) {
result = false;
} else {
- free_pipes -= planes_on_stream;
break;
}
} else {
@@ -672,8 +671,6 @@ bool pmo_dcn4_fams2_initialize(struct dml2_pmo_initialize_in_out *in_out)
/* populate list */
expand_base_strategies(pmo, base_strategy_list_4_display, base_strategy_list_4_display_size, 4);
break;
- default:
- break;
}
}
@@ -944,7 +941,8 @@ static void build_synchronized_timing_groups(
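+		/* Group streams only when both the timing and the output encoder type match */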
for (j = i + 1; j < display_config->display_config.num_streams; j++) {
if (memcmp(master_timing,
&display_config->display_config.stream_descriptors[j].timing,
- sizeof(struct dml2_timing_cfg)) == 0) {
+ sizeof(struct dml2_timing_cfg)) == 0 &&
+ display_config->display_config.stream_descriptors[i].output.output_encoder == display_config->display_config.stream_descriptors[j].output.output_encoder) {
set_bit_in_bitfield(&pmo->scratch.pmo_dcn4.synchronized_timing_group_masks[timing_group_idx], j);
set_bit_in_bitfield(&stream_mapped_mask, j);
}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
index dc8af4dd0410..d0e026d981b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml21/src/dml2_top/dml2_top_optimization.c
@@ -219,7 +219,6 @@ bool dml2_top_optimization_perform_optimization_phase_1(struct dml2_optimization
copy_display_configuration_with_meta(&l->cur_candidate_display_cfg, params->display_config);
highest_state = l->cur_candidate_display_cfg.stage1.min_clk_index_for_latency;
lowest_state = 0;
- cur_state = 0;
while (highest_state > lowest_state) {
cur_state = (highest_state + lowest_state) / 2;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
index 3ba184be25d3..140ec01545db 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_internal_types.h
@@ -101,7 +101,7 @@ struct dml2_wrapper_scratch {
struct dml2_dml_to_dc_pipe_mapping dml_to_dc_pipe_mapping;
bool enable_flexible_pipe_mapping;
bool plane_duplicate_exists;
- unsigned int dp2_mst_stream_count;
+ int hpo_stream_to_link_encoder_mapping[MAX_HPO_DP2_ENCODERS];
};
struct dml2_helper_det_policy_scratch {
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 7e39873832bf..bde4250853b1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -733,8 +733,7 @@ static void populate_dml_timing_cfg_from_stream_state(struct dml_timing_cfg_st *
}
static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *out, unsigned int location,
- const struct dc_stream_state *in, const struct pipe_ctx *pipe,
- unsigned int dp2_mst_stream_count)
+ const struct dc_stream_state *in, const struct pipe_ctx *pipe, struct dml2_context *dml2)
{
unsigned int output_bpc;
@@ -747,8 +746,8 @@ static void populate_dml_output_cfg_from_stream_state(struct dml_output_cfg_st *
case SIGNAL_TYPE_DISPLAY_PORT_MST:
case SIGNAL_TYPE_DISPLAY_PORT:
out->OutputEncoder[location] = dml_dp;
- if (is_dp2p0_output_encoder(pipe, dp2_mst_stream_count))
- out->OutputEncoder[location] = dml_dp2p0;
+ if (dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location] != -1)
+ out->OutputEncoder[dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[location]] = dml_dp2p0;
break;
case SIGNAL_TYPE_EDP:
out->OutputEncoder[location] = dml_edp;
@@ -1199,36 +1198,6 @@ static void dml2_populate_pipe_to_plane_index_mapping(struct dml2_context *dml2,
}
}
-static unsigned int calculate_dp2_mst_stream_count(struct dc_state *context)
-{
- int i, j;
- unsigned int dp2_mst_stream_count = 0;
-
- for (i = 0; i < context->stream_count; i++) {
- struct dc_stream_state *stream = context->streams[i];
-
- if (!stream || stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST)
- continue;
-
- for (j = 0; j < MAX_PIPES; j++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
-
- if (!pipe_ctx || !pipe_ctx->stream)
- continue;
-
- if (stream != pipe_ctx->stream)
- continue;
-
- if (pipe_ctx->stream_res.hpo_dp_stream_enc && pipe_ctx->link_res.hpo_dp_link_enc) {
- dp2_mst_stream_count++;
- break;
- }
- }
- }
-
- return dp2_mst_stream_count;
-}
-
static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cfg_st *out,
unsigned int location, const struct dc_stream_state *in)
{
@@ -1269,6 +1238,30 @@ static void populate_dml_writeback_cfg_from_stream_state(struct dml_writeback_cf
}
}
}
+
+static void dml2_map_hpo_stream_encoder_to_hpo_link_encoder_index(struct dml2_context *dml2, struct dc_state *context)
+{
+ int i;
+ struct pipe_ctx *current_pipe_context;
+
+ /* Scratch gets reset to zero in dml, but link encoder instance can be zero, so reset to -1 */
+ for (i = 0; i < MAX_HPO_DP2_ENCODERS; i++) {
+ dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[i] = -1;
+ }
+
+	/* If an HPO stream encoder is allocated to a pipe, get the instance of its allocated HPO link encoder */
+ for (i = 0; i < MAX_PIPES; i++) {
+ current_pipe_context = &context->res_ctx.pipe_ctx[i];
+ if (current_pipe_context->stream &&
+ current_pipe_context->stream_res.hpo_dp_stream_enc &&
+ current_pipe_context->link_res.hpo_dp_link_enc &&
+ dc_is_dp_signal(current_pipe_context->stream->signal)) {
+ dml2->v20.scratch.hpo_stream_to_link_encoder_mapping[current_pipe_context->stream_res.hpo_dp_stream_enc->inst] =
+ current_pipe_context->link_res.hpo_dp_link_enc->inst;
+ }
+ }
+}
+
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg)
{
int i = 0, j = 0, k = 0;
@@ -1291,8 +1284,8 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (dml2->v20.dml_core_ctx.ip.hostvm_enable)
dml2->v20.dml_core_ctx.policy.AllowForPStateChangeOrStutterInVBlankFinal = dml_prefetch_support_uclk_fclk_and_stutter;
- dml2->v20.scratch.dp2_mst_stream_count = calculate_dp2_mst_stream_count(context);
dml2_populate_pipe_to_plane_index_mapping(dml2, context);
+ dml2_map_hpo_stream_encoder_to_hpo_link_encoder_index(dml2, context);
for (i = 0; i < context->stream_count; i++) {
current_pipe_context = NULL;
@@ -1313,7 +1306,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
ASSERT(disp_cfg_stream_location >= 0 && disp_cfg_stream_location <= __DML2_WRAPPER_MAX_STREAMS_PLANES__);
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_stream_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2->v20.scratch.dp2_mst_stream_count);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_stream_location, context->streams[i], current_pipe_context, dml2);
/*Call site for populate_dml_writeback_cfg_from_stream_state*/
populate_dml_writeback_cfg_from_stream_state(&dml_dispcfg->writeback,
disp_cfg_stream_location, context->streams[i]);
@@ -1378,7 +1371,7 @@ void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_stat
if (j >= 1) {
populate_dml_timing_cfg_from_stream_state(&dml_dispcfg->timing, disp_cfg_plane_location, context->streams[i]);
- populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context, dml2->v20.scratch.dp2_mst_stream_count);
+ populate_dml_output_cfg_from_stream_state(&dml_dispcfg->output, disp_cfg_plane_location, context->streams[i], current_pipe_context, dml2);
switch (context->streams[i]->debug.force_odm_combine_segments) {
case 2:
dml2->v20.dml_core_ctx.policy.ODMUse[disp_cfg_plane_location] = dml_odm_use_policy_combine_2to1;
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h
index 55659b22d87f..d764773938f4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.h
@@ -36,6 +36,6 @@ void dml2_translate_socbb_params(const struct dc *in_dc, struct soc_bounding_box
void dml2_translate_soc_states(const struct dc *in_dc, struct soc_states_st *out, int num_states);
void map_dc_state_into_dml_display_cfg(struct dml2_context *dml2, struct dc_state *context, struct dml_display_cfg_st *dml_dispcfg);
void dml2_update_pipe_ctx_dchub_regs(struct _vcs_dpi_dml_display_rq_regs_st *rq_regs, struct _vcs_dpi_dml_display_dlg_regs_st *disp_dlg_regs, struct _vcs_dpi_dml_display_ttu_regs_st *disp_ttu_regs, struct pipe_ctx *out);
-bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe, unsigned int dp2_mst_stream_count);
+bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe);
#endif //__DML2_TRANSLATION_HELPER_H__
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
index 9e8ff3a9718e..9a33158b63bf 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_utils.c
@@ -153,7 +153,7 @@ unsigned int dml2_util_get_maximum_odm_combine_for_output(bool force_odm_4to1, e
}
}
-bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx, unsigned int dp2_mst_stream_count)
+bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx)
{
if (pipe_ctx == NULL || pipe_ctx->stream == NULL)
return false;
@@ -161,14 +161,6 @@ bool is_dp2p0_output_encoder(const struct pipe_ctx *pipe_ctx, unsigned int dp2_m
/* If this assert is hit then we have a link encoder dynamic management issue */
ASSERT(pipe_ctx->stream_res.hpo_dp_stream_enc ? pipe_ctx->link_res.hpo_dp_link_enc != NULL : true);
- /* Count MST hubs once by treating only 1st remote sink in topology as an encoder */
- if (pipe_ctx->stream->link && pipe_ctx->stream->link->remote_sinks[0] && dp2_mst_stream_count > 1) {
- return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
- pipe_ctx->link_res.hpo_dp_link_enc &&
- dc_is_dp_signal(pipe_ctx->stream->signal) &&
- (pipe_ctx->stream->link->remote_sinks[0]->sink_id == pipe_ctx->stream->sink->sink_id));
- }
-
return (pipe_ctx->stream_res.hpo_dp_stream_enc &&
pipe_ctx->link_res.hpo_dp_link_enc &&
dc_is_dp_signal(pipe_ctx->stream->signal));
@@ -181,7 +173,7 @@ bool is_dtbclk_required(const struct dc *dc, struct dc_state *context)
for (i = 0; i < dc->res_pool->pipe_count; i++) {
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (is_dp2p0_output_encoder(&context->res_ctx.pipe_ctx[i], context->bw_ctx.dml2->v20.scratch.dp2_mst_stream_count))
+ if (is_dp2p0_output_encoder(&context->res_ctx.pipe_ctx[i]))
return true;
}
return false;
diff --git a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
index 505929800426..01f98139292e 100644
--- a/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
+++ b/drivers/gpu/drm/amd/display/dc/dpp/dcn401/dcn401_dpp_dscl.c
@@ -280,7 +280,8 @@ static void dpp401_dscl_set_scaler_filter(
static void dpp401_dscl_set_scl_filter(
struct dcn401_dpp *dpp,
const struct scaler_data *scl_data,
- bool chroma_coef_mode)
+ bool chroma_coef_mode,
+ bool force_coeffs_update)
{
bool h_2tap_hardcode_coef_en = false;
bool v_2tap_hardcode_coef_en = false;
@@ -343,7 +344,7 @@ static void dpp401_dscl_set_scl_filter(
|| (filter_v_c && (filter_v_c != dpp->filter_v_c));
}
- if (filter_updated) {
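+	/* Allow the caller to force coefficient programming even when the filters are unchanged */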
+	if (filter_updated || force_coeffs_update) {
uint32_t scl_mode = REG_READ(SCL_MODE);
if (!h_2tap_hardcode_coef_en && filter_h) {
@@ -656,274 +657,252 @@ static void dpp401_dscl_set_recout(struct dcn401_dpp *dpp,
RECOUT_HEIGHT, recout->height);
}
/**
- * dpp401_dscl_program_easf - Program EASF
+ * dpp401_dscl_program_easf_v - Program EASF_V
*
* @dpp_base: High level DPP struct
 * @scl_data: scaler_data info
*
- * This is the primary function to program EASF
+ * This is the primary function to program vertical EASF registers
*
*/
-static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_data *scl_data)
+static void dpp401_dscl_program_easf_v(struct dpp *dpp_base, const struct scaler_data *scl_data)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
PERF_TRACE();
- REG_UPDATE(DSCL_SC_MODE,
- SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode);
- REG_UPDATE(DSCL_SC_MODE,
- SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en);
/* DSCL_EASF_V_MODE */
- REG_UPDATE(DSCL_EASF_V_MODE,
- SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en);
- REG_UPDATE(DSCL_EASF_V_MODE,
- SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor);
- REG_UPDATE(DSCL_EASF_V_MODE,
+ REG_SET_3(DSCL_EASF_V_MODE, 0,
+ SCL_EASF_V_EN, scl_data->dscl_prog_data.easf_v_en,
+ SCL_EASF_V_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_v_sharp_factor,
SCL_EASF_V_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_v_ring);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
- SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain);
- REG_UPDATE(DSCL_EASF_V_BF_CNTL,
+
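+	/* Skip remaining register programming if vertical EASF is disabled */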
+ if (!scl_data->dscl_prog_data.easf_v_en) {
+ PERF_TRACE();
+ return;
+ }
+
+ /* DSCL_EASF_V_BF_CNTL */
+ REG_SET_6(DSCL_EASF_V_BF_CNTL, 0,
+ SCL_EASF_V_BF1_EN, scl_data->dscl_prog_data.easf_v_bf1_en,
+ SCL_EASF_V_BF2_MODE, scl_data->dscl_prog_data.easf_v_bf2_mode,
+ SCL_EASF_V_BF3_MODE, scl_data->dscl_prog_data.easf_v_bf3_mode,
+ SCL_EASF_V_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat1_gain,
+ SCL_EASF_V_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_v_bf2_flat2_gain,
SCL_EASF_V_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_v_bf2_roc_gain);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
- SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL1,
+ /* DSCL_EASF_V_RINGEST_3TAP_CNTLn */
+ REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL1, 0,
+ SCL_EASF_V_RINGEST_3TAP_DNTILT_UPTILT, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_uptilt,
SCL_EASF_V_RINGEST_3TAP_UPTILT_MAXVAL, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt_max);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
- SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL2,
+ REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL2, 0,
+ SCL_EASF_V_RINGEST_3TAP_DNTILT_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_dntilt_slope,
SCL_EASF_V_RINGEST_3TAP_UPTILT1_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt1_slope);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
- SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope);
- REG_UPDATE(DSCL_EASF_V_RINGEST_3TAP_CNTL3,
+ REG_SET_2(DSCL_EASF_V_RINGEST_3TAP_CNTL3, 0,
+ SCL_EASF_V_RINGEST_3TAP_UPTILT2_SLOPE, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_slope,
SCL_EASF_V_RINGEST_3TAP_UPTILT2_OFFSET, scl_data->dscl_prog_data.easf_v_ringest_3tap_uptilt2_offset);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
- SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE,
+ /* DSCL_EASF_V_RINGEST_EVENTAP_REDUCE */
+ REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_REDUCE, 0,
+ SCL_EASF_V_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg1,
SCL_EASF_V_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_v_ringest_eventap_reduceg2);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
- SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1);
- REG_UPDATE(DSCL_EASF_V_RINGEST_EVENTAP_GAIN,
+ /* DSCL_EASF_V_RINGEST_EVENTAP_GAIN */
+ REG_SET_2(DSCL_EASF_V_RINGEST_EVENTAP_GAIN, 0,
+ SCL_EASF_V_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain1,
SCL_EASF_V_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_v_ringest_eventap_gain2);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
- SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
- SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
- SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina);
- REG_UPDATE(DSCL_EASF_V_BF_FINAL_MAX_MIN,
+ /* DSCL_EASF_V_BF_FINAL_MAX_MIN */
+ REG_SET_4(DSCL_EASF_V_BF_FINAL_MAX_MIN, 0,
+ SCL_EASF_V_BF_MAXA, scl_data->dscl_prog_data.easf_v_bf_maxa,
+ SCL_EASF_V_BF_MAXB, scl_data->dscl_prog_data.easf_v_bf_maxb,
+ SCL_EASF_V_BF_MINA, scl_data->dscl_prog_data.easf_v_bf_mina,
SCL_EASF_V_BF_MINB, scl_data->dscl_prog_data.easf_v_bf_minb);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
- SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
- SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG0,
+ /* DSCL_EASF_V_BF1_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG0, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg0,
+ SCL_EASF_V_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg0,
SCL_EASF_V_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg0);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
- SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
- SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG1, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg1,
+ SCL_EASF_V_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg1,
SCL_EASF_V_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg1);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
- SCL_EASF_V_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
- SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG2, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg2,
+ SCL_EASF_V_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg2,
SCL_EASF_V_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg2);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
- SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
- SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG3, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg3,
+ SCL_EASF_V_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg3,
SCL_EASF_V_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg3);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
- SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
- SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG4, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg4,
+ SCL_EASF_V_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg4,
SCL_EASF_V_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg4);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
- SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
- SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG5,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG5, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg5,
+ SCL_EASF_V_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg5,
SCL_EASF_V_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg5);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
- SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
- SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG6,
+ REG_SET_3(DSCL_EASF_V_BF1_PWL_SEG6, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg6,
+ SCL_EASF_V_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg6,
SCL_EASF_V_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_v_bf1_pwl_slope_seg6);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7,
- SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7);
- REG_UPDATE(DSCL_EASF_V_BF1_PWL_SEG7,
+ REG_SET_2(DSCL_EASF_V_BF1_PWL_SEG7, 0,
+ SCL_EASF_V_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_in_seg7,
SCL_EASF_V_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_v_bf1_pwl_base_seg7);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
- SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
- SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG0,
+ /* DSCL_EASF_V_BF3_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG0, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set0,
+ SCL_EASF_V_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set0,
SCL_EASF_V_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set0);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
- SCL_EASF_V_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
- SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG1, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set1,
+ SCL_EASF_V_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set1,
SCL_EASF_V_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set1);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
- SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
- SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG2, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set2,
+ SCL_EASF_V_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set2,
SCL_EASF_V_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set2);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
- SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
- SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG3, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set3,
+ SCL_EASF_V_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set3,
SCL_EASF_V_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set3);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
- SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
- SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_V_BF3_PWL_SEG4, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set4,
+ SCL_EASF_V_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set4,
SCL_EASF_V_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_v_bf3_pwl_slope_set4);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5,
- SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5);
- REG_UPDATE(DSCL_EASF_V_BF3_PWL_SEG5,
+ REG_SET_2(DSCL_EASF_V_BF3_PWL_SEG5, 0,
+ SCL_EASF_V_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_in_set5,
SCL_EASF_V_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_v_bf3_pwl_base_set5);
+ PERF_TRACE();
+}
+/**
+ * dpp401_dscl_program_easf_h - Program EASF_H
+ *
+ * @dpp_base: High level DPP struct
+ * @scl_data: scaler_data info
+ *
+ * This is the primary function to program horizontal EASF registers
+ *
+ */
+static void dpp401_dscl_program_easf_h(struct dpp *dpp_base, const struct scaler_data *scl_data)
+{
+ struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+
+ PERF_TRACE();
/* DSCL_EASF_H_MODE */
- REG_UPDATE(DSCL_EASF_H_MODE,
- SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en);
- REG_UPDATE(DSCL_EASF_H_MODE,
- SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor);
- REG_UPDATE(DSCL_EASF_H_MODE,
+ REG_SET_3(DSCL_EASF_H_MODE, 0,
+ SCL_EASF_H_EN, scl_data->dscl_prog_data.easf_h_en,
+ SCL_EASF_H_2TAP_SHARP_FACTOR, scl_data->dscl_prog_data.easf_h_sharp_factor,
SCL_EASF_H_RINGEST_FORCE_EN, scl_data->dscl_prog_data.easf_h_ring);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
- SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain);
- REG_UPDATE(DSCL_EASF_H_BF_CNTL,
+
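+	/* Skip remaining register programming if horizontal EASF is disabled */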
+ if (!scl_data->dscl_prog_data.easf_h_en) {
+ PERF_TRACE();
+ return;
+ }
+
+ /* DSCL_EASF_H_BF_CNTL */
+ REG_SET_6(DSCL_EASF_H_BF_CNTL, 0,
+ SCL_EASF_H_BF1_EN, scl_data->dscl_prog_data.easf_h_bf1_en,
+ SCL_EASF_H_BF2_MODE, scl_data->dscl_prog_data.easf_h_bf2_mode,
+ SCL_EASF_H_BF3_MODE, scl_data->dscl_prog_data.easf_h_bf3_mode,
+ SCL_EASF_H_BF2_FLAT1_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat1_gain,
+ SCL_EASF_H_BF2_FLAT2_GAIN, scl_data->dscl_prog_data.easf_h_bf2_flat2_gain,
SCL_EASF_H_BF2_ROC_GAIN, scl_data->dscl_prog_data.easf_h_bf2_roc_gain);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
- SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE,
+ /* DSCL_EASF_H_RINGEST_EVENTAP_REDUCE */
+ REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_REDUCE, 0,
+ SCL_EASF_H_RINGEST_EVENTAP_REDUCEG1, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg1,
SCL_EASF_H_RINGEST_EVENTAP_REDUCEG2, scl_data->dscl_prog_data.easf_h_ringest_eventap_reduceg2);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
- SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1);
- REG_UPDATE(DSCL_EASF_H_RINGEST_EVENTAP_GAIN,
+ /* DSCL_EASF_H_RINGEST_EVENTAP_GAIN */
+ REG_SET_2(DSCL_EASF_H_RINGEST_EVENTAP_GAIN, 0,
+ SCL_EASF_H_RINGEST_EVENTAP_GAIN1, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain1,
SCL_EASF_H_RINGEST_EVENTAP_GAIN2, scl_data->dscl_prog_data.easf_h_ringest_eventap_gain2);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
- SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
- SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
- SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina);
- REG_UPDATE(DSCL_EASF_H_BF_FINAL_MAX_MIN,
+ /* DSCL_EASF_H_BF_FINAL_MAX_MIN */
+ REG_SET_4(DSCL_EASF_H_BF_FINAL_MAX_MIN, 0,
+ SCL_EASF_H_BF_MAXA, scl_data->dscl_prog_data.easf_h_bf_maxa,
+ SCL_EASF_H_BF_MAXB, scl_data->dscl_prog_data.easf_h_bf_maxb,
+ SCL_EASF_H_BF_MINA, scl_data->dscl_prog_data.easf_h_bf_mina,
SCL_EASF_H_BF_MINB, scl_data->dscl_prog_data.easf_h_bf_minb);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
- SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
- SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG0,
+ /* DSCL_EASF_H_BF1_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG0, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg0,
+ SCL_EASF_H_BF1_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg0,
SCL_EASF_H_BF1_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg0);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
- SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
- SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG1, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg1,
+ SCL_EASF_H_BF1_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg1,
SCL_EASF_H_BF1_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg1);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
- SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
- SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG2, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg2,
+ SCL_EASF_H_BF1_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg2,
SCL_EASF_H_BF1_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg2);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
- SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
- SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG3, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg3,
+ SCL_EASF_H_BF1_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg3,
SCL_EASF_H_BF1_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg3);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
- SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
- SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG4, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg4,
+ SCL_EASF_H_BF1_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg4,
SCL_EASF_H_BF1_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg4);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
- SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
- SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG5,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG5, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg5,
+ SCL_EASF_H_BF1_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg5,
SCL_EASF_H_BF1_PWL_SLOPE_SEG5, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg5);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
- SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
- SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG6,
+ REG_SET_3(DSCL_EASF_H_BF1_PWL_SEG6, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg6,
+ SCL_EASF_H_BF1_PWL_BASE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg6,
SCL_EASF_H_BF1_PWL_SLOPE_SEG6, scl_data->dscl_prog_data.easf_h_bf1_pwl_slope_seg6);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7,
- SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7);
- REG_UPDATE(DSCL_EASF_H_BF1_PWL_SEG7,
+ REG_SET_2(DSCL_EASF_H_BF1_PWL_SEG7, 0,
+ SCL_EASF_H_BF1_PWL_IN_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_in_seg7,
SCL_EASF_H_BF1_PWL_BASE_SEG7, scl_data->dscl_prog_data.easf_h_bf1_pwl_base_seg7);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
- SCL_EASF_H_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
- SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG0,
+ /* DSCL_EASF_H_BF3_PWL_SEGn */
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG0, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set0,
+ SCL_EASF_H_BF3_PWL_BASE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set0,
SCL_EASF_H_BF3_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set0);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
- SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
- SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG1,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG1, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set1,
+ SCL_EASF_H_BF3_PWL_BASE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set1,
SCL_EASF_H_BF3_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set1);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
- SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
- SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG2,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG2, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set2,
+ SCL_EASF_H_BF3_PWL_BASE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set2,
SCL_EASF_H_BF3_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set2);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
- SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
- SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG3,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG3, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set3,
+ SCL_EASF_H_BF3_PWL_BASE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set3,
SCL_EASF_H_BF3_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set3);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
- SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
- SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG4,
+ REG_SET_3(DSCL_EASF_H_BF3_PWL_SEG4, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set4,
+ SCL_EASF_H_BF3_PWL_BASE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set4,
SCL_EASF_H_BF3_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.easf_h_bf3_pwl_slope_set4);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5,
- SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5);
- REG_UPDATE(DSCL_EASF_H_BF3_PWL_SEG5,
+ REG_SET_2(DSCL_EASF_H_BF3_PWL_SEG5, 0,
+ SCL_EASF_H_BF3_PWL_IN_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_in_set5,
SCL_EASF_H_BF3_PWL_BASE_SEG5, scl_data->dscl_prog_data.easf_h_bf3_pwl_base_set5);
+ PERF_TRACE();
+}
+/**
+ * dpp401_dscl_program_easf - Program EASF
+ *
+ * @dpp_base: High level DPP struct
+ * @scl_data: scaler_data info
+ *
+ * This is the primary function to program EASF
+ *
+ */
+static void dpp401_dscl_program_easf(struct dpp *dpp_base, const struct scaler_data *scl_data)
+{
+ struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
+
+ PERF_TRACE();
+ /* DSCL_SC_MODE */
+ REG_SET_2(DSCL_SC_MODE, 0,
+ SCL_SC_MATRIX_MODE, scl_data->dscl_prog_data.easf_matrix_mode,
+ SCL_SC_LTONL_EN, scl_data->dscl_prog_data.easf_ltonl_en);
/* DSCL_EASF_SC_MATRIX_C0C1, DSCL_EASF_SC_MATRIX_C2C3 */
- REG_UPDATE(DSCL_SC_MATRIX_C0C1,
- SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0);
- REG_UPDATE(DSCL_SC_MATRIX_C0C1,
+ REG_SET_2(DSCL_SC_MATRIX_C0C1, 0,
+ SCL_SC_MATRIX_C0, scl_data->dscl_prog_data.easf_matrix_c0,
SCL_SC_MATRIX_C1, scl_data->dscl_prog_data.easf_matrix_c1);
- REG_UPDATE(DSCL_SC_MATRIX_C2C3,
- SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2);
- REG_UPDATE(DSCL_SC_MATRIX_C2C3,
+ REG_SET_2(DSCL_SC_MATRIX_C2C3, 0,
+ SCL_SC_MATRIX_C2, scl_data->dscl_prog_data.easf_matrix_c2,
SCL_SC_MATRIX_C3, scl_data->dscl_prog_data.easf_matrix_c3);
+ dpp401_dscl_program_easf_v(dpp_base, scl_data);
+ dpp401_dscl_program_easf_h(dpp_base, scl_data);
PERF_TRACE();
}
/**
@@ -958,10 +937,11 @@ static void dpp401_dscl_set_isharp_filter(
REG_UPDATE(ISHARP_DELTA_CTRL,
ISHARP_DELTA_LUT_HOST_SELECT, 0);
+ /* LUT data write is auto-indexed. Write index once */
+ REG_SET(ISHARP_DELTA_INDEX, 0,
+ ISHARP_DELTA_INDEX, 0);
for (level = 0; level < NUM_LEVELS; level++) {
filter_data = filter[level];
- REG_SET(ISHARP_DELTA_INDEX, 0,
- ISHARP_DELTA_INDEX, level);
REG_SET(ISHARP_DELTA_DATA, 0,
ISHARP_DELTA_DATA, filter_data);
}
@@ -976,107 +956,76 @@ static void dpp401_dscl_set_isharp_filter(
*
*/
static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
- const struct scaler_data *scl_data)
+ const struct scaler_data *scl_data,
+ bool program_isharp_1dlut,
+ bool *bs_coeffs_updated)
{
struct dcn401_dpp *dpp = TO_DCN401_DPP(dpp_base);
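+	/* Default to reporting no coefficient update to the caller */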
+ *bs_coeffs_updated = false;
PERF_TRACE();
- /* ISHARP_EN */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_EN, scl_data->dscl_prog_data.isharp_en);
- /* ISHARP_NOISEDET_EN */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable);
- /* ISHARP_NOISEDET_MODE */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
- /* ISHARP_NOISEDET_UTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
- ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
- /* ISHARP_NOISEDET_DTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
- ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
- REG_UPDATE(ISHARP_MODE,
- ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode);
- /* ISHARP_NOISEDET_UTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
- ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold);
- /* ISHARP_NOISEDET_DTHRE */
- REG_UPDATE(ISHARP_NOISEDET_THRESHOLD,
+ /* ISHARP_MODE */
+ REG_SET_6(ISHARP_MODE, 0,
+ ISHARP_EN, scl_data->dscl_prog_data.isharp_en,
+ ISHARP_NOISEDET_EN, scl_data->dscl_prog_data.isharp_noise_det.enable,
+ ISHARP_NOISEDET_MODE, scl_data->dscl_prog_data.isharp_noise_det.mode,
+ ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode,
+ ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode,
+ ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm);
+
+ /* Skip remaining register programming if ISHARP is disabled */
+ if (!scl_data->dscl_prog_data.isharp_en) {
+ PERF_TRACE();
+ return;
+ }
+
+ /* ISHARP_NOISEDET_THRESHOLD */
+ REG_SET_2(ISHARP_NOISEDET_THRESHOLD, 0,
+ ISHARP_NOISEDET_UTHRE, scl_data->dscl_prog_data.isharp_noise_det.uthreshold,
ISHARP_NOISEDET_DTHRE, scl_data->dscl_prog_data.isharp_noise_det.dthreshold);
- /* ISHARP_NOISEDET_PWL_START_IN */
- REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
- ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in);
- /* ISHARP_NOISEDET_PWL_END_IN */
- REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
- ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in);
- /* ISHARP_NOISEDET_PWL_SLOPE */
- REG_UPDATE(ISHARP_NOISE_GAIN_PWL,
+
+ /* ISHARP_NOISE_GAIN_PWL */
+ REG_SET_3(ISHARP_NOISE_GAIN_PWL, 0,
+ ISHARP_NOISEDET_PWL_START_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_start_in,
+ ISHARP_NOISEDET_PWL_END_IN, scl_data->dscl_prog_data.isharp_noise_det.pwl_end_in,
ISHARP_NOISEDET_PWL_SLOPE, scl_data->dscl_prog_data.isharp_noise_det.pwl_slope);
- /* ISHARP_LBA_MODE */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_LBA_MODE, scl_data->dscl_prog_data.isharp_lba.mode);
+
/* ISHARP_LBA: IN_SEG, BASE_SEG, SLOPE_SEG */
- REG_UPDATE(ISHARP_LBA_PWL_SEG0,
- ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG0,
- ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG0,
+ REG_SET_3(ISHARP_LBA_PWL_SEG0, 0,
+ ISHARP_LBA_PWL_IN_SEG0, scl_data->dscl_prog_data.isharp_lba.in_seg[0],
+ ISHARP_LBA_PWL_BASE_SEG0, scl_data->dscl_prog_data.isharp_lba.base_seg[0],
ISHARP_LBA_PWL_SLOPE_SEG0, scl_data->dscl_prog_data.isharp_lba.slope_seg[0]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG1,
- ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG1,
- ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG1,
+ REG_SET_3(ISHARP_LBA_PWL_SEG1, 0,
+ ISHARP_LBA_PWL_IN_SEG1, scl_data->dscl_prog_data.isharp_lba.in_seg[1],
+ ISHARP_LBA_PWL_BASE_SEG1, scl_data->dscl_prog_data.isharp_lba.base_seg[1],
ISHARP_LBA_PWL_SLOPE_SEG1, scl_data->dscl_prog_data.isharp_lba.slope_seg[1]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG2,
- ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG2,
- ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG2,
+ REG_SET_3(ISHARP_LBA_PWL_SEG2, 0,
+ ISHARP_LBA_PWL_IN_SEG2, scl_data->dscl_prog_data.isharp_lba.in_seg[2],
+ ISHARP_LBA_PWL_BASE_SEG2, scl_data->dscl_prog_data.isharp_lba.base_seg[2],
ISHARP_LBA_PWL_SLOPE_SEG2, scl_data->dscl_prog_data.isharp_lba.slope_seg[2]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG3,
- ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG3,
- ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG3,
+ REG_SET_3(ISHARP_LBA_PWL_SEG3, 0,
+ ISHARP_LBA_PWL_IN_SEG3, scl_data->dscl_prog_data.isharp_lba.in_seg[3],
+ ISHARP_LBA_PWL_BASE_SEG3, scl_data->dscl_prog_data.isharp_lba.base_seg[3],
ISHARP_LBA_PWL_SLOPE_SEG3, scl_data->dscl_prog_data.isharp_lba.slope_seg[3]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG4,
- ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG4,
- ISHARP_LBA_PWL_BASE_SEG4, scl_data->dscl_prog_data.isharp_lba.base_seg[4]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG4,
+ REG_SET_3(ISHARP_LBA_PWL_SEG4, 0,
+ ISHARP_LBA_PWL_IN_SEG4, scl_data->dscl_prog_data.isharp_lba.in_seg[4],
+ ISHARP_LBA_PWL_BASE_SEG4, scl_data->dscl_prog_data.isharp_lba.base_seg[4],
ISHARP_LBA_PWL_SLOPE_SEG4, scl_data->dscl_prog_data.isharp_lba.slope_seg[4]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG5,
- ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5]);
- REG_UPDATE(ISHARP_LBA_PWL_SEG5,
+ REG_SET_2(ISHARP_LBA_PWL_SEG5, 0,
+ ISHARP_LBA_PWL_IN_SEG5, scl_data->dscl_prog_data.isharp_lba.in_seg[5],
ISHARP_LBA_PWL_BASE_SEG5, scl_data->dscl_prog_data.isharp_lba.base_seg[5]);
- /* ISHARP_FMT_MODE */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_FMT_MODE, scl_data->dscl_prog_data.isharp_fmt.mode);
- /* ISHARP_FMT_NORM */
- REG_UPDATE(ISHARP_MODE,
- ISHARP_FMT_NORM, scl_data->dscl_prog_data.isharp_fmt.norm);
/* ISHARP_DELTA_LUT */
- dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
- /* ISHARP_NLDELTA_SCLIP_EN_P */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p);
- /* ISHARP_NLDELTA_SCLIP_PIVOT_P */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p);
- /* ISHARP_NLDELTA_SCLIP_SLOPE_P */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p);
- /* ISHARP_NLDELTA_SCLIP_EN_N */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n);
- /* ISHARP_NLDELTA_SCLIP_PIVOT_N */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
- ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n);
- /* ISHARP_NLDELTA_SCLIP_SLOPE_N */
- REG_UPDATE(ISHARP_NLDELTA_SOFT_CLIP,
+ if (!program_isharp_1dlut)
+ dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
+
+ /* ISHARP_NLDELTA_SOFT_CLIP */
+ REG_SET_6(ISHARP_NLDELTA_SOFT_CLIP, 0,
+ ISHARP_NLDELTA_SCLIP_EN_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_p,
+ ISHARP_NLDELTA_SCLIP_PIVOT_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_p,
+ ISHARP_NLDELTA_SCLIP_SLOPE_P, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_p,
+ ISHARP_NLDELTA_SCLIP_EN_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.enable_n,
+ ISHARP_NLDELTA_SCLIP_PIVOT_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.pivot_n,
ISHARP_NLDELTA_SCLIP_SLOPE_N, scl_data->dscl_prog_data.isharp_nldelta_sclip.slope_n);
/* Blur and Scale Coefficients - SCL_COEF_RAM_TAP_SELECT */
@@ -1086,12 +1035,14 @@ static void dpp401_dscl_program_isharp(struct dpp *dpp_base,
dpp, scl_data->taps.v_taps,
SCL_COEF_VERTICAL_BLUR_SCALE,
scl_data->dscl_prog_data.filter_blur_scale_v);
+ *bs_coeffs_updated = true;
}
if (scl_data->dscl_prog_data.filter_blur_scale_h) {
dpp401_dscl_set_scaler_filter(
dpp, scl_data->taps.h_taps,
SCL_COEF_HORIZONTAL_BLUR_SCALE,
scl_data->dscl_prog_data.filter_blur_scale_h);
+ *bs_coeffs_updated = true;
}
}
PERF_TRACE();
@@ -1122,12 +1073,29 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
dpp_base, scl_data, dpp_base->ctx->dc->debug.always_scale);
bool ycbcr = scl_data->format >= PIXEL_FORMAT_VIDEO_BEGIN
&& scl_data->format <= PIXEL_FORMAT_VIDEO_END;
+ bool program_isharp_1dlut = false;
+ bool bs_coeffs_updated = false;
+
if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
return;
PERF_TRACE();
+ /* If only the sharpness level changed, program just the 1D LUT and return early */
+ if (scl_data->dscl_prog_data.isharp_en &&
+ (dpp->scl_data.dscl_prog_data.sharpness_level
+ != scl_data->dscl_prog_data.sharpness_level)) {
+ /* ISHARP_DELTA_LUT */
+ dpp401_dscl_set_isharp_filter(dpp, scl_data->dscl_prog_data.isharp_delta);
+ dpp->scl_data.dscl_prog_data.sharpness_level = scl_data->dscl_prog_data.sharpness_level;
+ dpp->scl_data.dscl_prog_data.isharp_delta = scl_data->dscl_prog_data.isharp_delta;
+
+ if (memcmp(&dpp->scl_data, scl_data, sizeof(*scl_data)) == 0)
+ return;
+ program_isharp_1dlut = true;
+ }
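A condensed sketch of the sharpness-only fast path above, with a hypothetical
two-field state structure standing in for the scaler data: fold the new
sharpness state into the cached copy, then skip the full reprogram when
nothing else differs.

#include <stdbool.h>
#include <string.h>

struct scl_state {
	int sharpness_level;
	const void *isharp_delta;
	/* ... remaining scaler state ... */
};

/* Returns true when updating the 1D LUT alone was sufficient */
static bool sharpness_only_update(struct scl_state *cached,
				  const struct scl_state *next)
{
	if (cached->sharpness_level == next->sharpness_level)
		return false;
	/* ... program the 1D LUT here ... */
	cached->sharpness_level = next->sharpness_level;
	cached->isharp_delta = next->isharp_delta;
	return memcmp(cached, next, sizeof(*cached)) == 0;
}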
+
dpp->scl_data = *scl_data;
if ((dpp->base.ctx->dc->config.use_spl) && (!dpp->base.ctx->dc->debug.disable_spl)) {
@@ -1181,7 +1149,7 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
if (dscl_mode == DSCL_MODE_SCALING_444_BYPASS) {
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_disable_easf(dpp_base, scl_data);
- dpp401_dscl_program_isharp(dpp_base, scl_data);
+ dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
return;
}
@@ -1208,12 +1176,18 @@ void dpp401_dscl_set_scaler_manual_scale(struct dpp *dpp_base,
SCL_V_NUM_TAPS_C, v_num_taps_c,
SCL_H_NUM_TAPS_C, h_num_taps_c);
- dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr);
+ /* ISharp configuration
+ * - B&S coeffs are written to same coeff RAM as WB scaler coeffs
+ * - coeff RAM toggle is in EASF programming
+ * - if we are only programming B&S coeffs, then need to reprogram
+ * WB scaler coeffs and toggle coeff RAM together
+ */
+ //if (dpp->base.ctx->dc->config.prefer_easf)
+ dpp401_dscl_program_isharp(dpp_base, scl_data, program_isharp_1dlut, &bs_coeffs_updated);
+
+ dpp401_dscl_set_scl_filter(dpp, scl_data, ycbcr, bs_coeffs_updated);
/* Edge adaptive scaler function configuration */
if (dpp->base.ctx->dc->config.prefer_easf)
dpp401_dscl_program_easf(dpp_base, scl_data);
- /* isharp configuration */
- //if (dpp->base.ctx->dc->config.prefer_easf)
- dpp401_dscl_program_isharp(dpp_base, scl_data);
PERF_TRACE();
}
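The call-site move above matters because the blur-and-scale (B&S)
coefficients share a double-banked coefficient RAM with the scaler
coefficients. A hedged sketch of the constraint, with hypothetical names:
both coefficient sets are staged into the inactive bank and made visible by
one bank toggle, so a B&S update forces the scaler coefficients to be
rewritten as well.

#include <stdbool.h>

struct coef_ram {
	int active_bank;	/* 0 or 1; flipped during EASF programming */
};

static void coef_ram_commit(struct coef_ram *ram, bool bs_coeffs_updated)
{
	int staging_bank = ram->active_bank ^ 1;

	/* ... write scaler coeffs into staging_bank ... */
	if (bs_coeffs_updated) {
		/* ... write B&S coeffs into the same staging_bank ... */
	}
	ram->active_bank = staging_bank;	/* one toggle switches both */
}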
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index fbbb20b9dbee..a4c6decee0f8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -147,37 +147,6 @@ void dcn35_init_hw(struct dc *dc)
hws->funcs.bios_golden_init(dc);
}
- if (!dc->debug.disable_clock_gate) {
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
- REG_WRITE(DCCG_GATE_DISABLE_CNTL2, 0);
-
- /* Disable gating for PHYASYMCLK. This will be enabled in dccg if needed */
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_ROOT_GATE_DISABLE, 1,
- PHYBSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYCSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYDSYMCLK_ROOT_GATE_DISABLE, 1,
- PHYESYMCLK_ROOT_GATE_DISABLE, 1);
-
- REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL4,
- DPIASYMCLK0_GATE_DISABLE, 0,
- DPIASYMCLK1_GATE_DISABLE, 0,
- DPIASYMCLK2_GATE_DISABLE, 0,
- DPIASYMCLK3_GATE_DISABLE, 0);
-
- REG_WRITE(DCCG_GATE_DISABLE_CNTL5, 0xFFFFFFFF);
- REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
- DTBCLK_P0_GATE_DISABLE, 0,
- DTBCLK_P1_GATE_DISABLE, 0,
- DTBCLK_P2_GATE_DISABLE, 0,
- DTBCLK_P3_GATE_DISABLE, 0);
- REG_UPDATE_4(DCCG_GATE_DISABLE_CNTL5,
- DPSTREAMCLK0_GATE_DISABLE, 0,
- DPSTREAMCLK1_GATE_DISABLE, 0,
- DPSTREAMCLK2_GATE_DISABLE, 0,
- DPSTREAMCLK3_GATE_DISABLE, 0);
-
- }
-
// Initialize the dccg
if (res_pool->dccg->funcs->dccg_init)
res_pool->dccg->funcs->dccg_init(res_pool->dccg);
@@ -305,20 +274,6 @@ void dcn35_init_hw(struct dc *dc)
if (!dc->debug.disable_clock_gate) {
/* enable all DCN clock gating */
- REG_WRITE(DCCG_GATE_DISABLE_CNTL, 0);
-
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_FE_GATE_DISABLE, 0,
- SYMCLKB_FE_GATE_DISABLE, 0,
- SYMCLKC_FE_GATE_DISABLE, 0,
- SYMCLKD_FE_GATE_DISABLE, 0,
- SYMCLKE_FE_GATE_DISABLE, 0);
- REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, HDMICHARCLK0_GATE_DISABLE, 0);
- REG_UPDATE_5(DCCG_GATE_DISABLE_CNTL2, SYMCLKA_GATE_DISABLE, 0,
- SYMCLKB_GATE_DISABLE, 0,
- SYMCLKC_GATE_DISABLE, 0,
- SYMCLKD_GATE_DISABLE, 0,
- SYMCLKE_GATE_DISABLE, 0);
-
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
index 02e63b95c36d..9d56fbdcd06a 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn401/dcn401_resource.c
@@ -76,6 +76,9 @@
#include "dml2/dml2_wrapper.h"
+#include "spl/dc_spl_scl_easf_filters.h"
+#include "spl/dc_spl_isharp_filters.h"
+
#define DC_LOGGER_INIT(logger)
enum dcn401_clk_src_array_id {
@@ -2126,6 +2129,10 @@ static bool dcn401_resource_construct(
dc->dml2_options.max_segments_per_hubp = 20;
dc->dml2_options.det_segment_size = DCN4_01_CRB_SEGMENT_SIZE_KB;
+ /* SPL: precompute EASF and blur-and-scale filter coefficient tables */
+ spl_init_easf_filter_coeffs();
+ spl_init_blur_scale_coeffs();
+
return true;
create_fail:
diff --git a/drivers/gpu/drm/amd/display/dc/spl/Makefile b/drivers/gpu/drm/amd/display/dc/spl/Makefile
index f8df85ea4d32..5edf3c6cf3e2 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/spl/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'spl' sub-component of DAL.
# It provides the scaling library interface.
-SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_scl_filters_old.o dc_spl_isharp_filters.o
+SPL = dc_spl.o dc_spl_scl_filters.o dc_spl_scl_easf_filters.o dc_spl_isharp_filters.o dc_spl_filters.o spl_fixpt31_32.o spl_custom_float.o
AMD_DAL_SPL = $(addprefix $(AMDDALPATH)/dc/spl/,$(SPL))
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
index 9eccdb38bed4..15f7eda903e6 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl.c
@@ -4,9 +4,11 @@
#include "dc_spl.h"
#include "dc_spl_scl_filters.h"
+#include "dc_spl_scl_easf_filters.h"
#include "dc_spl_isharp_filters.h"
+#include "spl_debug.h"
-#define IDENTITY_RATIO(ratio) (dc_fixpt_u2d19(ratio) == (1 << 19))
+#define IDENTITY_RATIO(ratio) (spl_fixpt_u2d19(ratio) == (1 << 19))
#define MIN_VIEWPORT_SIZE 12
static struct spl_rect intersect_rec(const struct spl_rect *r0, const struct spl_rect *r1)
@@ -107,26 +109,26 @@ static struct spl_rect calculate_plane_rec_in_timing_active(
const struct spl_rect *stream_src = &spl_in->basic_out.src_rect;
const struct spl_rect *stream_dst = &spl_in->basic_out.dst_rect;
struct spl_rect rec_out = {0};
- struct fixed31_32 temp;
+ struct spl_fixed31_32 temp;
- temp = dc_fixpt_from_fraction(rec_in->x * (long long)stream_dst->width,
+ temp = spl_fixpt_from_fraction(rec_in->x * (long long)stream_dst->width,
stream_src->width);
- rec_out.x = stream_dst->x + dc_fixpt_round(temp);
+ rec_out.x = stream_dst->x + spl_fixpt_round(temp);
- temp = dc_fixpt_from_fraction(
+ temp = spl_fixpt_from_fraction(
(rec_in->x + rec_in->width) * (long long)stream_dst->width,
stream_src->width);
- rec_out.width = stream_dst->x + dc_fixpt_round(temp) - rec_out.x;
+ rec_out.width = stream_dst->x + spl_fixpt_round(temp) - rec_out.x;
- temp = dc_fixpt_from_fraction(rec_in->y * (long long)stream_dst->height,
+ temp = spl_fixpt_from_fraction(rec_in->y * (long long)stream_dst->height,
stream_src->height);
- rec_out.y = stream_dst->y + dc_fixpt_round(temp);
+ rec_out.y = stream_dst->y + spl_fixpt_round(temp);
- temp = dc_fixpt_from_fraction(
+ temp = spl_fixpt_from_fraction(
(rec_in->y + rec_in->height) * (long long)stream_dst->height,
stream_src->height);
- rec_out.height = stream_dst->y + dc_fixpt_round(temp) - rec_out.y;
+ rec_out.height = stream_dst->y + spl_fixpt_round(temp) - rec_out.y;
return rec_out;
}
@@ -144,7 +146,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
mpc_rec.x = plane_clip_rec->x + mpc_rec.width * mpc_slice_idx;
mpc_rec.height = plane_clip_rec->height;
mpc_rec.y = plane_clip_rec->y;
- ASSERT(mpc_slice_count == 1 ||
+ SPL_ASSERT(mpc_slice_count == 1 ||
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE ||
mpc_rec.width % 2 == 0);
@@ -157,7 +159,7 @@ static struct spl_rect calculate_mpc_slice_in_timing_active(
}
if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM) {
- ASSERT(mpc_rec.height % 2 == 0);
+ SPL_ASSERT(mpc_rec.height % 2 == 0);
mpc_rec.height /= 2;
}
return mpc_rec;
@@ -197,7 +199,7 @@ static struct spl_rect calculate_odm_slice_in_timing_active(struct spl_in *spl_i
return spl_in->basic_out.odm_slice_rect;
}
-static void spl_calculate_recout(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_recout(struct spl_in *spl_in, struct spl_scratch *spl_scratch, struct spl_out *spl_out)
{
/*
* A plane clip represents the desired plane size and position in Stream
@@ -340,20 +342,23 @@ static void spl_calculate_recout(struct spl_in *spl_in, struct spl_out *spl_out)
/* shift the overlapping area so it is with respect to current
* ODM slice's position
*/
- spl_out->scl_data.recout = shift_rec(
+ spl_scratch->scl_data.recout = shift_rec(
&overlapping_area,
-odm_slice.x, -odm_slice.y);
- spl_out->scl_data.recout.height -=
+ spl_scratch->scl_data.recout.height -=
spl_in->debug.visual_confirm_base_offset;
- spl_out->scl_data.recout.height -=
+ spl_scratch->scl_data.recout.height -=
spl_in->debug.visual_confirm_dpp_offset;
} else
/* if there is no overlap, zero recout */
- memset(&spl_out->scl_data.recout, 0,
+ memset(&spl_scratch->scl_data.recout, 0,
sizeof(struct spl_rect));
}
+
/* Calculate scaling ratios */
-static void spl_calculate_scaling_ratios(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_scaling_ratios(struct spl_in *spl_in,
+ struct spl_scratch *spl_scratch,
+ struct spl_out *spl_out)
{
const int in_w = spl_in->basic_out.src_rect.width;
const int in_h = spl_in->basic_out.src_rect.height;
@@ -364,59 +369,75 @@ static void spl_calculate_scaling_ratios(struct spl_in *spl_in, struct spl_out *
/*Swap surf_src height and width since scaling ratios are in recout rotation*/
if (spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_90 ||
spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_270)
- swap(surf_src.height, surf_src.width);
+ spl_swap(surf_src.height, surf_src.width);
- spl_out->scl_data.ratios.horz = dc_fixpt_from_fraction(
+ spl_scratch->scl_data.ratios.horz = spl_fixpt_from_fraction(
surf_src.width,
spl_in->basic_in.dst_rect.width);
- spl_out->scl_data.ratios.vert = dc_fixpt_from_fraction(
+ spl_scratch->scl_data.ratios.vert = spl_fixpt_from_fraction(
surf_src.height,
spl_in->basic_in.dst_rect.height);
if (spl_in->basic_out.view_format == SPL_VIEW_3D_SIDE_BY_SIDE)
- spl_out->scl_data.ratios.horz.value *= 2;
+ spl_scratch->scl_data.ratios.horz.value *= 2;
else if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM)
- spl_out->scl_data.ratios.vert.value *= 2;
+ spl_scratch->scl_data.ratios.vert.value *= 2;
- spl_out->scl_data.ratios.vert.value = div64_s64(
- spl_out->scl_data.ratios.vert.value * in_h, out_h);
- spl_out->scl_data.ratios.horz.value = div64_s64(
- spl_out->scl_data.ratios.horz.value * in_w, out_w);
+ spl_scratch->scl_data.ratios.vert.value = spl_div64_s64(
+ spl_scratch->scl_data.ratios.vert.value * in_h, out_h);
+ spl_scratch->scl_data.ratios.horz.value = spl_div64_s64(
+ spl_scratch->scl_data.ratios.horz.value * in_w, out_w);
- spl_out->scl_data.ratios.horz_c = spl_out->scl_data.ratios.horz;
- spl_out->scl_data.ratios.vert_c = spl_out->scl_data.ratios.vert;
+ spl_scratch->scl_data.ratios.horz_c = spl_scratch->scl_data.ratios.horz;
+ spl_scratch->scl_data.ratios.vert_c = spl_scratch->scl_data.ratios.vert;
if (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
|| spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) {
- spl_out->scl_data.ratios.horz_c.value /= 2;
- spl_out->scl_data.ratios.vert_c.value /= 2;
+ spl_scratch->scl_data.ratios.horz_c.value /= 2;
+ spl_scratch->scl_data.ratios.vert_c.value /= 2;
}
- spl_out->scl_data.ratios.horz = dc_fixpt_truncate(
- spl_out->scl_data.ratios.horz, 19);
- spl_out->scl_data.ratios.vert = dc_fixpt_truncate(
- spl_out->scl_data.ratios.vert, 19);
- spl_out->scl_data.ratios.horz_c = dc_fixpt_truncate(
- spl_out->scl_data.ratios.horz_c, 19);
- spl_out->scl_data.ratios.vert_c = dc_fixpt_truncate(
- spl_out->scl_data.ratios.vert_c, 19);
+ spl_scratch->scl_data.ratios.horz = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.horz, 19);
+ spl_scratch->scl_data.ratios.vert = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.vert, 19);
+ spl_scratch->scl_data.ratios.horz_c = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.horz_c, 19);
+ spl_scratch->scl_data.ratios.vert_c = spl_fixpt_truncate(
+ spl_scratch->scl_data.ratios.vert_c, 19);
+
+ /*
+ * The coefficient tables and some registers are keyed on the inverse
+ * ratio, that is output/input, while the ratios above are input/output.
+ * Store 1/ratio in recip_ratio for those lookups.
+ */
+ spl_scratch->scl_data.recip_ratios.horz = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.horz);
+ spl_scratch->scl_data.recip_ratios.vert = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.vert);
+ spl_scratch->scl_data.recip_ratios.horz_c = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.horz_c);
+ spl_scratch->scl_data.recip_ratios.vert_c = spl_fixpt_recip(
+ spl_scratch->scl_data.ratios.vert_c);
}
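A small numeric illustration of the recip_ratios block above, using plain
doubles in place of spl_fixed31_32 (the values are hypothetical): the driver
computes ratios as input/output, while the coefficient lookups want
output/input, so the reciprocal is computed once and cached.

#include <stdio.h>

int main(void)
{
	double in_w = 3840.0, out_w = 1920.0;
	double ratio = in_w / out_w;		/* 2.0: input/output */
	double recip_ratio = 1.0 / ratio;	/* 0.5: output/input */

	printf("ratio=%.2f recip=%.2f\n", ratio, recip_ratio);
	return 0;
}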
+
/* Calculate Viewport size */
-static void spl_calculate_viewport_size(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_viewport_size(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
{
- spl_out->scl_data.viewport.width = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.horz,
- spl_out->scl_data.recout.width));
- spl_out->scl_data.viewport.height = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.vert,
- spl_out->scl_data.recout.height));
- spl_out->scl_data.viewport_c.width = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.horz_c,
- spl_out->scl_data.recout.width));
- spl_out->scl_data.viewport_c.height = dc_fixpt_ceil(dc_fixpt_mul_int(spl_out->scl_data.ratios.vert_c,
- spl_out->scl_data.recout.height));
+ spl_scratch->scl_data.viewport.width = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.horz,
+ spl_scratch->scl_data.recout.width));
+ spl_scratch->scl_data.viewport.height = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.vert,
+ spl_scratch->scl_data.recout.height));
+ spl_scratch->scl_data.viewport_c.width = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.horz_c,
+ spl_scratch->scl_data.recout.width));
+ spl_scratch->scl_data.viewport_c.height = spl_fixpt_ceil(spl_fixpt_mul_int(spl_scratch->scl_data.ratios.vert_c,
+ spl_scratch->scl_data.recout.height));
if (spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_90 ||
spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_270) {
- swap(spl_out->scl_data.viewport.width, spl_out->scl_data.viewport.height);
- swap(spl_out->scl_data.viewport_c.width, spl_out->scl_data.viewport_c.height);
+ spl_swap(spl_scratch->scl_data.viewport.width, spl_scratch->scl_data.viewport.height);
+ spl_swap(spl_scratch->scl_data.viewport_c.width, spl_scratch->scl_data.viewport_c.height);
}
}
+
static void spl_get_vp_scan_direction(enum spl_rotation_angle rotation,
bool horizontal_mirror,
bool *orthogonal_rotation,
@@ -440,6 +461,7 @@ static void spl_get_vp_scan_direction(enum spl_rotation_angle rotation,
if (horizontal_mirror)
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
+
/*
* We completely calculate vp offset, size and inits here based entirely on scaling
* ratios and recout for pixel perfect pipe combine.
@@ -449,13 +471,13 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
int recout_size,
int src_size,
int taps,
- struct fixed31_32 ratio,
- struct fixed31_32 init_adj,
- struct fixed31_32 *init,
+ struct spl_fixed31_32 ratio,
+ struct spl_fixed31_32 init_adj,
+ struct spl_fixed31_32 *init,
int *vp_offset,
int *vp_size)
{
- struct fixed31_32 temp;
+ struct spl_fixed31_32 temp;
int int_part;
/*
@@ -468,33 +490,33 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
* init_bot = init + scaling_ratio
* to get pixel perfect combine add the fraction from calculating vp offset
*/
- temp = dc_fixpt_mul_int(ratio, recout_offset_within_recout_full);
- *vp_offset = dc_fixpt_floor(temp);
+ temp = spl_fixpt_mul_int(ratio, recout_offset_within_recout_full);
+ *vp_offset = spl_fixpt_floor(temp);
temp.value &= 0xffffffff;
- *init = dc_fixpt_add(dc_fixpt_div_int(dc_fixpt_add_int(ratio, taps + 1), 2), temp);
- *init = dc_fixpt_add(*init, init_adj);
- *init = dc_fixpt_truncate(*init, 19);
+ *init = spl_fixpt_add(spl_fixpt_div_int(spl_fixpt_add_int(ratio, taps + 1), 2), temp);
+ *init = spl_fixpt_add(*init, init_adj);
+ *init = spl_fixpt_truncate(*init, 19);
/*
* If viewport has non 0 offset and there are more taps than covered by init then
* we should decrease the offset and increase init so we are never sampling
* outside of viewport.
*/
- int_part = dc_fixpt_floor(*init);
+ int_part = spl_fixpt_floor(*init);
if (int_part < taps) {
int_part = taps - int_part;
if (int_part > *vp_offset)
int_part = *vp_offset;
*vp_offset -= int_part;
- *init = dc_fixpt_add_int(*init, int_part);
+ *init = spl_fixpt_add_int(*init, int_part);
}
/*
* If taps are sampling outside of viewport at end of recout and there are more pixels
* available in the surface we should increase the viewport size, regardless set vp to
* only what is used.
*/
- temp = dc_fixpt_add(*init, dc_fixpt_mul_int(ratio, recout_size - 1));
- *vp_size = dc_fixpt_floor(temp);
+ temp = spl_fixpt_add(*init, spl_fixpt_mul_int(ratio, recout_size - 1));
+ *vp_size = spl_fixpt_floor(temp);
if (*vp_size + *vp_offset > src_size)
*vp_size = src_size - *vp_offset;
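A worked example of the init/viewport math in this function, with plain
doubles standing in for spl_fixed31_32 and hypothetical numbers (the
vp_offset lower-bound clamp is omitted for brevity):

#include <math.h>
#include <stdio.h>

int main(void)
{
	double ratio = 1.5;			/* 3:2 downscale */
	int taps = 4, recout_offset = 7;
	double t = ratio * recout_offset;	/* 10.5 */
	int vp_offset = (int)floor(t);		/* 10 */
	double init = (ratio + taps + 1) / 2.0 + (t - vp_offset); /* 3.25 + 0.5 */
	int int_part = (int)floor(init);	/* 3 < taps: borrow a pixel */

	if (int_part < taps) {
		vp_offset -= taps - int_part;	/* 10 -> 9 */
		init += taps - int_part;	/* 3.75 -> 4.75 */
	}
	printf("vp_offset=%d init=%.2f\n", vp_offset, init);
	return 0;
}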
@@ -509,15 +531,24 @@ static void spl_calculate_init_and_vp(bool flip_scan_dir,
static bool spl_is_yuv420(enum spl_pixel_format format)
{
- if ((format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN) &&
- (format <= SPL_PIXEL_FORMAT_VIDEO_END))
+ if ((format >= SPL_PIXEL_FORMAT_420BPP8) &&
+ (format <= SPL_PIXEL_FORMAT_420BPP10))
+ return true;
+
+ return false;
+}
+
+static bool spl_is_rgb8(enum spl_pixel_format format)
+{
+ if (format == SPL_PIXEL_FORMAT_ARGB8888)
return true;
return false;
}
/*Calculate inits and viewport */
-static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_calculate_inits_and_viewports(struct spl_in *spl_in,
+ struct spl_scratch *spl_scratch)
{
struct spl_rect src = spl_in->basic_in.src_rect;
struct spl_rect recout_dst_in_active_timing;
@@ -528,11 +559,11 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
int vpc_div = (spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8
|| spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
- struct fixed31_32 init_adj_h = dc_fixpt_zero;
- struct fixed31_32 init_adj_v = dc_fixpt_zero;
+ struct spl_fixed31_32 init_adj_h = spl_fixpt_zero;
+ struct spl_fixed31_32 init_adj_v = spl_fixpt_zero;
recout_clip_in_active_timing = shift_rec(
- &spl_out->scl_data.recout, odm_slice.x, odm_slice.y);
+ &spl_scratch->scl_data.recout, odm_slice.x, odm_slice.y);
recout_dst_in_active_timing = calculate_plane_rec_in_timing_active(
spl_in, &spl_in->basic_in.dst_rect);
overlap_in_active_timing = intersect_rec(&recout_clip_in_active_timing,
@@ -555,8 +586,8 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
&flip_horz_scan_dir);
if (orthogonal_rotation) {
- swap(src.width, src.height);
- swap(flip_vert_scan_dir, flip_horz_scan_dir);
+ spl_swap(src.width, src.height);
+ spl_swap(flip_vert_scan_dir, flip_horz_scan_dir);
}
if (spl_is_yuv420(spl_in->basic_in.format)) {
@@ -568,17 +599,17 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
switch (spl_in->basic_in.cositing) {
case CHROMA_COSITING_LEFT:
- init_adj_h = dc_fixpt_zero;
- init_adj_v = dc_fixpt_from_fraction(sign, 2);
+ init_adj_h = spl_fixpt_zero;
+ init_adj_v = spl_fixpt_from_fraction(sign, 4);
break;
case CHROMA_COSITING_NONE:
- init_adj_h = dc_fixpt_from_fraction(sign, 2);
- init_adj_v = dc_fixpt_from_fraction(sign, 2);
+ init_adj_h = spl_fixpt_from_fraction(sign, 4);
+ init_adj_v = spl_fixpt_from_fraction(sign, 4);
break;
case CHROMA_COSITING_TOPLEFT:
default:
- init_adj_h = dc_fixpt_zero;
- init_adj_v = dc_fixpt_zero;
+ init_adj_h = spl_fixpt_zero;
+ init_adj_v = spl_fixpt_zero;
break;
}
}
@@ -586,59 +617,60 @@ static void spl_calculate_inits_and_viewports(struct spl_in *spl_in, struct spl_
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
- spl_out->scl_data.recout.width,
+ spl_scratch->scl_data.recout.width,
src.width,
- spl_out->scl_data.taps.h_taps,
- spl_out->scl_data.ratios.horz,
- dc_fixpt_zero,
- &spl_out->scl_data.inits.h,
- &spl_out->scl_data.viewport.x,
- &spl_out->scl_data.viewport.width);
+ spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.ratios.horz,
+ spl_fixpt_zero,
+ &spl_scratch->scl_data.inits.h,
+ &spl_scratch->scl_data.viewport.x,
+ &spl_scratch->scl_data.viewport.width);
spl_calculate_init_and_vp(
flip_horz_scan_dir,
recout_clip_in_recout_dst.x,
- spl_out->scl_data.recout.width,
+ spl_scratch->scl_data.recout.width,
src.width / vpc_div,
- spl_out->scl_data.taps.h_taps_c,
- spl_out->scl_data.ratios.horz_c,
+ spl_scratch->scl_data.taps.h_taps_c,
+ spl_scratch->scl_data.ratios.horz_c,
init_adj_h,
- &spl_out->scl_data.inits.h_c,
- &spl_out->scl_data.viewport_c.x,
- &spl_out->scl_data.viewport_c.width);
+ &spl_scratch->scl_data.inits.h_c,
+ &spl_scratch->scl_data.viewport_c.x,
+ &spl_scratch->scl_data.viewport_c.width);
spl_calculate_init_and_vp(
flip_vert_scan_dir,
recout_clip_in_recout_dst.y,
- spl_out->scl_data.recout.height,
+ spl_scratch->scl_data.recout.height,
src.height,
- spl_out->scl_data.taps.v_taps,
- spl_out->scl_data.ratios.vert,
- dc_fixpt_zero,
- &spl_out->scl_data.inits.v,
- &spl_out->scl_data.viewport.y,
- &spl_out->scl_data.viewport.height);
+ spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.ratios.vert,
+ spl_fixpt_zero,
+ &spl_scratch->scl_data.inits.v,
+ &spl_scratch->scl_data.viewport.y,
+ &spl_scratch->scl_data.viewport.height);
spl_calculate_init_and_vp(
flip_vert_scan_dir,
recout_clip_in_recout_dst.y,
- spl_out->scl_data.recout.height,
+ spl_scratch->scl_data.recout.height,
src.height / vpc_div,
- spl_out->scl_data.taps.v_taps_c,
- spl_out->scl_data.ratios.vert_c,
+ spl_scratch->scl_data.taps.v_taps_c,
+ spl_scratch->scl_data.ratios.vert_c,
init_adj_v,
- &spl_out->scl_data.inits.v_c,
- &spl_out->scl_data.viewport_c.y,
- &spl_out->scl_data.viewport_c.height);
+ &spl_scratch->scl_data.inits.v_c,
+ &spl_scratch->scl_data.viewport_c.y,
+ &spl_scratch->scl_data.viewport_c.height);
if (orthogonal_rotation) {
- swap(spl_out->scl_data.viewport.x, spl_out->scl_data.viewport.y);
- swap(spl_out->scl_data.viewport.width, spl_out->scl_data.viewport.height);
- swap(spl_out->scl_data.viewport_c.x, spl_out->scl_data.viewport_c.y);
- swap(spl_out->scl_data.viewport_c.width, spl_out->scl_data.viewport_c.height);
+ spl_swap(spl_scratch->scl_data.viewport.x, spl_scratch->scl_data.viewport.y);
+ spl_swap(spl_scratch->scl_data.viewport.width, spl_scratch->scl_data.viewport.height);
+ spl_swap(spl_scratch->scl_data.viewport_c.x, spl_scratch->scl_data.viewport_c.y);
+ spl_swap(spl_scratch->scl_data.viewport_c.width, spl_scratch->scl_data.viewport_c.height);
}
- spl_out->scl_data.viewport.x += src.x;
- spl_out->scl_data.viewport.y += src.y;
- ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
- spl_out->scl_data.viewport_c.x += src.x / vpc_div;
- spl_out->scl_data.viewport_c.y += src.y / vpc_div;
+ spl_scratch->scl_data.viewport.x += src.x;
+ spl_scratch->scl_data.viewport.y += src.y;
+ SPL_ASSERT(src.x % vpc_div == 0 && src.y % vpc_div == 0);
+ spl_scratch->scl_data.viewport_c.x += src.x / vpc_div;
+ spl_scratch->scl_data.viewport_c.y += src.y / vpc_div;
}
+
static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
{
/*
@@ -647,7 +679,7 @@ static void spl_handle_3d_recout(struct spl_in *spl_in, struct spl_rect *recout)
* This may break with rotation, good thing we aren't mixing hw rotation and 3d
*/
if (spl_in->basic_in.mpc_combine_v) {
- ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
+ SPL_ASSERT(spl_in->basic_in.rotation == SPL_ROTATION_ANGLE_0 ||
(spl_in->basic_out.view_format != SPL_VIEW_3D_TOP_AND_BOTTOM &&
spl_in->basic_out.view_format != SPL_VIEW_3D_SIDE_BY_SIDE));
if (spl_in->basic_out.view_format == SPL_VIEW_3D_TOP_AND_BOTTOM)
@@ -665,6 +697,7 @@ static void spl_clamp_viewport(struct spl_rect *viewport)
if (viewport->width < MIN_VIEWPORT_SIZE)
viewport->width = MIN_VIEWPORT_SIZE;
}
+
static bool spl_dscl_is_420_format(enum spl_pixel_format format)
{
if (format == SPL_PIXEL_FORMAT_420BPP8 ||
@@ -673,6 +706,7 @@ static bool spl_dscl_is_420_format(enum spl_pixel_format format)
else
return false;
}
+
static bool spl_dscl_is_video_format(enum spl_pixel_format format)
{
if (format >= SPL_PIXEL_FORMAT_VIDEO_BEGIN
@@ -681,17 +715,21 @@ static bool spl_dscl_is_video_format(enum spl_pixel_format format)
else
return false;
}
+
static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
- const struct spl_scaler_data *data)
+ const struct spl_scaler_data *data,
+ bool enable_isharp, bool enable_easf)
{
- const long long one = dc_fixpt_one.value;
+ const long long one = spl_fixpt_one.value;
enum spl_pixel_format pixel_format = spl_in->basic_in.format;
+ /* Bypass when all ratios are 1:1 and neither ISHARP nor always_scale forces the scaler on */
if (data->ratios.horz.value == one
&& data->ratios.vert.value == one
&& data->ratios.horz_c.value == one
&& data->ratios.vert_c.value == one
- && !spl_in->basic_out.always_scale)
+ && !spl_in->basic_out.always_scale
+ && !enable_isharp)
return SCL_MODE_SCALING_444_BYPASS;
if (!spl_dscl_is_420_format(pixel_format)) {
@@ -700,69 +738,196 @@ static enum scl_mode spl_get_dscl_mode(const struct spl_in *spl_in,
else
return SCL_MODE_SCALING_444_RGB_ENABLE;
}
- if (data->ratios.horz.value == one && data->ratios.vert.value == one)
- return SCL_MODE_SCALING_420_LUMA_BYPASS;
- if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
- return SCL_MODE_SCALING_420_CHROMA_BYPASS;
+
+ /* Bypass YUV if at 1:1 with no ISHARP or if doing 2:1 YUV
+ * downscale without EASF
+ */
+ if ((!enable_isharp) && (!enable_easf)) {
+ if (data->ratios.horz.value == one && data->ratios.vert.value == one)
+ return SCL_MODE_SCALING_420_LUMA_BYPASS;
+ if (data->ratios.horz_c.value == one && data->ratios.vert_c.value == one)
+ return SCL_MODE_SCALING_420_CHROMA_BYPASS;
+ }
return SCL_MODE_SCALING_420_YCBCR_ENABLE;
}
+
+static bool spl_choose_lls_policy(enum spl_pixel_format format,
+ enum spl_transfer_func_type tf_type,
+ enum spl_transfer_func_predefined tf_predefined_type,
+ enum linear_light_scaling *lls_pref)
+{
+ if (spl_is_yuv420(format)) {
+ *lls_pref = LLS_PREF_NO;
+ if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
+ (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
+ return true;
+ } else { /* RGB or YUV444 */
+ if ((tf_type == SPL_TF_TYPE_PREDEFINED) ||
+ (tf_type == SPL_TF_TYPE_BYPASS)) {
+ *lls_pref = LLS_PREF_YES;
+ return true;
+ }
+ }
+ *lls_pref = LLS_PREF_NO;
+ return false;
+}
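The policy above reduces to a small decision table. A self-checking sketch
that mirrors its branches (the enum and function names are hypothetical):

#include <assert.h>
#include <stdbool.h>

enum tf { TF_PREDEFINED, TF_DISTRIBUTED_POINTS, TF_BYPASS, TF_OTHER };
enum lls { LLS_NO, LLS_YES };

/* YUV420 always scales nonlinearly; RGB/YUV444 prefers linear-light
 * scaling only for predefined or bypass transfer functions.
 */
static bool choose_lls(bool is_yuv420, enum tf tf_type, enum lls *pref)
{
	if (is_yuv420) {
		*pref = LLS_NO;
		return tf_type == TF_PREDEFINED ||
		       tf_type == TF_DISTRIBUTED_POINTS;
	}
	if (tf_type == TF_PREDEFINED || tf_type == TF_BYPASS) {
		*pref = LLS_YES;
		return true;
	}
	*pref = LLS_NO;
	return false;
}

int main(void)
{
	enum lls pref;

	assert(choose_lls(true, TF_PREDEFINED, &pref) && pref == LLS_NO);
	assert(choose_lls(false, TF_BYPASS, &pref) && pref == LLS_YES);
	assert(!choose_lls(false, TF_DISTRIBUTED_POINTS, &pref) && pref == LLS_NO);
	return 0;
}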
+
+/* Enable EASF? Returns true when EASF should be skipped */
+static bool enable_easf(struct spl_in *spl_in, struct spl_scratch *spl_scratch)
+{
+ int vratio = 0;
+ int hratio = 0;
+ bool skip_easf = false;
+ bool lls_enable_easf = true;
+
+ if (spl_in->disable_easf)
+ skip_easf = true;
+
+ vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
+ hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz);
+
+ /*
+ * No EASF support for downscaling > 2:1
+ * EASF support for upscaling or downscaling up to 2:1
+ */
+ if ((vratio > 2) || (hratio > 2))
+ skip_easf = true;
+
+ /*
+ * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer
+ * function to determine whether to use LINEAR or NONLINEAR scaling
+ */
+ if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
+ lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format,
+ spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type,
+ &spl_in->lls_pref);
+
+ if (!lls_enable_easf)
+ skip_easf = true;
+
+ /* Check for linear scaling or EASF preferred */
+ if (spl_in->lls_pref != LLS_PREF_YES && !spl_in->prefer_easf)
+ skip_easf = true;
+
+ return skip_easf;
+}
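Note the inverted return value: a true result from this helper means EASF is
skipped. A condensed restatement of the gating conditions (parameter names
are hypothetical):

#include <stdbool.h>

static bool easf_skipped(bool disable_easf, int vratio_ceil, int hratio_ceil,
			 bool lls_policy_ok, bool lls_yes, bool prefer_easf)
{
	if (disable_easf)
		return true;
	if (vratio_ceil > 2 || hratio_ceil > 2)
		return true;	/* no EASF beyond 2:1 downscale */
	if (!lls_policy_ok)
		return true;	/* could not resolve a linear-light policy */
	if (!lls_yes && !prefer_easf)
		return true;	/* neither linear scaling nor EASF preferred */
	return false;
}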
+
+static bool spl_get_isharp_en(struct spl_in *spl_in,
+ struct spl_scratch *spl_scratch)
+{
+ bool enable_isharp = false;
+ int vratio = 0;
+ int hratio = 0;
+ struct spl_taps taps = spl_scratch->scl_data.taps;
+
+ /* Return if adaptive sharpness is disabled */
+ if (spl_in->adaptive_sharpness.enable == false)
+ return enable_isharp;
+
+ vratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
+ hratio = spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz);
+
+ /* No iSHARP support for downscaling */
+ if (vratio > 1 || hratio > 1)
+ return enable_isharp;
+
+ // Scaling is up to 1:1 (no scaling) or upscaling
+
+ /*
+ * Apply sharpness to all RGB surfaces and to
+ * NV12/P010 surfaces
+ */
+
+ /*
+ * Apply sharpness only when the scaler supports horizontal
+ * taps 4 or 6 AND vertical taps 3, 4, or 6
+ */
+ if ((taps.h_taps == 4 || taps.h_taps == 6) &&
+ (taps.v_taps == 3 || taps.v_taps == 4 || taps.v_taps == 6))
+ enable_isharp = true;
+
+ return enable_isharp;
+}
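The ISHARP gate above boils down to one predicate. A condensed sketch
(parameter names are hypothetical):

#include <stdbool.h>

/* Sharpening needs adaptive sharpness enabled, no downscale in either
 * axis, and a supported tap combination (h: 4 or 6; v: 3, 4, or 6).
 */
static bool isharp_allowed(bool sharpness_enabled, int vratio_ceil,
			   int hratio_ceil, int h_taps, int v_taps)
{
	if (!sharpness_enabled || vratio_ceil > 1 || hratio_ceil > 1)
		return false;
	return (h_taps == 4 || h_taps == 6) &&
	       (v_taps == 3 || v_taps == 4 || v_taps == 6);
}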
+
/* Calculate optimal number of taps */
static bool spl_get_optimal_number_of_taps(
- int max_downscale_src_width, struct spl_in *spl_in, struct spl_out *spl_out,
- const struct spl_taps *in_taps)
+ int max_downscale_src_width, struct spl_in *spl_in, struct spl_scratch *spl_scratch,
+ const struct spl_taps *in_taps, bool *enable_easf_v, bool *enable_easf_h,
+ bool *enable_isharp)
{
int num_part_y, num_part_c;
int max_taps_y, max_taps_c;
int min_taps_y, min_taps_c;
enum lb_memory_config lb_config;
+ bool skip_easf = false;
- if (spl_out->scl_data.viewport.width > spl_out->scl_data.h_active &&
+ if (spl_scratch->scl_data.viewport.width > spl_scratch->scl_data.h_active &&
max_downscale_src_width != 0 &&
- spl_out->scl_data.viewport.width > max_downscale_src_width)
+ spl_scratch->scl_data.viewport.width > max_downscale_src_width)
return false;
+
+ /* Check if we are using EASF or not */
+ skip_easf = enable_easf(spl_in, spl_scratch);
+
/*
* Set default taps if none are provided
* From programming guide: taps = min{ ceil(2*H_RATIO,1), 8} for downscaling
* taps = 4 for upscaling
*/
- if (in_taps->h_taps == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.horz) > 1)
- spl_out->scl_data.taps.h_taps = min(2 * dc_fixpt_ceil(spl_out->scl_data.ratios.horz), 8);
- else
- spl_out->scl_data.taps.h_taps = 4;
- } else
- spl_out->scl_data.taps.h_taps = in_taps->h_taps;
- if (in_taps->v_taps == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) > 1)
- spl_out->scl_data.taps.v_taps = min(dc_fixpt_ceil(dc_fixpt_mul_int(
- spl_out->scl_data.ratios.vert, 2)), 8);
- else
- spl_out->scl_data.taps.v_taps = 4;
- } else
- spl_out->scl_data.taps.v_taps = in_taps->v_taps;
- if (in_taps->v_taps_c == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) > 1)
- spl_out->scl_data.taps.v_taps_c = min(dc_fixpt_ceil(dc_fixpt_mul_int(
- spl_out->scl_data.ratios.vert_c, 2)), 8);
- else
- spl_out->scl_data.taps.v_taps_c = 4;
- } else
- spl_out->scl_data.taps.v_taps_c = in_taps->v_taps_c;
- if (in_taps->h_taps_c == 0) {
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.horz_c) > 1)
- spl_out->scl_data.taps.h_taps_c = min(2 * dc_fixpt_ceil(spl_out->scl_data.ratios.horz_c), 8);
+ if (skip_easf) {
+ if (in_taps->h_taps == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz) > 1)
+ spl_scratch->scl_data.taps.h_taps = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.horz), 8);
+ else
+ spl_scratch->scl_data.taps.h_taps = 4;
+ } else
+ spl_scratch->scl_data.taps.h_taps = in_taps->h_taps;
+ if (in_taps->v_taps == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 1)
+ spl_scratch->scl_data.taps.v_taps = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
+ spl_scratch->scl_data.ratios.vert, 2)), 8);
+ else
+ spl_scratch->scl_data.taps.v_taps = 4;
+ } else
+ spl_scratch->scl_data.taps.v_taps = in_taps->v_taps;
+ if (in_taps->v_taps_c == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 1)
+ spl_scratch->scl_data.taps.v_taps_c = spl_min(spl_fixpt_ceil(spl_fixpt_mul_int(
+ spl_scratch->scl_data.ratios.vert_c, 2)), 8);
+ else
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+ } else
+ spl_scratch->scl_data.taps.v_taps_c = in_taps->v_taps_c;
+ if (in_taps->h_taps_c == 0) {
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.horz_c) > 1)
+ spl_scratch->scl_data.taps.h_taps_c = spl_min(2 * spl_fixpt_ceil(
+ spl_scratch->scl_data.ratios.horz_c), 8);
+ else
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+ } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
+ /* Only 1 and even h_taps_c are supported by hw */
+ spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1;
else
- spl_out->scl_data.taps.h_taps_c = 4;
- } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
- /* Only 1 and even h_taps_c are supported by hw */
- spl_out->scl_data.taps.h_taps_c = in_taps->h_taps_c - 1;
- else
- spl_out->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+ spl_scratch->scl_data.taps.h_taps_c = in_taps->h_taps_c;
+ } else {
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
+ spl_scratch->scl_data.taps.h_taps = 6;
+ spl_scratch->scl_data.taps.v_taps = 6;
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+ } else { /* RGB */
+ spl_scratch->scl_data.taps.h_taps = 6;
+ spl_scratch->scl_data.taps.v_taps = 6;
+ spl_scratch->scl_data.taps.h_taps_c = 6;
+ spl_scratch->scl_data.taps.v_taps_c = 6;
+ }
+ }
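The default-tap rule quoted from the programming guide is easy to sanity
check. A standalone restatement with doubles (the sample ratios are
illustrative only):

#include <math.h>
#include <stdio.h>

/* min(2 * ceil(ratio), 8) when downscaling, otherwise 4 */
static int default_taps(double ratio)
{
	if (ceil(ratio) > 1) {
		int taps = 2 * (int)ceil(ratio);
		return taps < 8 ? taps : 8;
	}
	return 4;
}

int main(void)
{
	printf("%d %d %d\n",
	       default_taps(0.5),	/* upscale: 4 */
	       default_taps(1.7),	/* <=2:1 downscale: 4 */
	       default_taps(3.2));	/* 4:1 downscale: 8 */
	return 0;
}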
/*Ensure we can support the requested number of vtaps*/
- min_taps_y = dc_fixpt_ceil(spl_out->scl_data.ratios.vert);
- min_taps_c = dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c);
+ min_taps_y = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert);
+ min_taps_c = spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c);
/* Use LB_MEMORY_CONFIG_3 for 4:2:0 */
if ((spl_in->basic_in.format == SPL_PIXEL_FORMAT_420BPP8)
@@ -771,16 +936,16 @@ static bool spl_get_optimal_number_of_taps(
else
lb_config = LB_MEMORY_CONFIG_0;
// Determine max vtap support by calculating how much line buffer can fit
- spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_out->scl_data,
+ spl_in->funcs->spl_calc_lb_num_partitions(spl_in->basic_out.alpha_en, &spl_scratch->scl_data,
lb_config, &num_part_y, &num_part_c);
/* MAX_V_TAPS = MIN (NUM_LINES - MAX(CEILING(V_RATIO,1)-2, 0), 8) */
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) > 2)
- max_taps_y = num_part_y - (dc_fixpt_ceil(spl_out->scl_data.ratios.vert) - 2);
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) > 2)
+ max_taps_y = num_part_y - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert) - 2);
else
max_taps_y = num_part_y;
- if (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) > 2)
- max_taps_c = num_part_c - (dc_fixpt_ceil(spl_out->scl_data.ratios.vert_c) - 2);
+ if (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) > 2)
+ max_taps_c = num_part_c - (spl_fixpt_ceil(spl_scratch->scl_data.ratios.vert_c) - 2);
else
max_taps_c = num_part_c;
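A worked instance of the MAX_V_TAPS formula above, with hypothetical
line-buffer numbers:

#include <stdio.h>

/* MAX_V_TAPS = MIN(NUM_LINES - MAX(CEILING(V_RATIO,1) - 2, 0), 8):
 * 6 line-buffer partitions at a 3:1 vertical downscale.
 */
int main(void)
{
	int num_part_y = 6, vert_ratio_ceil = 3;
	int max_taps_y = num_part_y;

	if (vert_ratio_ceil > 2)
		max_taps_y -= vert_ratio_ceil - 2;	/* 6 - 1 = 5 */
	if (max_taps_y > 8)
		max_taps_y = 8;

	printf("max_taps_y=%d\n", max_taps_y);		/* 5 */
	return 0;
}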
@@ -789,48 +954,108 @@ static bool spl_get_optimal_number_of_taps(
else if (max_taps_c < min_taps_c)
return false;
- if (spl_out->scl_data.taps.v_taps > max_taps_y)
- spl_out->scl_data.taps.v_taps = max_taps_y;
-
- if (spl_out->scl_data.taps.v_taps_c > max_taps_c)
- spl_out->scl_data.taps.v_taps_c = max_taps_c;
- if (spl_in->prefer_easf) {
- // EASF can be enabled only for taps 3,4,6
- // If optimal no of taps is 5, then set it to 4
- // If optimal no of taps is 7 or 8, then set it to 6
- if (spl_out->scl_data.taps.v_taps == 5)
- spl_out->scl_data.taps.v_taps = 4;
- if (spl_out->scl_data.taps.v_taps == 7 || spl_out->scl_data.taps.v_taps == 8)
- spl_out->scl_data.taps.v_taps = 6;
-
- if (spl_out->scl_data.taps.v_taps_c == 5)
- spl_out->scl_data.taps.v_taps_c = 4;
- if (spl_out->scl_data.taps.v_taps_c == 7 || spl_out->scl_data.taps.v_taps_c == 8)
- spl_out->scl_data.taps.v_taps_c = 6;
-
- if (spl_out->scl_data.taps.h_taps == 5)
- spl_out->scl_data.taps.h_taps = 4;
- if (spl_out->scl_data.taps.h_taps == 7 || spl_out->scl_data.taps.h_taps == 8)
- spl_out->scl_data.taps.h_taps = 6;
-
- if (spl_out->scl_data.taps.h_taps_c == 5)
- spl_out->scl_data.taps.h_taps_c = 4;
- if (spl_out->scl_data.taps.h_taps_c == 7 || spl_out->scl_data.taps.h_taps_c == 8)
- spl_out->scl_data.taps.h_taps_c = 6;
+ if (spl_scratch->scl_data.taps.v_taps > max_taps_y)
+ spl_scratch->scl_data.taps.v_taps = max_taps_y;
+ if (spl_scratch->scl_data.taps.v_taps_c > max_taps_c)
+ spl_scratch->scl_data.taps.v_taps_c = max_taps_c;
+
+ if (!skip_easf) {
+ /*
+ * RGB ( L + NL ) and Linear HDR support 6x6, 6x4, 6x3, 4x4, 4x3
+ * NL YUV420 only supports 6x6, 6x4 for Y and 4x4 for UV
+ *
+ * If LB does not support 3, 4, or 6 taps, then disable EASF_V
+ * and only enable EASF_H. So for RGB, support 6x2, 4x2
+ * and for NL YUV420, support 6x2 for Y and 4x2 for UV
+ *
+ * All other cases, have to disable EASF_V and EASF_H
+ *
+ * If optimal no of taps is 5, then set it to 4
+ * If optimal no of taps is 7 or 8, nothing to do since the EASF defaults already cap taps at 6
+ *
+ */
+ if (spl_scratch->scl_data.taps.v_taps == 5)
+ spl_scratch->scl_data.taps.v_taps = 4;
+
+ if (spl_scratch->scl_data.taps.v_taps_c == 5)
+ spl_scratch->scl_data.taps.v_taps_c = 4;
+
+ if (spl_scratch->scl_data.taps.h_taps == 5)
+ spl_scratch->scl_data.taps.h_taps = 4;
+
+ if (spl_scratch->scl_data.taps.h_taps_c == 5)
+ spl_scratch->scl_data.taps.h_taps_c = 4;
+
+ if (spl_is_yuv420(spl_in->basic_in.format)) {
+ if ((spl_scratch->scl_data.taps.h_taps <= 4) ||
+ (spl_scratch->scl_data.taps.h_taps_c <= 3)) {
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ } else if ((spl_scratch->scl_data.taps.v_taps <= 3) ||
+ (spl_scratch->scl_data.taps.v_taps_c <= 3)) {
+ *enable_easf_v = false;
+ *enable_easf_h = true;
+ } else {
+ *enable_easf_v = true;
+ *enable_easf_h = true;
+ }
+ SPL_ASSERT((spl_scratch->scl_data.taps.v_taps > 1) &&
+ (spl_scratch->scl_data.taps.v_taps_c > 1));
+ } else { /* RGB */
+ if (spl_scratch->scl_data.taps.h_taps <= 3) {
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ } else if (spl_scratch->scl_data.taps.v_taps < 3) {
+ *enable_easf_v = false;
+ *enable_easf_h = true;
+ } else {
+ *enable_easf_v = true;
+ *enable_easf_h = true;
+ }
+ SPL_ASSERT(spl_scratch->scl_data.taps.v_taps > 1);
+ }
+ } else {
+ *enable_easf_v = false;
+ *enable_easf_h = false;
} // end of if prefer_easf
- if (!spl_in->basic_out.always_scale) {
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.horz))
- spl_out->scl_data.taps.h_taps = 1;
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.vert))
- spl_out->scl_data.taps.v_taps = 1;
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.horz_c))
- spl_out->scl_data.taps.h_taps_c = 1;
- if (IDENTITY_RATIO(spl_out->scl_data.ratios.vert_c))
- spl_out->scl_data.taps.v_taps_c = 1;
+
+ /* The sharpener requires the scaler to be enabled, even at 1:1.
+ * Check whether ISHARP can be enabled.
+ * If ISHARP is not enabled, set taps to 1 for 1:1 ratios and
+ * disable EASF.
+ * For 2:1 YUV where chroma is 1:1, set the chroma taps to 1 when
+ * EASF is not enabled.
+ */
+
+ *enable_isharp = spl_get_isharp_en(spl_in, spl_scratch);
+ if (!*enable_isharp && !spl_in->basic_out.always_scale) {
+ if ((IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz)) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert))) {
+ spl_scratch->scl_data.taps.h_taps = 1;
+ spl_scratch->scl_data.taps.v_taps = 1;
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c))
+ spl_scratch->scl_data.taps.h_taps_c = 1;
+
+ if (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c))
+ spl_scratch->scl_data.taps.v_taps_c = 1;
+
+ *enable_easf_v = false;
+ *enable_easf_h = false;
+ } else {
+ if ((!*enable_easf_h) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.horz_c)))
+ spl_scratch->scl_data.taps.h_taps_c = 1;
+
+ if ((!*enable_easf_v) &&
+ (IDENTITY_RATIO(spl_scratch->scl_data.ratios.vert_c)))
+ spl_scratch->scl_data.taps.v_taps_c = 1;
+ }
}
return true;
}
+
static void spl_set_black_color_data(enum spl_pixel_format format,
struct scl_black_color *scl_black_color)
{
@@ -848,38 +1073,38 @@ static void spl_set_black_color_data(enum spl_pixel_format format,
static void spl_set_manual_ratio_init_data(struct dscl_prog_data *dscl_prog_data,
const struct spl_scaler_data *scl_data)
{
- struct fixed31_32 bot;
+ struct spl_fixed31_32 bot;
- dscl_prog_data->ratios.h_scale_ratio = dc_fixpt_u3d19(scl_data->ratios.horz) << 5;
- dscl_prog_data->ratios.v_scale_ratio = dc_fixpt_u3d19(scl_data->ratios.vert) << 5;
- dscl_prog_data->ratios.h_scale_ratio_c = dc_fixpt_u3d19(scl_data->ratios.horz_c) << 5;
- dscl_prog_data->ratios.v_scale_ratio_c = dc_fixpt_u3d19(scl_data->ratios.vert_c) << 5;
+ dscl_prog_data->ratios.h_scale_ratio = spl_fixpt_u3d19(scl_data->ratios.horz) << 5;
+ dscl_prog_data->ratios.v_scale_ratio = spl_fixpt_u3d19(scl_data->ratios.vert) << 5;
+ dscl_prog_data->ratios.h_scale_ratio_c = spl_fixpt_u3d19(scl_data->ratios.horz_c) << 5;
+ dscl_prog_data->ratios.v_scale_ratio_c = spl_fixpt_u3d19(scl_data->ratios.vert_c) << 5;
/*
* 0.24 format for fraction, first five bits zeroed
*/
dscl_prog_data->init.h_filter_init_frac =
- dc_fixpt_u0d19(scl_data->inits.h) << 5;
+ spl_fixpt_u0d19(scl_data->inits.h) << 5;
dscl_prog_data->init.h_filter_init_int =
- dc_fixpt_floor(scl_data->inits.h);
+ spl_fixpt_floor(scl_data->inits.h);
dscl_prog_data->init.h_filter_init_frac_c =
- dc_fixpt_u0d19(scl_data->inits.h_c) << 5;
+ spl_fixpt_u0d19(scl_data->inits.h_c) << 5;
dscl_prog_data->init.h_filter_init_int_c =
- dc_fixpt_floor(scl_data->inits.h_c);
+ spl_fixpt_floor(scl_data->inits.h_c);
dscl_prog_data->init.v_filter_init_frac =
- dc_fixpt_u0d19(scl_data->inits.v) << 5;
+ spl_fixpt_u0d19(scl_data->inits.v) << 5;
dscl_prog_data->init.v_filter_init_int =
- dc_fixpt_floor(scl_data->inits.v);
+ spl_fixpt_floor(scl_data->inits.v);
dscl_prog_data->init.v_filter_init_frac_c =
- dc_fixpt_u0d19(scl_data->inits.v_c) << 5;
+ spl_fixpt_u0d19(scl_data->inits.v_c) << 5;
dscl_prog_data->init.v_filter_init_int_c =
- dc_fixpt_floor(scl_data->inits.v_c);
-
- bot = dc_fixpt_add(scl_data->inits.v, scl_data->ratios.vert);
- dscl_prog_data->init.v_filter_init_bot_frac = dc_fixpt_u0d19(bot) << 5;
- dscl_prog_data->init.v_filter_init_bot_int = dc_fixpt_floor(bot);
- bot = dc_fixpt_add(scl_data->inits.v_c, scl_data->ratios.vert_c);
- dscl_prog_data->init.v_filter_init_bot_frac_c = dc_fixpt_u0d19(bot) << 5;
- dscl_prog_data->init.v_filter_init_bot_int_c = dc_fixpt_floor(bot);
+ spl_fixpt_floor(scl_data->inits.v_c);
+
+ bot = spl_fixpt_add(scl_data->inits.v, scl_data->ratios.vert);
+ dscl_prog_data->init.v_filter_init_bot_frac = spl_fixpt_u0d19(bot) << 5;
+ dscl_prog_data->init.v_filter_init_bot_int = spl_fixpt_floor(bot);
+ bot = spl_fixpt_add(scl_data->inits.v_c, scl_data->ratios.vert_c);
+ dscl_prog_data->init.v_filter_init_bot_frac_c = spl_fixpt_u0d19(bot) << 5;
+ dscl_prog_data->init.v_filter_init_bot_int_c = spl_fixpt_floor(bot);
}
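A tiny check of the "0.24 format for fraction, first five bits zeroed"
packing above: a U0.19 fraction is shifted into the top of a 24-bit
fraction field (the sample value is hypothetical).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t u0d19 = 0x40000;	/* 0.5 in U0.19 (2^18) */
	uint32_t frac24 = u0d19 << 5;	/* 0.5 in 0.24, low 5 bits zero */

	printf("0x%06x\n", frac24);	/* 0x800000 (2^23) */
	return 0;
}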
static void spl_set_taps_data(struct dscl_prog_data *dscl_prog_data,
@@ -890,79 +1115,28 @@ static void spl_set_taps_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->taps.v_taps_c = scl_data->taps.v_taps_c - 1;
dscl_prog_data->taps.h_taps_c = scl_data->taps.h_taps_c - 1;
}
-static const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct fixed31_32 ratio)
-{
- if (taps == 8)
- return spl_get_filter_8tap_64p(ratio);
- else if (taps == 7)
- return spl_get_filter_7tap_64p(ratio);
- else if (taps == 6)
- return spl_get_filter_6tap_64p(ratio);
- else if (taps == 5)
- return spl_get_filter_5tap_64p(ratio);
- else if (taps == 4)
- return spl_get_filter_4tap_64p(ratio);
- else if (taps == 3)
- return spl_get_filter_3tap_64p(ratio);
- else if (taps == 2)
- return spl_get_filter_2tap_64p();
- else if (taps == 1)
- return NULL;
- else {
- /* should never happen, bug */
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-}
-static void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
- const struct spl_scaler_data *data)
-{
- dscl_prog_data->filter_h = spl_dscl_get_filter_coeffs_64p(
- data->taps.h_taps, data->ratios.horz);
- dscl_prog_data->filter_v = spl_dscl_get_filter_coeffs_64p(
- data->taps.v_taps, data->ratios.vert);
- dscl_prog_data->filter_h_c = spl_dscl_get_filter_coeffs_64p(
- data->taps.h_taps_c, data->ratios.horz_c);
- dscl_prog_data->filter_v_c = spl_dscl_get_filter_coeffs_64p(
- data->taps.v_taps_c, data->ratios.vert_c);
-}
-#ifdef CONFIG_DRM_AMD_DC_FP
-static const uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
-{
- if ((taps == 3) || (taps == 4) || (taps == 6))
- return spl_get_filter_isharp_bs_4tap_64p();
- else {
- /* should never happen, bug */
- BREAK_TO_DEBUGGER();
- return NULL;
- }
-}
-static void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
- const struct spl_scaler_data *data)
-{
- dscl_prog_data->filter_blur_scale_h = spl_dscl_get_blur_scale_coeffs_64p(
- data->taps.h_taps);
- dscl_prog_data->filter_blur_scale_v = spl_dscl_get_blur_scale_coeffs_64p(
- data->taps.v_taps);
-}
-#endif
+
/* Populate dscl prog data structure from scaler data calculated by SPL */
-static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_out *spl_out)
+static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_scratch *spl_scratch,
+ struct spl_out *spl_out, bool enable_easf_v, bool enable_easf_h, bool enable_isharp)
{
struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data;
- const struct spl_scaler_data *data = &spl_out->scl_data;
+ const struct spl_scaler_data *data = &spl_scratch->scl_data;
struct scl_black_color *scl_black_color = &dscl_prog_data->scl_black_color;
+ bool enable_easf = enable_easf_v || enable_easf_h;
+
// Set values for recout
- dscl_prog_data->recout = spl_out->scl_data.recout;
+ dscl_prog_data->recout = spl_scratch->scl_data.recout;
// Set values for MPC Size
- dscl_prog_data->mpc_size.width = spl_out->scl_data.h_active;
- dscl_prog_data->mpc_size.height = spl_out->scl_data.v_active;
+ dscl_prog_data->mpc_size.width = spl_scratch->scl_data.h_active;
+ dscl_prog_data->mpc_size.height = spl_scratch->scl_data.v_active;
// SCL_MODE - Set SCL_MODE data
- dscl_prog_data->dscl_mode = spl_get_dscl_mode(spl_in, data);
+ dscl_prog_data->dscl_mode = spl_get_dscl_mode(spl_in, data, enable_isharp,
+ enable_easf);
// SCL_BLACK_COLOR
spl_set_black_color_data(spl_in->basic_in.format, scl_black_color);
@@ -973,103 +1147,135 @@ static void spl_set_dscl_prog_data(struct spl_in *spl_in, struct spl_out *spl_ou
// Set HTaps/VTaps
spl_set_taps_data(dscl_prog_data, data);
// Set viewport
- dscl_prog_data->viewport = spl_out->scl_data.viewport;
+ dscl_prog_data->viewport = spl_scratch->scl_data.viewport;
// Set viewport_c
- dscl_prog_data->viewport_c = spl_out->scl_data.viewport_c;
+ dscl_prog_data->viewport_c = spl_scratch->scl_data.viewport_c;
// Set filters data
- spl_set_filters_data(dscl_prog_data, data);
+ spl_set_filters_data(dscl_prog_data, data, enable_easf_v, enable_easf_h);
}
-/* Enable EASF ?*/
-static bool enable_easf(int scale_ratio, int taps,
- enum linear_light_scaling lls_pref, bool prefer_easf)
+
+/* Calculate C0-C3 coefficients based on HDR_mult */
+static void spl_calculate_c0_c3_hdr(struct dscl_prog_data *dscl_prog_data, uint32_t hdr_multx100)
{
- // Is downscaling > 6:1 ?
- if (scale_ratio > 6) {
- // END - No EASF support for downscaling > 6:1
- return false;
- }
- // Is upscaling or downscaling up to 2:1?
- if (scale_ratio <= 2) {
- // Is linear scaling or EASF preferred?
- if (lls_pref == LLS_PREF_YES || prefer_easf) {
- // LB support taps 3, 4, 6
- if (taps == 3 || taps == 4 || taps == 6) {
- // END - EASF supported
- return true;
- }
- }
- }
- // END - EASF not supported
- return false;
+ struct spl_fixed31_32 hdr_mult, c0_mult, c1_mult, c2_mult;
+ struct spl_fixed31_32 c0_calc, c1_calc, c2_calc;
+ struct spl_custom_float_format fmt;
+
+ SPL_ASSERT(hdr_multx100);
+ hdr_mult = spl_fixpt_from_fraction((long long)hdr_multx100, 100LL);
+ c0_mult = spl_fixpt_from_fraction(2126LL, 10000LL);
+ c1_mult = spl_fixpt_from_fraction(7152LL, 10000LL);
+ c2_mult = spl_fixpt_from_fraction(722LL, 10000LL);
+
+ c0_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c0_mult, spl_fixpt_from_fraction(
+ 16384LL, 125LL)));
+ c1_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c1_mult, spl_fixpt_from_fraction(
+ 16384LL, 125LL)));
+ c2_calc = spl_fixpt_mul(hdr_mult, spl_fixpt_mul(c2_mult, spl_fixpt_from_fraction(
+ 16384LL, 125LL)));
+
+ fmt.exponenta_bits = 5;
+ fmt.mantissa_bits = 10;
+ fmt.sign = true;
+
+ // fp1.5.10, C0 coefficient (LN_rec709: HDR_MULT * 0.212600 * 2^14/125)
+ spl_convert_to_custom_float_format(c0_calc, &fmt, &dscl_prog_data->easf_matrix_c0);
+ // fp1.5.10, C1 coefficient (LN_rec709: HDR_MULT * 0.715200 * 2^14/125)
+ spl_convert_to_custom_float_format(c1_calc, &fmt, &dscl_prog_data->easf_matrix_c1);
+ // fp1.5.10, C2 coefficient (LN_rec709: HDR_MULT * 0.072200 * 2^14/125)
+ spl_convert_to_custom_float_format(c2_calc, &fmt, &dscl_prog_data->easf_matrix_c2);
+ dscl_prog_data->easf_matrix_c3 = 0x0; // fp1.5.10, C3 coefficient
}
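A numeric spot check of the C0 coefficient computed above, assuming
hdr_multx100 = 100 (HDR_MULT = 1.0); the result is what gets converted to
the fp1.5.10 custom float:

#include <stdio.h>

int main(void)
{
	double hdr_mult = 100 / 100.0;			 /* hdr_multx100 / 100 */
	double c0 = hdr_mult * 0.2126 * 16384.0 / 125.0; /* BT.709 R weight */

	printf("c0 = %f\n", c0);	/* ~27.866 */
	return 0;
}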
+
/* Set EASF data */
-static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
- bool enable_easf_v, bool enable_easf_h, enum linear_light_scaling lls_pref,
- enum spl_pixel_format format)
+static void spl_set_easf_data(struct spl_scratch *spl_scratch, struct spl_out *spl_out, bool enable_easf_v,
+ bool enable_easf_h, enum linear_light_scaling lls_pref,
+ enum spl_pixel_format format, enum system_setup setup,
+ uint32_t hdr_multx100)
{
- if (spl_is_yuv420(format)) /* TODO: 0 = RGB, 1 = YUV */
- dscl_prog_data->easf_matrix_mode = 1;
- else
- dscl_prog_data->easf_matrix_mode = 0;
-
+ struct dscl_prog_data *dscl_prog_data = spl_out->dscl_prog_data;
if (enable_easf_v) {
dscl_prog_data->easf_v_en = true;
dscl_prog_data->easf_v_ring = 0;
- dscl_prog_data->easf_v_sharp_factor = 1;
+ dscl_prog_data->easf_v_sharp_factor = 0;
dscl_prog_data->easf_v_bf1_en = 1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_v_bf2_mode = 0xF; // 4-bit, BF2 calculation mode
- dscl_prog_data->easf_v_bf3_mode = 2; // 2-bit, BF3 chroma mode correction calculation mode
- dscl_prog_data->easf_v_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
- dscl_prog_data->easf_v_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
- dscl_prog_data->easf_v_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+ /* 2-bit, BF3 chroma mode correction calculation mode */
+ dscl_prog_data->easf_v_bf3_mode = spl_get_v_bf3_mode(
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ minCoef ]*/
dscl_prog_data->easf_v_ringest_3tap_dntilt_uptilt =
- 0x9F00;// FP1.5.10 [minCoef] (-0.036109167214271)
+ spl_get_3tap_dntilt_uptilt_offset(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTiltMaxVal ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt_max =
- 0x24FE; // FP1.5.10 [upTiltMaxVal] ( 0.904556445553545)
+ spl_get_3tap_uptilt_maxval(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ dnTiltSlope ]*/
dscl_prog_data->easf_v_ringest_3tap_dntilt_slope =
- 0x3940; // FP1.5.10 [dnTiltSlope] ( 0.910488988173371)
+ spl_get_3tap_dntilt_slope(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTilt1Slope ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt1_slope =
- 0x359C; // FP1.5.10 [upTilt1Slope] ( 0.125620179040899)
+ spl_get_3tap_uptilt1_slope(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTilt2Slope ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt2_slope =
- 0x359C; // FP1.5.10 [upTilt2Slope] ( 0.006786817723568)
+ spl_get_3tap_uptilt2_slope(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10 [ upTilt2Offset ]*/
dscl_prog_data->easf_v_ringest_3tap_uptilt2_offset =
- 0x9F00; // FP1.5.10 [upTilt2Offset] (-0.006139059716651)
+ spl_get_3tap_uptilt2_offset(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+	/* FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [V_REDUCER_GAIN4] */
dscl_prog_data->easf_v_ringest_eventap_reduceg1 =
- 0x4000; // FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4]
+ spl_get_reducer_gain4(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6] */
dscl_prog_data->easf_v_ringest_eventap_reduceg2 =
- 0x4100; // FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6]
+ spl_get_reducer_gain6(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 */
dscl_prog_data->easf_v_ringest_eventap_gain1 =
- 0xB058; // FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024
+ spl_get_gainRing4(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
+ /* FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 */
dscl_prog_data->easf_v_ringest_eventap_gain2 =
- 0xA640; // FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024
+ spl_get_gainRing6(spl_scratch->scl_data.taps.v_taps,
+ spl_scratch->scl_data.recip_ratios.vert);
dscl_prog_data->easf_v_bf_maxa = 63; //Vertical Max BF value A in U0.6 format. Selected if V_FCNTL == 0
dscl_prog_data->easf_v_bf_maxb = 63; //Vertical Max BF value B in U0.6 format. Selected if V_FCNTL == 1
dscl_prog_data->easf_v_bf_mina = 0; //Vertical Min BF value A in U0.6 format. Selected if V_FCNTL == 0
dscl_prog_data->easf_v_bf_minb = 0; //Vertical Min BF value B in U0.6 format. Selected if V_FCNTL == 1
- dscl_prog_data->easf_v_bf1_pwl_in_seg0 = -512; // S0.10, BF1 PWL Segment 0
- dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
- dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
- dscl_prog_data->easf_v_bf1_pwl_in_seg1 = -20; // S0.10, BF1 PWL Segment 1
- dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
- dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
- dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
- dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
- dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
- dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
- dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
- dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = -56; // S7.3, BF1 Slope PWL Segment 3
- dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
- dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
- dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = -48; // S7.3, BF1 Slope PWL Segment 4
- dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
- dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
- dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = -240; // S7.3, BF1 Slope PWL Segment 5
- dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
- dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
- dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = -160; // S7.3, BF1 Slope PWL Segment 6
- dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
- dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
if (lls_pref == LLS_PREF_YES) {
+ dscl_prog_data->easf_v_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_v_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_v_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_v_bf1_pwl_in_seg0 = 0x600; // S0.10, BF1 PWL Segment 0 = -512
+ dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_in_seg1 = 0x7EC; // S0.10, BF1 PWL Segment 1 = -20
+ dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = 0x7C8; // S7.3, BF1 Slope PWL Segment 3 = -56
+ dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = 0x7D0; // S7.3, BF1 Slope PWL Segment 4 = -48
+ dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = 0x710; // S7.3, BF1 Slope PWL Segment 5 = -240
+ dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = 0x760; // S7.3, BF1 Slope PWL Segment 6 = -160
+ dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_v_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_slope_set0 = 0x12C5; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1090,13 +1296,41 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
0x136B; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_v_bf3_pwl_in_set4 =
0x0C37; // FP0.6.6, BF3 Input value PWL Segment 4 (0.125 * 125^3)
- dscl_prog_data->easf_v_bf3_pwl_base_set4 = -50; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf3_pwl_base_set4 = 0x4E; // S0.6, BF3 Base PWL Segment 4 = -50
dscl_prog_data->easf_v_bf3_pwl_slope_set4 =
0x1200; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_v_bf3_pwl_in_set5 =
0x0CF7; // FP0.6.6, BF3 Input value PWL Segment 5 (1.0 * 125^3)
- dscl_prog_data->easf_v_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
} else {
+ dscl_prog_data->easf_v_bf2_flat1_gain = 13; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_v_bf2_flat2_gain = 15; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_v_bf2_roc_gain = 14; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_v_bf1_pwl_in_seg0 = 0x440; // S0.10, BF1 PWL Segment 0 = -960
+ dscl_prog_data->easf_v_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg0 = 2; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_v_bf1_pwl_in_seg1 = 0x7C4; // S0.10, BF1 PWL Segment 1 = -60
+ dscl_prog_data->easf_v_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg1 = 109; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_v_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_v_bf1_pwl_in_seg3 = 48; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg3 = 0x7ED; // S7.3, BF1 Slope PWL Segment 3 = -19
+ dscl_prog_data->easf_v_bf1_pwl_in_seg4 = 96; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg4 = 0x7F0; // S7.3, BF1 Slope PWL Segment 4 = -16
+ dscl_prog_data->easf_v_bf1_pwl_in_seg5 = 144; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg5 = 0x7B0; // S7.3, BF1 Slope PWL Segment 5 = -80
+ dscl_prog_data->easf_v_bf1_pwl_in_seg6 = 192; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_v_bf1_pwl_slope_seg6 = 0x7CB; // S7.3, BF1 Slope PWL Segment 6 = -53
+ dscl_prog_data->easf_v_bf1_pwl_in_seg7 = 240; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_v_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_v_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_v_bf3_pwl_slope_set0 = 0x0000; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1115,11 +1349,11 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
0x1878; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_v_bf3_pwl_in_set4 =
0x0761; // FP0.6.6, BF3 Input value PWL Segment 4 (0.375)
- dscl_prog_data->easf_v_bf3_pwl_base_set4 = -60; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_v_bf3_pwl_base_set4 = 0x44; // S0.6, BF3 Base PWL Segment 4 = -60
dscl_prog_data->easf_v_bf3_pwl_slope_set4 = 0x1760; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_v_bf3_pwl_in_set5 =
0x0780; // FP0.6.6, BF3 Input value PWL Segment 5 (0.5)
- dscl_prog_data->easf_v_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_v_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
}
} else
dscl_prog_data->easf_v_en = false;
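A note on the hex constants used for the signed PWL fields above: they are two's complement values pre-packed into the register field width (11 bits for S0.10 and S7.3, 7 bits for S0.6). A minimal sketch of that encoding (hypothetical helper, not in the patch):

/* Pack a signed value into an n-bit two's complement register field.
 * Examples matching the constants above: -512 in 11 bits -> 0x600,
 * -56 in 11 bits -> 0x7C8, -50 in 7 bits -> 0x4E. */
static unsigned int pack_signed_field(int value, unsigned int field_bits)
{
	unsigned int mask = (1u << field_bits) - 1;

	return (unsigned int)value & mask;
}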
@@ -1127,52 +1361,63 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
if (enable_easf_h) {
dscl_prog_data->easf_h_en = true;
dscl_prog_data->easf_h_ring = 0;
- dscl_prog_data->easf_h_sharp_factor = 1;
+ dscl_prog_data->easf_h_sharp_factor = 0;
dscl_prog_data->easf_h_bf1_en =
1; // 1-bit, BF1 calculation enable, 0=disable, 1=enable
dscl_prog_data->easf_h_bf2_mode =
0xF; // 4-bit, BF2 calculation mode
- dscl_prog_data->easf_h_bf3_mode =
- 2; // 2-bit, BF3 chroma mode correction calculation mode
- dscl_prog_data->easf_h_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
- dscl_prog_data->easf_h_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
- dscl_prog_data->easf_h_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+ /* 2-bit, BF3 chroma mode correction calculation mode */
+ dscl_prog_data->easf_h_bf3_mode = spl_get_h_bf3_mode(
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4] */
dscl_prog_data->easf_h_ringest_eventap_reduceg1 =
- 0x4000; // FP1.5.10; (2.0) Ring reducer gain for 4 or 6-tap mode [H_REDUCER_GAIN4]
+ spl_get_reducer_gain4(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
+	/* FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [H_REDUCER_GAIN6] */
dscl_prog_data->easf_h_ringest_eventap_reduceg2 =
- 0x4100; // FP1.5.10; (2.5) Ring reducer gain for 6-tap mode [V_REDUCER_GAIN6]
+ spl_get_reducer_gain6(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024 */
dscl_prog_data->easf_h_ringest_eventap_gain1 =
- 0xB058; // FP1.5.10; (-0.135742) Ring gain for 6-tap set to -139/1024
+ spl_get_gainRing4(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
+ /* FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024 */
dscl_prog_data->easf_h_ringest_eventap_gain2 =
- 0xA640; // FP1.5.10; (-0.024414) Ring gain for 6-tap set to -25/1024
+ spl_get_gainRing6(spl_scratch->scl_data.taps.h_taps,
+ spl_scratch->scl_data.recip_ratios.horz);
dscl_prog_data->easf_h_bf_maxa = 63; //Horz Max BF value A in U0.6 format. Selected if H_FCNTL==0
dscl_prog_data->easf_h_bf_maxb = 63; //Horz Max BF value B in U0.6 format. Selected if H_FCNTL==1
dscl_prog_data->easf_h_bf_mina = 0; //Horz Min BF value A in U0.6 format. Selected if H_FCNTL==0
dscl_prog_data->easf_h_bf_minb = 0; //Horz Min BF value B in U0.6 format. Selected if H_FCNTL==1
- dscl_prog_data->easf_h_bf1_pwl_in_seg0 = -512; // S0.10, BF1 PWL Segment 0
- dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
- dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
- dscl_prog_data->easf_h_bf1_pwl_in_seg1 = -20; // S0.10, BF1 PWL Segment 1
- dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
- dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
- dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
- dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
- dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
- dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
- dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
- dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = -56; // S7.3, BF1 Slope PWL Segment 3
- dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
- dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
- dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = -48; // S7.3, BF1 Slope PWL Segment 4
- dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
- dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
- dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = -240; // S7.3, BF1 Slope PWL Segment 5
- dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
- dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
- dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = -160; // S7.3, BF1 Slope PWL Segment 6
- dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
- dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
if (lls_pref == LLS_PREF_YES) {
+ dscl_prog_data->easf_h_bf2_flat1_gain = 4; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_h_bf2_flat2_gain = 8; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_h_bf2_roc_gain = 4; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_h_bf1_pwl_in_seg0 = 0x600; // S0.10, BF1 PWL Segment 0 = -512
+ dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 3; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_in_seg1 = 0x7EC; // S0.10, BF1 PWL Segment 1 = -20
+ dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 326; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 16; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = 0x7C8; // S7.3, BF1 Slope PWL Segment 3 = -56
+ dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 32; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = 0x7D0; // S7.3, BF1 Slope PWL Segment 4 = -48
+ dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 48; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = 0x710; // S7.3, BF1 Slope PWL Segment 5 = -240
+ dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 64; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = 0x760; // S7.3, BF1 Slope PWL Segment 6 = -160
+ dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 80; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_h_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_slope_set0 = 0x12C5; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1190,12 +1435,40 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->easf_h_bf3_pwl_slope_set3 = 0x136B; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_h_bf3_pwl_in_set4 =
0x0C37; // FP0.6.6, BF3 Input value PWL Segment 4 (0.125 * 125^3)
- dscl_prog_data->easf_h_bf3_pwl_base_set4 = -50; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf3_pwl_base_set4 = 0x4E; // S0.6, BF3 Base PWL Segment 4 = -50
dscl_prog_data->easf_h_bf3_pwl_slope_set4 = 0x1200; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_h_bf3_pwl_in_set5 =
0x0CF7; // FP0.6.6, BF3 Input value PWL Segment 5 (1.0 * 125^3)
- dscl_prog_data->easf_h_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
} else {
+ dscl_prog_data->easf_h_bf2_flat1_gain = 13; // U1.3, BF2 Flat1 Gain control
+ dscl_prog_data->easf_h_bf2_flat2_gain = 15; // U4.0, BF2 Flat2 Gain control
+ dscl_prog_data->easf_h_bf2_roc_gain = 14; // U2.2, Rate Of Change control
+
+ dscl_prog_data->easf_h_bf1_pwl_in_seg0 = 0x440; // S0.10, BF1 PWL Segment 0 = -960
+ dscl_prog_data->easf_h_bf1_pwl_base_seg0 = 0; // U0.6, BF1 Base PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg0 = 2; // S7.3, BF1 Slope PWL Segment 0
+ dscl_prog_data->easf_h_bf1_pwl_in_seg1 = 0x7C4; // S0.10, BF1 PWL Segment 1 = -60
+ dscl_prog_data->easf_h_bf1_pwl_base_seg1 = 12; // U0.6, BF1 Base PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg1 = 109; // S7.3, BF1 Slope PWL Segment 1
+ dscl_prog_data->easf_h_bf1_pwl_in_seg2 = 0; // S0.10, BF1 PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_base_seg2 = 63; // U0.6, BF1 Base PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg2 = 0; // S7.3, BF1 Slope PWL Segment 2
+ dscl_prog_data->easf_h_bf1_pwl_in_seg3 = 48; // S0.10, BF1 PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_base_seg3 = 63; // U0.6, BF1 Base PWL Segment 3
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg3 = 0x7ED; // S7.3, BF1 Slope PWL Segment 3 = -19
+ dscl_prog_data->easf_h_bf1_pwl_in_seg4 = 96; // S0.10, BF1 PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_base_seg4 = 56; // U0.6, BF1 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg4 = 0x7F0; // S7.3, BF1 Slope PWL Segment 4 = -16
+ dscl_prog_data->easf_h_bf1_pwl_in_seg5 = 144; // S0.10, BF1 PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_base_seg5 = 50; // U0.6, BF1 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg5 = 0x7B0; // S7.3, BF1 Slope PWL Segment 5 = -80
+ dscl_prog_data->easf_h_bf1_pwl_in_seg6 = 192; // S0.10, BF1 PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_base_seg6 = 20; // U0.6, BF1 Base PWL Segment 6
+ dscl_prog_data->easf_h_bf1_pwl_slope_seg6 = 0x7CB; // S7.3, BF1 Slope PWL Segment 6 = -53
+ dscl_prog_data->easf_h_bf1_pwl_in_seg7 = 240; // S0.10, BF1 PWL Segment 7
+ dscl_prog_data->easf_h_bf1_pwl_base_seg7 = 0; // U0.6, BF1 Base PWL Segment 7
+
dscl_prog_data->easf_h_bf3_pwl_in_set0 = 0x000; // FP0.6.6, BF3 Input value PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_base_set0 = 63; // S0.6, BF3 Base PWL Segment 0
dscl_prog_data->easf_h_bf3_pwl_slope_set0 = 0x0000; // FP1.6.6, BF3 Slope PWL Segment 0
@@ -1213,25 +1486,30 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->easf_h_bf3_pwl_slope_set3 = 0x1878; // FP1.6.6, BF3 Slope PWL Segment 3
dscl_prog_data->easf_h_bf3_pwl_in_set4 =
0x0761; // FP0.6.6, BF3 Input value PWL Segment 4 (0.375)
- dscl_prog_data->easf_h_bf3_pwl_base_set4 = -60; // S0.6, BF3 Base PWL Segment 4
+ dscl_prog_data->easf_h_bf3_pwl_base_set4 = 0x44; // S0.6, BF3 Base PWL Segment 4 = -60
dscl_prog_data->easf_h_bf3_pwl_slope_set4 = 0x1760; // FP1.6.6, BF3 Slope PWL Segment 4
dscl_prog_data->easf_h_bf3_pwl_in_set5 =
0x0780; // FP0.6.6, BF3 Input value PWL Segment 5 (0.5)
- dscl_prog_data->easf_h_bf3_pwl_base_set5 = -63; // S0.6, BF3 Base PWL Segment 5
+ dscl_prog_data->easf_h_bf3_pwl_base_set5 = 0x41; // S0.6, BF3 Base PWL Segment 5 = -63
} // if (lls_pref == LLS_PREF_YES)
} else
dscl_prog_data->easf_h_en = false;
if (lls_pref == LLS_PREF_YES) {
dscl_prog_data->easf_ltonl_en = 1; // Linear input
- dscl_prog_data->easf_matrix_c0 =
- 0x504E; // fp1.5.10, C0 coefficient (LN_BT2020: 0.2627 * (2^14)/125 = 34.43750000)
- dscl_prog_data->easf_matrix_c1 =
- 0x558E; // fp1.5.10, C1 coefficient (LN_BT2020: 0.6780 * (2^14)/125 = 88.87500000)
- dscl_prog_data->easf_matrix_c2 =
- 0x47C6; // fp1.5.10, C2 coefficient (LN_BT2020: 0.0593 * (2^14)/125 = 7.77343750)
- dscl_prog_data->easf_matrix_c3 =
- 0x0; // fp1.5.10, C3 coefficient
+ if ((setup == HDR_L) && (spl_is_rgb8(format))) {
+ /* Calculate C0-C3 coefficients based on HDR multiplier */
+ spl_calculate_c0_c3_hdr(dscl_prog_data, hdr_multx100);
+	} else { // HDR_L (DWM) and SDR_L
+ dscl_prog_data->easf_matrix_c0 =
+ 0x4EF7; // fp1.5.10, C0 coefficient (LN_rec709: 0.2126 * (2^14)/125 = 27.86590720)
+ dscl_prog_data->easf_matrix_c1 =
+ 0x55DC; // fp1.5.10, C1 coefficient (LN_rec709: 0.7152 * (2^14)/125 = 93.74269440)
+ dscl_prog_data->easf_matrix_c2 =
+ 0x48BB; // fp1.5.10, C2 coefficient (LN_rec709: 0.0722 * (2^14)/125 = 9.46339840)
+ dscl_prog_data->easf_matrix_c3 =
+ 0x0; // fp1.5.10, C3 coefficient
+ }
} else {
dscl_prog_data->easf_ltonl_en = 0; // Non-Linear input
dscl_prog_data->easf_matrix_c0 =
@@ -1243,27 +1521,43 @@ static void spl_set_easf_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->easf_matrix_c3 =
0x0; // fp1.5.10, C3 coefficient
}
+
+ if (spl_is_yuv420(format)) { /* TODO: 0 = RGB, 1 = YUV */
+ dscl_prog_data->easf_matrix_mode = 1;
+ /*
+ * 2-bit, BF3 chroma mode correction calculation mode
+ * Needs to be disabled for YUV420 mode
+ * Override lookup value
+ */
+ dscl_prog_data->easf_v_bf3_mode = 0;
+ dscl_prog_data->easf_h_bf3_mode = 0;
+ } else
+ dscl_prog_data->easf_matrix_mode = 0;
+
}
+
/*Set isharp noise detection */
-static void spl_set_isharp_noise_det_mode(struct dscl_prog_data *dscl_prog_data)
+static void spl_set_isharp_noise_det_mode(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data)
{
// ISHARP_NOISEDET_MODE
// 0: 3x5 as VxH
// 1: 4x5 as VxH
// 2:
// 3: 5x5 as VxH
- if (dscl_prog_data->taps.v_taps == 6)
- dscl_prog_data->isharp_noise_det.mode = 3; // ISHARP_NOISEDET_MODE
- else if (dscl_prog_data->taps.h_taps == 4)
- dscl_prog_data->isharp_noise_det.mode = 1; // ISHARP_NOISEDET_MODE
- else if (dscl_prog_data->taps.h_taps == 3)
- dscl_prog_data->isharp_noise_det.mode = 0; // ISHARP_NOISEDET_MODE
+ if (data->taps.v_taps == 6)
+ dscl_prog_data->isharp_noise_det.mode = 3;
+ else if (data->taps.v_taps == 4)
+ dscl_prog_data->isharp_noise_det.mode = 1;
+ else if (data->taps.v_taps == 3)
+ dscl_prog_data->isharp_noise_det.mode = 0;
}
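For reference, the tap-count to detection-window mapping above, restated as a hypothetical lookup (window sizes per the VxH comment block: mode 0 = 3x5, mode 1 = 4x5, mode 3 = 5x5):

/* Hypothetical restatement of the v_taps -> ISHARP_NOISEDET_MODE mapping */
static const int noisedet_mode_by_vtaps[7] = {
	[3] = 0,	/* 3x5 detection window */
	[4] = 1,	/* 4x5 detection window */
	[6] = 3,	/* 5x5 detection window */
};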
/* Set Sharpener data */
static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
struct adaptive_sharpness adp_sharpness, bool enable_isharp,
enum linear_light_scaling lls_pref, enum spl_pixel_format format,
- const struct spl_scaler_data *data)
+ const struct spl_scaler_data *data, struct spl_fixed31_32 ratio,
+ enum system_setup setup)
{
/* Turn off sharpener if not required */
if (!enable_isharp) {
@@ -1272,10 +1566,12 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
}
dscl_prog_data->isharp_en = 1; // ISHARP_EN
- dscl_prog_data->isharp_noise_det.enable = 1; // ISHARP_NOISEDET_EN
// Set ISHARP_NOISEDET_MODE if h_taps == 6
- if (dscl_prog_data->taps.h_taps == 6)
- spl_set_isharp_noise_det_mode(dscl_prog_data); // ISHARP_NOISEDET_MODE
+ if (data->taps.h_taps == 6) {
+ dscl_prog_data->isharp_noise_det.enable = 1; /* ISHARP_NOISEDET_EN */
+ spl_set_isharp_noise_det_mode(dscl_prog_data, data); /* ISHARP_NOISEDET_MODE */
+ } else
+ dscl_prog_data->isharp_noise_det.enable = 0; // ISHARP_NOISEDET_EN
// Program noise detection threshold
dscl_prog_data->isharp_noise_det.uthreshold = 24; // ISHARP_NOISEDET_UTHRE
dscl_prog_data->isharp_noise_det.dthreshold = 4; // ISHARP_NOISEDET_DTHRE
@@ -1284,50 +1580,93 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
dscl_prog_data->isharp_noise_det.pwl_end_in = 13; // ISHARP_NOISEDET_PWL_END_IN
dscl_prog_data->isharp_noise_det.pwl_slope = 1623; // ISHARP_NOISEDET_PWL_SLOPE
- if ((lls_pref == LLS_PREF_NO) && !spl_is_yuv420(format)) /* ISHARP_FMT_MODE */
+ if (lls_pref == LLS_PREF_NO) /* ISHARP_FMT_MODE */
dscl_prog_data->isharp_fmt.mode = 1;
else
dscl_prog_data->isharp_fmt.mode = 0;
dscl_prog_data->isharp_fmt.norm = 0x3C00; // ISHARP_FMT_NORM
dscl_prog_data->isharp_lba.mode = 0; // ISHARP_LBA_MODE
- // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
- dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[0] = 32; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
- dscl_prog_data->isharp_lba.in_seg[1] = 256; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
- dscl_prog_data->isharp_lba.in_seg[2] = 614; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[2] = -20; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
- dscl_prog_data->isharp_lba.in_seg[3] = 1023; // ISHARP LBA PWL for Seg 3.INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[3] = 0; // ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[3] = 0; // ISHARP LBA for Seg 3. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
- dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4.INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[4] = 0; // ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
- dscl_prog_data->isharp_lba.slope_seg[4] = 0; // ISHARP LBA for Seg 4. SLOPE value in S5.3 format
- // ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
- dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5.INPUT value in U0.10 format
- dscl_prog_data->isharp_lba.base_seg[5] = 0; // ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
- switch (adp_sharpness.sharpness) {
- case SHARPNESS_LOW:
- dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_0p5x();
- break;
- case SHARPNESS_MID:
- dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_1p0x();
- break;
- case SHARPNESS_HIGH:
- dscl_prog_data->isharp_delta = spl_get_filter_isharp_1D_lut_2p0x();
- break;
- default:
- BREAK_TO_DEBUGGER();
+
+ if (setup == SDR_L) {
+ // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
+ dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[0] = 62; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
+ dscl_prog_data->isharp_lba.in_seg[1] = 130; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
+ dscl_prog_data->isharp_lba.in_seg[2] = 450; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[2] = 0x18D; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -115
+ // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
+		dscl_prog_data->isharp_lba.in_seg[3] = 520; // ISHARP LBA PWL for Seg 3. INPUT value in U0.10 format
+		dscl_prog_data->isharp_lba.base_seg[3] = 0;	// ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
+		dscl_prog_data->isharp_lba.slope_seg[3] = 0;	// ISHARP LBA for Seg 3. SLOPE value in S5.3 format
+		// ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
+		dscl_prog_data->isharp_lba.in_seg[4] = 520; // ISHARP LBA PWL for Seg 4. INPUT value in U0.10 format
+		dscl_prog_data->isharp_lba.base_seg[4] = 0;	// ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
+		dscl_prog_data->isharp_lba.slope_seg[4] = 0;	// ISHARP LBA for Seg 4. SLOPE value in S5.3 format
+		// ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
+		dscl_prog_data->isharp_lba.in_seg[5] = 520; // ISHARP LBA PWL for Seg 5. INPUT value in U0.10 format
+		dscl_prog_data->isharp_lba.base_seg[5] = 0;	// ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
+ } else if (setup == HDR_L) {
+ // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
+ dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[0] = 32; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
+ dscl_prog_data->isharp_lba.in_seg[1] = 254; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
+ dscl_prog_data->isharp_lba.in_seg[2] = 559; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[2] = 0x10C; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -244
+ // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
+		dscl_prog_data->isharp_lba.in_seg[3] = 592; // ISHARP LBA PWL for Seg 3. INPUT value in U0.10 format
+		dscl_prog_data->isharp_lba.base_seg[3] = 0;	// ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
+		dscl_prog_data->isharp_lba.slope_seg[3] = 0;	// ISHARP LBA for Seg 3. SLOPE value in S5.3 format
+		// ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
+		dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4. INPUT value in U0.10 format
+		dscl_prog_data->isharp_lba.base_seg[4] = 0;	// ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
+		dscl_prog_data->isharp_lba.slope_seg[4] = 0;	// ISHARP LBA for Seg 4. SLOPE value in S5.3 format
+		// ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
+		dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5. INPUT value in U0.10 format
+		dscl_prog_data->isharp_lba.base_seg[5] = 0;	// ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
+ } else {
+ // ISHARP_LBA_PWL_SEG0: ISHARP Local Brightness Adjustment PWL Segment 0
+ dscl_prog_data->isharp_lba.in_seg[0] = 0; // ISHARP LBA PWL for Seg 0. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[0] = 0; // ISHARP LBA PWL for Seg 0. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[0] = 40; // ISHARP LBA for Seg 0. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG1: ISHARP LBA PWL Segment 1
+ dscl_prog_data->isharp_lba.in_seg[1] = 204; // ISHARP LBA PWL for Seg 1. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[1] = 63; // ISHARP LBA PWL for Seg 1. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[1] = 0; // ISHARP LBA for Seg 1. SLOPE value in S5.3 format
+ // ISHARP_LBA_PWL_SEG2: ISHARP LBA PWL Segment 2
+ dscl_prog_data->isharp_lba.in_seg[2] = 818; // ISHARP LBA PWL for Seg 2. INPUT value in U0.10 format
+ dscl_prog_data->isharp_lba.base_seg[2] = 63; // ISHARP LBA PWL for Seg 2. BASE value in U0.6 format
+ dscl_prog_data->isharp_lba.slope_seg[2] = 0x1D9; // ISHARP LBA for Seg 2. SLOPE value in S5.3 format = -39
+ // ISHARP_LBA_PWL_SEG3: ISHARP LBA PWL Segment 3
+		dscl_prog_data->isharp_lba.in_seg[3] = 1023; // ISHARP LBA PWL for Seg 3. INPUT value in U0.10 format
+		dscl_prog_data->isharp_lba.base_seg[3] = 0;	// ISHARP LBA PWL for Seg 3. BASE value in U0.6 format
+		dscl_prog_data->isharp_lba.slope_seg[3] = 0;	// ISHARP LBA for Seg 3. SLOPE value in S5.3 format
+		// ISHARP_LBA_PWL_SEG4: ISHARP LBA PWL Segment 4
+		dscl_prog_data->isharp_lba.in_seg[4] = 1023; // ISHARP LBA PWL for Seg 4. INPUT value in U0.10 format
+		dscl_prog_data->isharp_lba.base_seg[4] = 0;	// ISHARP LBA PWL for Seg 4. BASE value in U0.6 format
+		dscl_prog_data->isharp_lba.slope_seg[4] = 0;	// ISHARP LBA for Seg 4. SLOPE value in S5.3 format
+		// ISHARP_LBA_PWL_SEG5: ISHARP LBA PWL Segment 5
+		dscl_prog_data->isharp_lba.in_seg[5] = 1023; // ISHARP LBA PWL for Seg 5. INPUT value in U0.10 format
+		dscl_prog_data->isharp_lba.base_seg[5] = 0;	// ISHARP LBA PWL for Seg 5. BASE value in U0.6 format
}
+
+ spl_build_isharp_1dlut_from_reference_curve(ratio, setup, adp_sharpness);
+ dscl_prog_data->isharp_delta = spl_get_pregen_filter_isharp_1D_lut(setup);
+ dscl_prog_data->sharpness_level = adp_sharpness.sharpness_level;
+
// Program the nldelta soft clip values
if (lls_pref == LLS_PREF_YES) {
dscl_prog_data->isharp_nldelta_sclip.enable_p = 0; /* ISHARP_NLDELTA_SCLIP_EN_P */
@@ -1346,62 +1685,7 @@ static void spl_set_isharp_data(struct dscl_prog_data *dscl_prog_data,
}
// Set the values as per lookup table
-#ifdef CONFIG_DRM_AMD_DC_FP
spl_set_blur_scale_data(dscl_prog_data, data);
-#endif
-}
-static bool spl_get_isharp_en(struct adaptive_sharpness adp_sharpness,
- int vscale_ratio, int hscale_ratio, struct spl_taps taps,
- enum spl_pixel_format format)
-{
- bool enable_isharp = false;
-
- if (adp_sharpness.enable == false)
- return enable_isharp; // Return if adaptive sharpness is disabled
- // Is downscaling ?
- if (vscale_ratio > 1 || hscale_ratio > 1) {
- // END - No iSHARP support for downscaling
- return enable_isharp;
- }
- // Scaling is up to 1:1 (no scaling) or upscaling
-
- /* Only apply sharpness to NV12 and not P010 */
- if (format != SPL_PIXEL_FORMAT_420BPP8)
- return enable_isharp;
-
- // LB support horizontal taps 4,6 or vertical taps 3, 4, 6
- if (taps.h_taps == 4 || taps.h_taps == 6 ||
- taps.v_taps == 3 || taps.v_taps == 4 || taps.v_taps == 6) {
- // END - iSHARP supported
- enable_isharp = true;
- }
- return enable_isharp;
-}
-
-static bool spl_choose_lls_policy(enum spl_pixel_format format,
- enum spl_transfer_func_type tf_type,
- enum spl_transfer_func_predefined tf_predefined_type,
- enum linear_light_scaling *lls_pref)
-{
- if (spl_is_yuv420(format)) {
- *lls_pref = LLS_PREF_NO;
- if ((tf_type == SPL_TF_TYPE_PREDEFINED) || (tf_type == SPL_TF_TYPE_DISTRIBUTED_POINTS))
- return true;
- } else { /* RGB or YUV444 */
- if (tf_type == SPL_TF_TYPE_PREDEFINED) {
- if ((tf_predefined_type == SPL_TRANSFER_FUNCTION_HLG) ||
- (tf_predefined_type == SPL_TRANSFER_FUNCTION_HLG12))
- *lls_pref = LLS_PREF_NO;
- else
- *lls_pref = LLS_PREF_YES;
- return true;
- } else if (tf_type == SPL_TF_TYPE_BYPASS) {
- *lls_pref = LLS_PREF_YES;
- return true;
- }
- }
- *lls_pref = LLS_PREF_NO;
- return false;
}
/* Calculate scaler parameters */
@@ -1410,67 +1694,74 @@ bool spl_calculate_scaler_params(struct spl_in *spl_in, struct spl_out *spl_out)
bool res = false;
bool enable_easf_v = false;
bool enable_easf_h = false;
- bool lls_enable_easf = true;
int vratio = 0;
int hratio = 0;
- const struct spl_scaler_data *data = &spl_out->scl_data;
+ struct spl_scratch spl_scratch;
+ struct spl_fixed31_32 isharp_scale_ratio;
+ enum system_setup setup;
+ bool enable_isharp = false;
+ const struct spl_scaler_data *data = &spl_scratch.scl_data;
+
+ memset(&spl_scratch, 0, sizeof(struct spl_scratch));
+ spl_scratch.scl_data.h_active = spl_in->h_active;
+ spl_scratch.scl_data.v_active = spl_in->v_active;
+
// All SPL calls
/* recout calculation */
/* depends on h_active */
- spl_calculate_recout(spl_in, spl_out);
+ spl_calculate_recout(spl_in, &spl_scratch, spl_out);
/* depends on pixel format */
- spl_calculate_scaling_ratios(spl_in, spl_out);
+ spl_calculate_scaling_ratios(spl_in, &spl_scratch, spl_out);
/* depends on scaling ratios and recout, does not calculate offset yet */
- spl_calculate_viewport_size(spl_in, spl_out);
+ spl_calculate_viewport_size(spl_in, &spl_scratch);
res = spl_get_optimal_number_of_taps(
spl_in->basic_out.max_downscale_src_width, spl_in,
- spl_out, &spl_in->scaling_quality);
+ &spl_scratch, &spl_in->scaling_quality, &enable_easf_v,
+ &enable_easf_h, &enable_isharp);
/*
* Depends on recout, scaling ratios, h_active and taps
* May need to re-check lb size after this in some obscure scenario
*/
if (res)
- spl_calculate_inits_and_viewports(spl_in, spl_out);
+ spl_calculate_inits_and_viewports(spl_in, &spl_scratch);
// Handle 3d recout
- spl_handle_3d_recout(spl_in, &spl_out->scl_data.recout);
+ spl_handle_3d_recout(spl_in, &spl_scratch.scl_data.recout);
// Clamp
- spl_clamp_viewport(&spl_out->scl_data.viewport);
+ spl_clamp_viewport(&spl_scratch.scl_data.viewport);
if (!res)
return res;
- /*
- * If lls_pref is LLS_PREF_DONT_CARE, then use pixel format and transfer
- * function to determine whether to use LINEAR or NONLINEAR scaling
- */
- if (spl_in->lls_pref == LLS_PREF_DONT_CARE)
- lls_enable_easf = spl_choose_lls_policy(spl_in->basic_in.format,
- spl_in->basic_in.tf_type, spl_in->basic_in.tf_predefined_type,
- &spl_in->lls_pref);
-
// Save all calculated parameters in dscl_prog_data structure to program hw registers
- spl_set_dscl_prog_data(spl_in, spl_out);
+ spl_set_dscl_prog_data(spl_in, &spl_scratch, spl_out, enable_easf_v, enable_easf_h, enable_isharp);
- vratio = dc_fixpt_ceil(spl_out->scl_data.ratios.vert);
- hratio = dc_fixpt_ceil(spl_out->scl_data.ratios.horz);
- if (!lls_enable_easf || spl_in->disable_easf) {
- enable_easf_v = false;
- enable_easf_h = false;
+ if (spl_in->lls_pref == LLS_PREF_YES) {
+ if (spl_in->is_hdr_on)
+ setup = HDR_L;
+ else
+ setup = SDR_L;
} else {
- /* Enable EASF on vertical? */
- enable_easf_v = enable_easf(vratio, spl_out->scl_data.taps.v_taps, spl_in->lls_pref, spl_in->prefer_easf);
- /* Enable EASF on horizontal? */
- enable_easf_h = enable_easf(hratio, spl_out->scl_data.taps.h_taps, spl_in->lls_pref, spl_in->prefer_easf);
+ if (spl_in->is_hdr_on)
+ setup = HDR_NL;
+ else
+ setup = SDR_NL;
}
+
// Set EASF
- spl_set_easf_data(spl_out->dscl_prog_data, enable_easf_v, enable_easf_h, spl_in->lls_pref,
- spl_in->basic_in.format);
+ spl_set_easf_data(&spl_scratch, spl_out, enable_easf_v, enable_easf_h, spl_in->lls_pref,
+ spl_in->basic_in.format, setup, spl_in->hdr_multx100);
+
// Set iSHARP
- bool enable_isharp = spl_get_isharp_en(spl_in->adaptive_sharpness, vratio, hratio,
- spl_out->scl_data.taps, spl_in->basic_in.format);
+ vratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.vert);
+ hratio = spl_fixpt_ceil(spl_scratch.scl_data.ratios.horz);
+ if (vratio <= hratio)
+ isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.vert;
+ else
+ isharp_scale_ratio = spl_scratch.scl_data.recip_ratios.horz;
+
spl_set_isharp_data(spl_out->dscl_prog_data, spl_in->adaptive_sharpness, enable_isharp,
- spl_in->lls_pref, spl_in->basic_in.format, data);
+ spl_in->lls_pref, spl_in->basic_in.format, data, isharp_scale_ratio, setup);
return res;
}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c
new file mode 100644
index 000000000000..99238644e0a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "dc_spl_filters.h"
+
+void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter,
+ uint16_t *s1_12_filter, int num_taps)
+{
+ int num_entries = NUM_PHASES_COEFF * num_taps;
+ int i;
+
+ for (i = 0; i < num_entries; i++)
+ *(s1_12_filter + i) = *(s1_10_filter + i) * 4;
+}
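Multiplying each raw coefficient by 4 appends two fractional bits without changing the represented value: 0x0200 is 0.5 in S1.10 (512/1024) and becomes 0x0800, still 0.5 in S1.12 (2048/4096). A hypothetical usage sketch, where some_3tap_s1_10_table stands in for any of the static S1.10 tables added below:

uint16_t s1_12[NUM_PHASES_COEFF * 3];	/* 3 taps, 33 coefficient rows */

convert_filter_s1_10_to_s1_12(some_3tap_s1_10_table, s1_12, 3);
/* every entry: s1_12[i] == some_3tap_s1_10_table[i] << 2 */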
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h
new file mode 100644
index 000000000000..20439cdbdb10
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_filters.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __DC_SPL_FILTERS_H__
+#define __DC_SPL_FILTERS_H__
+
+#include "dc_spl_types.h"
+
+#define NUM_PHASES_COEFF 33
+
+void convert_filter_s1_10_to_s1_12(const uint16_t *s1_10_filter,
+ uint16_t *s1_12_filter, int num_taps);
+
+#endif /* __DC_SPL_FILTERS_H__ */
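The 64-phase filter tables in this library store NUM_PHASES_COEFF = 33 rows, presumably phase 0 through the midpoint with the upper phases recovered by symmetry (an assumption; the tables themselves only show 33 rows). Under that convention an N-tap table carries 33 * N entries, which matches the array sizes used below:

_Static_assert(NUM_PHASES_COEFF * 3 == 99,  "3-tap, 64-phase table size");
_Static_assert(NUM_PHASES_COEFF * 4 == 132, "4-tap, 64-phase table size");
_Static_assert(NUM_PHASES_COEFF * 6 == 198, "6-tap, 64-phase table size");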
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
index a5d9a6223d06..33712f50d303 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.c
@@ -2,6 +2,8 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
+#include "spl_debug.h"
+#include "dc_spl_filters.h"
#include "dc_spl_isharp_filters.h"
//========================================
@@ -15,7 +17,7 @@
// C_start = 40.000000
// C_end = 64.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_0[32] = {
+static const uint32_t filter_isharp_1D_lut_0[ISHARP_LUT_TABLE_SIZE] = {
0x02010000,
0x0A070503,
0x1614100D,
@@ -61,7 +63,7 @@ static const uint32_t filter_isharp_1D_lut_0[32] = {
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_0p5x[32] = {
+static const uint32_t filter_isharp_1D_lut_0p5x[ISHARP_LUT_TABLE_SIZE] = {
0x00000000,
0x02020101,
0x06050403,
@@ -106,7 +108,7 @@ static const uint32_t filter_isharp_1D_lut_0p5x[32] = {
// C_start = 96.000000
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_1p0x[32] = {
+static const uint32_t filter_isharp_1D_lut_1p0x[ISHARP_LUT_TABLE_SIZE] = {
0x01000000,
0x05040302,
0x0B0A0806,
@@ -151,7 +153,7 @@ static const uint32_t filter_isharp_1D_lut_1p0x[32] = {
// C_start = 96.000000
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_1p5x[32] = {
+static const uint32_t filter_isharp_1D_lut_1p5x[ISHARP_LUT_TABLE_SIZE] = {
0x01010000,
0x07050402,
0x110F0C0A,
@@ -196,7 +198,7 @@ static const uint32_t filter_isharp_1D_lut_1p5x[32] = {
// C_start = 40.000000
// C_end = 127.000000
//========================================
-static const uint32_t filter_isharp_1D_lut_2p0x[32] = {
+static const uint32_t filter_isharp_1D_lut_2p0x[ISHARP_LUT_TABLE_SIZE] = {
0x02010000,
0x0A070503,
0x1614100D,
@@ -230,6 +232,53 @@ static const uint32_t filter_isharp_1D_lut_2p0x[32] = {
0x080B0D0E,
0x00020406,
};
+//========================================
+// Delta Gain 1DLUT
+// LUT content is packed as 4-bytes into one DWORD/entry
+// A_start = 0.000000
+// A_end = 10.000000
+// A_gain = 3.000000
+// B_start = 11.000000
+// B_end = 127.000000
+// C_start = 40.000000
+// C_end = 127.000000
+//========================================
+static const uint32_t filter_isharp_1D_lut_3p0x[ISHARP_LUT_TABLE_SIZE] = {
+0x03010000,
+0x0F0B0805,
+0x211E1813,
+0x2B292624,
+0x3533302E,
+0x3E3C3A37,
+0x46444240,
+0x4D4B4A48,
+0x5352504F,
+0x59575655,
+0x5D5C5B5A,
+0x61605F5E,
+0x64646362,
+0x66666565,
+0x68686767,
+0x68686868,
+0x68686868,
+0x67676868,
+0x65656666,
+0x62636464,
+0x5E5F6061,
+0x5A5B5C5D,
+0x55565759,
+0x4F505253,
+0x484A4B4D,
+0x40424446,
+0x373A3C3E,
+0x2E303335,
+0x2426292B,
+0x191B1E21,
+0x0D101316,
+0x0003060A,
+};
+
+//========================================
// Wide scaler coefficients
//========================================================
// <using> gen_scaler_coeffs.m
@@ -284,7 +333,7 @@ static const uint16_t filter_isharp_wide_6tap_64p[198] = {
// <CoefType> Blur & Scale LPF
// <CoefQuant> S1.10
//========================================================
-static const uint16_t filter_isharp_bs_4tap_64p[198] = {
+static const uint16_t filter_isharp_bs_4tap_in_6_64p[198] = {
0x0000, 0x00E5, 0x0237, 0x00E4, 0x0000, 0x0000,
0x0000, 0x00DE, 0x0237, 0x00EB, 0x0000, 0x0000,
0x0000, 0x00D7, 0x0236, 0x00F2, 0x0001, 0x0000,
@@ -319,6 +368,138 @@ static const uint16_t filter_isharp_bs_4tap_64p[198] = {
0x0000, 0x003B, 0x01CF, 0x01C2, 0x0034, 0x0000,
0x0000, 0x0037, 0x01C9, 0x01C9, 0x0037, 0x0000
};
+//========================================================
+// <using> gen_BlurScale_coeffs.m
+// <date> 25-Apr-2022
+// <num_taps> 4
+// <num_phases> 64
+// <CoefType> Blur & Scale LPF
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t filter_isharp_bs_4tap_64p[132] = {
+0x00E5, 0x0237, 0x00E4, 0x0000,
+0x00DE, 0x0237, 0x00EB, 0x0000,
+0x00D7, 0x0236, 0x00F2, 0x0001,
+0x00D0, 0x0235, 0x00FA, 0x0001,
+0x00C9, 0x0234, 0x0101, 0x0002,
+0x00C2, 0x0233, 0x0108, 0x0003,
+0x00BB, 0x0232, 0x0110, 0x0003,
+0x00B5, 0x0230, 0x0117, 0x0004,
+0x00AE, 0x022E, 0x011F, 0x0005,
+0x00A8, 0x022C, 0x0126, 0x0006,
+0x00A2, 0x022A, 0x012D, 0x0007,
+0x009C, 0x0228, 0x0134, 0x0008,
+0x0096, 0x0225, 0x013C, 0x0009,
+0x0090, 0x0222, 0x0143, 0x000B,
+0x008A, 0x021F, 0x014B, 0x000C,
+0x0085, 0x021C, 0x0151, 0x000E,
+0x007F, 0x0218, 0x015A, 0x000F,
+0x007A, 0x0215, 0x0160, 0x0011,
+0x0074, 0x0211, 0x0168, 0x0013,
+0x006F, 0x020D, 0x016F, 0x0015,
+0x006A, 0x0209, 0x0176, 0x0017,
+0x0065, 0x0204, 0x017E, 0x0019,
+0x0060, 0x0200, 0x0185, 0x001B,
+0x005C, 0x01FB, 0x018C, 0x001D,
+0x0057, 0x01F6, 0x0193, 0x0020,
+0x0053, 0x01F1, 0x019A, 0x0022,
+0x004E, 0x01EC, 0x01A1, 0x0025,
+0x004A, 0x01E6, 0x01A8, 0x0028,
+0x0046, 0x01E1, 0x01AF, 0x002A,
+0x0042, 0x01DB, 0x01B6, 0x002D,
+0x003F, 0x01D5, 0x01BB, 0x0031,
+0x003B, 0x01CF, 0x01C2, 0x0034,
+0x0037, 0x01C9, 0x01C9, 0x0037,
+};
+//========================================================
+// <using> gen_BlurScale_coeffs.m
+// <date> 09-Jun-2022
+// <num_taps> 3
+// <num_phases> 64
+// <CoefType> Blur & Scale LPF
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t filter_isharp_bs_3tap_64p[99] = {
+0x0200, 0x0200, 0x0000,
+0x01F6, 0x0206, 0x0004,
+0x01EC, 0x020B, 0x0009,
+0x01E2, 0x0211, 0x000D,
+0x01D8, 0x0216, 0x0012,
+0x01CE, 0x021C, 0x0016,
+0x01C4, 0x0221, 0x001B,
+0x01BA, 0x0226, 0x0020,
+0x01B0, 0x022A, 0x0026,
+0x01A6, 0x022F, 0x002B,
+0x019C, 0x0233, 0x0031,
+0x0192, 0x0238, 0x0036,
+0x0188, 0x023C, 0x003C,
+0x017E, 0x0240, 0x0042,
+0x0174, 0x0244, 0x0048,
+0x016A, 0x0248, 0x004E,
+0x0161, 0x024A, 0x0055,
+0x0157, 0x024E, 0x005B,
+0x014D, 0x0251, 0x0062,
+0x0144, 0x0253, 0x0069,
+0x013A, 0x0256, 0x0070,
+0x0131, 0x0258, 0x0077,
+0x0127, 0x025B, 0x007E,
+0x011E, 0x025C, 0x0086,
+0x0115, 0x025E, 0x008D,
+0x010B, 0x0260, 0x0095,
+0x0102, 0x0262, 0x009C,
+0x00F9, 0x0263, 0x00A4,
+0x00F0, 0x0264, 0x00AC,
+0x00E7, 0x0265, 0x00B4,
+0x00DF, 0x0264, 0x00BD,
+0x00D6, 0x0265, 0x00C5,
+0x00CD, 0x0266, 0x00CD,
+};
+
+/* Converted Blur & Scale coeff tables from S1.10 to S1.12 */
+static uint16_t filter_isharp_bs_4tap_in_6_64p_s1_12[198];
+static uint16_t filter_isharp_bs_4tap_64p_s1_12[132];
+static uint16_t filter_isharp_bs_3tap_64p_s1_12[99];
+
+/* Pre-generated 1DLUT for given setup and sharpness level */
+static struct isharp_1D_lut_pregen filter_isharp_1D_lut_pregen[NUM_SHARPNESS_SETUPS] = {
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+ {
+ 0, 0,
+ {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ }
+ },
+};
+
const uint32_t *spl_get_filter_isharp_1D_lut_0(void)
{
return filter_isharp_1D_lut_0;
@@ -339,11 +520,165 @@ const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void)
{
return filter_isharp_1D_lut_2p0x;
}
+const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void)
+{
+ return filter_isharp_1D_lut_3p0x;
+}
const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void)
{
return filter_isharp_wide_6tap_64p;
}
-const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
+uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void)
{
- return filter_isharp_bs_4tap_64p;
+ return filter_isharp_bs_4tap_in_6_64p_s1_12;
}
+uint16_t *spl_get_filter_isharp_bs_4tap_64p(void)
+{
+ return filter_isharp_bs_4tap_64p_s1_12;
+}
+uint16_t *spl_get_filter_isharp_bs_3tap_64p(void)
+{
+ return filter_isharp_bs_3tap_64p_s1_12;
+}
+
+static unsigned int spl_calculate_sharpness_level(int discrete_sharpness_level, enum system_setup setup,
+ struct spl_sharpness_range sharpness_range)
+{
+	unsigned int sharpness_level = 0;
+	int min_sharpness, max_sharpness, mid_sharpness;
+	int lower_half_step_size, upper_half_step_size;
+
+	switch (setup) {
+ case HDR_L:
+ min_sharpness = sharpness_range.hdr_rgb_min;
+ max_sharpness = sharpness_range.hdr_rgb_max;
+ mid_sharpness = sharpness_range.hdr_rgb_mid;
+ break;
+ case HDR_NL:
+ /* currently no use case, use Non-linear SDR values for now */
+ case SDR_NL:
+ min_sharpness = sharpness_range.sdr_yuv_min;
+ max_sharpness = sharpness_range.sdr_yuv_max;
+ mid_sharpness = sharpness_range.sdr_yuv_mid;
+ break;
+ case SDR_L:
+ default:
+ min_sharpness = sharpness_range.sdr_rgb_min;
+ max_sharpness = sharpness_range.sdr_rgb_max;
+ mid_sharpness = sharpness_range.sdr_rgb_mid;
+ break;
+ }
+
+	lower_half_step_size = (mid_sharpness - min_sharpness) / 5;
+	upper_half_step_size = (max_sharpness - mid_sharpness) / 5;
+
+ // lower half linear approximation
+ if (discrete_sharpness_level < 5)
+ sharpness_level = min_sharpness + (lower_half_step_size * discrete_sharpness_level);
+ // upper half linear approximation
+ else
+ sharpness_level = mid_sharpness + (upper_half_step_size * (discrete_sharpness_level - 5));
+
+ return sharpness_level;
+}
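A worked example of the two-segment interpolation above, with hypothetical range values (x1000 units): for min = 250, mid = 1000, max = 3000, the lower step is (1000 - 250) / 5 = 150 and the upper step is (3000 - 1000) / 5 = 400, so discrete level 3 maps to 250 + 3 * 150 = 700 while level 8 maps to 1000 + (8 - 5) * 400 = 2200.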
+
+void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio, enum system_setup setup,
+ struct adaptive_sharpness sharpness)
+{
+	const uint8_t *byte_ptr_1dlut_src;
+	uint8_t *byte_ptr_1dlut_dst;
+ struct spl_fixed31_32 sharp_base, sharp_calc, sharp_level;
+ int j;
+ int size_1dlut;
+ int sharp_calc_int;
+ uint32_t filter_pregen_store[ISHARP_LUT_TABLE_SIZE];
+
+ /* Custom sharpnessX1000 value */
+ unsigned int sharpnessX1000 = spl_calculate_sharpness_level(sharpness.sharpness_level,
+ setup, sharpness.sharpness_range);
+ sharp_level = spl_fixpt_from_fraction(sharpnessX1000, 1000);
+
+ /*
+ * Check if pregen 1dlut table is already precalculated
+ * If numer/denom is different, then recalculate
+ */
+ if ((filter_isharp_1D_lut_pregen[setup].sharpness_numer == sharpnessX1000) &&
+ (filter_isharp_1D_lut_pregen[setup].sharpness_denom == 1000))
+ return;
+
+ /*
+ * Calculate LUT_128_gained with this equation:
+ *
+ * LUT_128_gained[i] = (uint8)(0.5 + min(255,(double)(LUT_128[i])*sharpLevel/iGain))
+ * where LUT_128[i] is contents of 3p0x isharp 1dlut
+ * where sharpLevel is desired sharpness level
+ * where iGain is base sharpness level 3.0
+ * where LUT_128_gained[i] is adjusted 1dlut value based on desired sharpness level
+ */
+	byte_ptr_1dlut_src = (const uint8_t *)filter_isharp_1D_lut_3p0x;
+ byte_ptr_1dlut_dst = (uint8_t *)filter_pregen_store;
+ size_1dlut = sizeof(filter_isharp_1D_lut_3p0x);
+ memset(byte_ptr_1dlut_dst, 0, size_1dlut);
+ for (j = 0; j < size_1dlut; j++) {
+ sharp_base = spl_fixpt_from_int((int)*byte_ptr_1dlut_src);
+ sharp_calc = spl_fixpt_mul(sharp_base, sharp_level);
+ sharp_calc = spl_fixpt_div(sharp_calc, spl_fixpt_from_int(3));
+ sharp_calc = spl_fixpt_min(spl_fixpt_from_int(255), sharp_calc);
+ sharp_calc = spl_fixpt_add(sharp_calc, spl_fixpt_from_fraction(1, 2));
+ sharp_calc_int = spl_fixpt_floor(sharp_calc);
+ /* Clamp it at 0x7F so it doesn't wrap */
+ if (sharp_calc_int > 127)
+ sharp_calc_int = 127;
+ *byte_ptr_1dlut_dst = (uint8_t)sharp_calc_int;
+
+ byte_ptr_1dlut_src++;
+ byte_ptr_1dlut_dst++;
+ }
+
+ /* Update 1dlut table and sharpness level */
+ memcpy((void *)filter_isharp_1D_lut_pregen[setup].value, (void *)filter_pregen_store, size_1dlut);
+ filter_isharp_1D_lut_pregen[setup].sharpness_numer = sharpnessX1000;
+ filter_isharp_1D_lut_pregen[setup].sharpness_denom = 1000;
+}
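A floating-point restatement of the LUT_128_gained equation from the comment above, handy for checking a single byte by hand (hypothetical helper, not part of the patch): with sharpLevel = 1.5, a 3p0x LUT byte of 0x68 (104) maps to 104 * 1.5 / 3.0 = 52.0, stored as 52.

#include <stdint.h>

static uint8_t isharp_lut_gain_ref(uint8_t lut_128, double sharp_level)
{
	double v = lut_128 * sharp_level / 3.0;	/* iGain = base level 3.0 */

	if (v > 255.0)
		v = 255.0;
	v += 0.5;		/* round half up; the cast below floors */
	if (v >= 128.0)
		v = 127.0;	/* same 0x7F anti-wrap clamp as the loop above */
	return (uint8_t)v;
}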
+
+uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup)
+{
+ return filter_isharp_1D_lut_pregen[setup].value;
+}
+
+void spl_init_blur_scale_coeffs(void)
+{
+ convert_filter_s1_10_to_s1_12(filter_isharp_bs_3tap_64p,
+ filter_isharp_bs_3tap_64p_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_64p,
+ filter_isharp_bs_4tap_64p_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(filter_isharp_bs_4tap_in_6_64p,
+ filter_isharp_bs_4tap_in_6_64p_s1_12, 6);
+}
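Assuming the intended call order (a sketch; the patch itself does not show the init site): the one-time S1.10 to S1.12 conversion runs first, after which per-stream programming can fetch the converted tables by tap count:

/* Hypothetical init-then-use sequence */
spl_init_blur_scale_coeffs();	/* fills the *_s1_12 tables once */
blur_h = spl_dscl_get_blur_scale_coeffs_64p(4);	/* 4-tap S1.12 table */

where blur_h is a local uint16_t pointer.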
+
+uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps)
+{
+	if (taps == 3) {
+		return spl_get_filter_isharp_bs_3tap_64p();
+	} else if (taps == 4) {
+		return spl_get_filter_isharp_bs_4tap_64p();
+	} else if (taps == 6) {
+		return spl_get_filter_isharp_bs_4tap_in_6_64p();
+	} else {
+		/* should never happen, bug */
+		SPL_BREAK_TO_DEBUGGER();
+		return NULL;
+	}
+}
+
+void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data)
+{
+ dscl_prog_data->filter_blur_scale_h =
+ spl_dscl_get_blur_scale_coeffs_64p(data->taps.h_taps);
+
+ dscl_prog_data->filter_blur_scale_v =
+ spl_dscl_get_blur_scale_coeffs_64p(data->taps.v_taps);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
index 1aaf4c50c1bc..fe0b12571f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_isharp_filters.h
@@ -7,11 +7,44 @@
#include "dc_spl_types.h"
+#define ISHARP_LUT_TABLE_SIZE 32
const uint32_t *spl_get_filter_isharp_1D_lut_0(void);
const uint32_t *spl_get_filter_isharp_1D_lut_0p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p0x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_1p5x(void);
const uint32_t *spl_get_filter_isharp_1D_lut_2p0x(void);
-const uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
+const uint32_t *spl_get_filter_isharp_1D_lut_3p0x(void);
+uint16_t *spl_get_filter_isharp_bs_4tap_in_6_64p(void);
+uint16_t *spl_get_filter_isharp_bs_4tap_64p(void);
+uint16_t *spl_get_filter_isharp_bs_3tap_64p(void);
const uint16_t *spl_get_filter_isharp_wide_6tap_64p(void);
+uint16_t *spl_dscl_get_blur_scale_coeffs_64p(int taps);
+
+struct scale_ratio_to_sharpness_level_lookup {
+ unsigned int ratio_numer;
+ unsigned int ratio_denom;
+ unsigned int sharpness_numer;
+ unsigned int sharpness_denom;
+};
+
+struct isharp_1D_lut_pregen {
+ unsigned int sharpness_numer;
+ unsigned int sharpness_denom;
+ uint32_t value[ISHARP_LUT_TABLE_SIZE];
+};
+
+enum system_setup {
+ SDR_NL = 0,
+ SDR_L,
+ HDR_NL,
+ HDR_L,
+ NUM_SHARPNESS_SETUPS
+};
+
+void spl_init_blur_scale_coeffs(void);
+void spl_set_blur_scale_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data);
+
+void spl_build_isharp_1dlut_from_reference_curve(struct spl_fixed31_32 ratio,
+ enum system_setup setup, struct adaptive_sharpness sharpness);
+uint32_t *spl_get_pregen_filter_isharp_1D_lut(enum system_setup setup);
#endif /* __DC_SPL_ISHARP_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c
new file mode 100644
index 000000000000..09bf82f7d468
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.c
@@ -0,0 +1,1726 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "spl_debug.h"
+#include "dc_spl_filters.h"
+#include "dc_spl_scl_filters.h"
+#include "dc_spl_scl_easf_filters.h"
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.3_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.300000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_30[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F6, 0x0206, 0x0004,
+ 0x01EC, 0x020B, 0x0009,
+ 0x01E2, 0x0211, 0x000D,
+ 0x01D8, 0x0216, 0x0012,
+ 0x01CE, 0x021C, 0x0016,
+ 0x01C4, 0x0221, 0x001B,
+ 0x01BA, 0x0226, 0x0020,
+ 0x01B0, 0x022A, 0x0026,
+ 0x01A6, 0x022F, 0x002B,
+ 0x019C, 0x0233, 0x0031,
+ 0x0192, 0x0238, 0x0036,
+ 0x0188, 0x023C, 0x003C,
+ 0x017E, 0x0240, 0x0042,
+ 0x0174, 0x0244, 0x0048,
+ 0x016A, 0x0248, 0x004E,
+ 0x0161, 0x024A, 0x0055,
+ 0x0157, 0x024E, 0x005B,
+ 0x014D, 0x0251, 0x0062,
+ 0x0144, 0x0253, 0x0069,
+ 0x013A, 0x0256, 0x0070,
+ 0x0131, 0x0258, 0x0077,
+ 0x0127, 0x025B, 0x007E,
+ 0x011E, 0x025C, 0x0086,
+ 0x0115, 0x025E, 0x008D,
+ 0x010B, 0x0260, 0x0095,
+ 0x0102, 0x0262, 0x009C,
+ 0x00F9, 0x0263, 0x00A4,
+ 0x00F0, 0x0264, 0x00AC,
+ 0x00E7, 0x0265, 0x00B4,
+ 0x00DF, 0x0264, 0x00BD,
+ 0x00D6, 0x0265, 0x00C5,
+ 0x00CD, 0x0266, 0x00CD,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.4_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.400000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_40[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F6, 0x0206, 0x0004,
+ 0x01EB, 0x020E, 0x0007,
+ 0x01E1, 0x0214, 0x000B,
+ 0x01D7, 0x021A, 0x000F,
+ 0x01CD, 0x0220, 0x0013,
+ 0x01C2, 0x0226, 0x0018,
+ 0x01B8, 0x022C, 0x001C,
+ 0x01AE, 0x0231, 0x0021,
+ 0x01A3, 0x0237, 0x0026,
+ 0x0199, 0x023C, 0x002B,
+ 0x018F, 0x0240, 0x0031,
+ 0x0185, 0x0245, 0x0036,
+ 0x017A, 0x024A, 0x003C,
+ 0x0170, 0x024F, 0x0041,
+ 0x0166, 0x0253, 0x0047,
+ 0x015C, 0x0257, 0x004D,
+ 0x0152, 0x025A, 0x0054,
+ 0x0148, 0x025E, 0x005A,
+ 0x013E, 0x0261, 0x0061,
+ 0x0134, 0x0264, 0x0068,
+ 0x012B, 0x0266, 0x006F,
+ 0x0121, 0x0269, 0x0076,
+ 0x0117, 0x026C, 0x007D,
+ 0x010E, 0x026E, 0x0084,
+ 0x0104, 0x0270, 0x008C,
+ 0x00FB, 0x0271, 0x0094,
+ 0x00F2, 0x0272, 0x009C,
+ 0x00E9, 0x0273, 0x00A4,
+ 0x00E0, 0x0274, 0x00AC,
+ 0x00D7, 0x0275, 0x00B4,
+ 0x00CE, 0x0275, 0x00BD,
+ 0x00C5, 0x0276, 0x00C5,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.5_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.500000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_50[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F5, 0x0209, 0x0002,
+ 0x01EA, 0x0211, 0x0005,
+ 0x01DF, 0x021A, 0x0007,
+ 0x01D4, 0x0222, 0x000A,
+ 0x01C9, 0x022A, 0x000D,
+ 0x01BE, 0x0232, 0x0010,
+ 0x01B3, 0x0239, 0x0014,
+ 0x01A8, 0x0241, 0x0017,
+ 0x019D, 0x0248, 0x001B,
+ 0x0192, 0x024F, 0x001F,
+ 0x0187, 0x0255, 0x0024,
+ 0x017C, 0x025C, 0x0028,
+ 0x0171, 0x0262, 0x002D,
+ 0x0166, 0x0268, 0x0032,
+ 0x015B, 0x026E, 0x0037,
+ 0x0150, 0x0273, 0x003D,
+ 0x0146, 0x0278, 0x0042,
+ 0x013B, 0x027D, 0x0048,
+ 0x0130, 0x0282, 0x004E,
+ 0x0126, 0x0286, 0x0054,
+ 0x011B, 0x028A, 0x005B,
+ 0x0111, 0x028D, 0x0062,
+ 0x0107, 0x0290, 0x0069,
+ 0x00FD, 0x0293, 0x0070,
+ 0x00F3, 0x0296, 0x0077,
+ 0x00E9, 0x0298, 0x007F,
+ 0x00DF, 0x029A, 0x0087,
+ 0x00D5, 0x029C, 0x008F,
+ 0x00CC, 0x029D, 0x0097,
+ 0x00C3, 0x029E, 0x009F,
+ 0x00BA, 0x029E, 0x00A8,
+ 0x00B1, 0x029E, 0x00B1,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.6_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.600000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_60[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F4, 0x020B, 0x0001,
+ 0x01E8, 0x0216, 0x0002,
+ 0x01DC, 0x0221, 0x0003,
+ 0x01D0, 0x022B, 0x0005,
+ 0x01C4, 0x0235, 0x0007,
+ 0x01B8, 0x0240, 0x0008,
+ 0x01AC, 0x0249, 0x000B,
+ 0x01A0, 0x0253, 0x000D,
+ 0x0194, 0x025C, 0x0010,
+ 0x0188, 0x0265, 0x0013,
+ 0x017C, 0x026E, 0x0016,
+ 0x0170, 0x0277, 0x0019,
+ 0x0164, 0x027F, 0x001D,
+ 0x0158, 0x0287, 0x0021,
+ 0x014C, 0x028F, 0x0025,
+ 0x0140, 0x0297, 0x0029,
+ 0x0135, 0x029D, 0x002E,
+ 0x0129, 0x02A4, 0x0033,
+ 0x011D, 0x02AB, 0x0038,
+ 0x0112, 0x02B0, 0x003E,
+ 0x0107, 0x02B5, 0x0044,
+ 0x00FC, 0x02BA, 0x004A,
+ 0x00F1, 0x02BF, 0x0050,
+ 0x00E6, 0x02C3, 0x0057,
+ 0x00DB, 0x02C7, 0x005E,
+ 0x00D1, 0x02CA, 0x0065,
+ 0x00C7, 0x02CC, 0x006D,
+ 0x00BD, 0x02CE, 0x0075,
+ 0x00B3, 0x02D0, 0x007D,
+ 0x00A9, 0x02D2, 0x0085,
+ 0x00A0, 0x02D2, 0x008E,
+ 0x0097, 0x02D2, 0x0097,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.7_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.700000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_70[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F3, 0x020D, 0x0000,
+ 0x01E5, 0x021B, 0x0000,
+ 0x01D8, 0x0228, 0x0000,
+ 0x01CB, 0x0235, 0x0000,
+ 0x01BD, 0x0243, 0x0000,
+ 0x01B0, 0x024F, 0x0001,
+ 0x01A2, 0x025C, 0x0002,
+ 0x0195, 0x0268, 0x0003,
+ 0x0187, 0x0275, 0x0004,
+ 0x017A, 0x0280, 0x0006,
+ 0x016D, 0x028C, 0x0007,
+ 0x015F, 0x0298, 0x0009,
+ 0x0152, 0x02A2, 0x000C,
+ 0x0145, 0x02AD, 0x000E,
+ 0x0138, 0x02B7, 0x0011,
+ 0x012B, 0x02C0, 0x0015,
+ 0x011E, 0x02CA, 0x0018,
+ 0x0111, 0x02D3, 0x001C,
+ 0x0105, 0x02DB, 0x0020,
+ 0x00F8, 0x02E3, 0x0025,
+ 0x00EC, 0x02EA, 0x002A,
+ 0x00E0, 0x02F1, 0x002F,
+ 0x00D5, 0x02F6, 0x0035,
+ 0x00C9, 0x02FC, 0x003B,
+ 0x00BE, 0x0301, 0x0041,
+ 0x00B3, 0x0305, 0x0048,
+ 0x00A8, 0x0309, 0x004F,
+ 0x009E, 0x030C, 0x0056,
+ 0x0094, 0x030E, 0x005E,
+ 0x008A, 0x0310, 0x0066,
+ 0x0081, 0x0310, 0x006F,
+ 0x0077, 0x0312, 0x0077,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.8_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.800000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_80[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01F1, 0x0210, 0x0FFF,
+ 0x01E2, 0x0220, 0x0FFE,
+ 0x01D2, 0x0232, 0x0FFC,
+ 0x01C3, 0x0241, 0x0FFC,
+ 0x01B4, 0x0251, 0x0FFB,
+ 0x01A4, 0x0262, 0x0FFA,
+ 0x0195, 0x0271, 0x0FFA,
+ 0x0186, 0x0281, 0x0FF9,
+ 0x0176, 0x0291, 0x0FF9,
+ 0x0167, 0x02A0, 0x0FF9,
+ 0x0158, 0x02AE, 0x0FFA,
+ 0x0149, 0x02BD, 0x0FFA,
+ 0x013A, 0x02CB, 0x0FFB,
+ 0x012C, 0x02D7, 0x0FFD,
+ 0x011D, 0x02E5, 0x0FFE,
+ 0x010F, 0x02F1, 0x0000,
+ 0x0101, 0x02FD, 0x0002,
+ 0x00F3, 0x0308, 0x0005,
+ 0x00E5, 0x0313, 0x0008,
+ 0x00D8, 0x031D, 0x000B,
+ 0x00CB, 0x0326, 0x000F,
+ 0x00BE, 0x032F, 0x0013,
+ 0x00B2, 0x0337, 0x0017,
+ 0x00A6, 0x033E, 0x001C,
+ 0x009A, 0x0345, 0x0021,
+ 0x008F, 0x034A, 0x0027,
+ 0x0084, 0x034F, 0x002D,
+ 0x0079, 0x0353, 0x0034,
+ 0x006F, 0x0356, 0x003B,
+ 0x0065, 0x0358, 0x0043,
+ 0x005C, 0x0359, 0x004B,
+ 0x0053, 0x035A, 0x0053,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_0.9_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 0.900000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_0_90[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01EE, 0x0214, 0x0FFE,
+ 0x01DC, 0x0228, 0x0FFC,
+ 0x01CA, 0x023C, 0x0FFA,
+ 0x01B9, 0x024F, 0x0FF8,
+ 0x01A7, 0x0262, 0x0FF7,
+ 0x0195, 0x0276, 0x0FF5,
+ 0x0183, 0x028A, 0x0FF3,
+ 0x0172, 0x029C, 0x0FF2,
+ 0x0160, 0x02AF, 0x0FF1,
+ 0x014F, 0x02C2, 0x0FEF,
+ 0x013E, 0x02D4, 0x0FEE,
+ 0x012D, 0x02E5, 0x0FEE,
+ 0x011C, 0x02F7, 0x0FED,
+ 0x010C, 0x0307, 0x0FED,
+ 0x00FB, 0x0318, 0x0FED,
+ 0x00EC, 0x0327, 0x0FED,
+ 0x00DC, 0x0336, 0x0FEE,
+ 0x00CD, 0x0344, 0x0FEF,
+ 0x00BE, 0x0352, 0x0FF0,
+ 0x00B0, 0x035E, 0x0FF2,
+ 0x00A2, 0x036A, 0x0FF4,
+ 0x0095, 0x0375, 0x0FF6,
+ 0x0088, 0x037F, 0x0FF9,
+ 0x007B, 0x0388, 0x0FFD,
+ 0x006F, 0x0391, 0x0000,
+ 0x0064, 0x0397, 0x0005,
+ 0x0059, 0x039D, 0x000A,
+ 0x004E, 0x03A3, 0x000F,
+ 0x0045, 0x03A6, 0x0015,
+ 0x003B, 0x03A9, 0x001C,
+ 0x0033, 0x03AA, 0x0023,
+ 0x002A, 0x03AC, 0x002A,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 3t_64p_LanczosEd_p_1_p_10qb_
+// <num_taps> 3
+// <num_phases> 64
+// <scale_ratio> input/output = 1.000000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_3tap_64p_ratio_1_00[99] = {
+ 0x0200, 0x0200, 0x0000,
+ 0x01EB, 0x0217, 0x0FFE,
+ 0x01D5, 0x022F, 0x0FFC,
+ 0x01C0, 0x0247, 0x0FF9,
+ 0x01AB, 0x025E, 0x0FF7,
+ 0x0196, 0x0276, 0x0FF4,
+ 0x0181, 0x028D, 0x0FF2,
+ 0x016C, 0x02A5, 0x0FEF,
+ 0x0158, 0x02BB, 0x0FED,
+ 0x0144, 0x02D1, 0x0FEB,
+ 0x0130, 0x02E8, 0x0FE8,
+ 0x011C, 0x02FE, 0x0FE6,
+ 0x0109, 0x0313, 0x0FE4,
+ 0x00F6, 0x0328, 0x0FE2,
+ 0x00E4, 0x033C, 0x0FE0,
+ 0x00D2, 0x034F, 0x0FDF,
+ 0x00C0, 0x0363, 0x0FDD,
+ 0x00B0, 0x0374, 0x0FDC,
+ 0x009F, 0x0385, 0x0FDC,
+ 0x0090, 0x0395, 0x0FDB,
+ 0x0081, 0x03A4, 0x0FDB,
+ 0x0072, 0x03B3, 0x0FDB,
+ 0x0064, 0x03C0, 0x0FDC,
+ 0x0057, 0x03CC, 0x0FDD,
+ 0x004B, 0x03D6, 0x0FDF,
+ 0x003F, 0x03E0, 0x0FE1,
+ 0x0034, 0x03E8, 0x0FE4,
+ 0x002A, 0x03EF, 0x0FE7,
+ 0x0020, 0x03F5, 0x0FEB,
+ 0x0017, 0x03FA, 0x0FEF,
+ 0x000F, 0x03FD, 0x0FF4,
+ 0x0007, 0x03FF, 0x0FFA,
+ 0x0000, 0x0400, 0x0000,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.3_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.300000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_30[132] = {
+ 0x0104, 0x01F8, 0x0104, 0x0000,
+ 0x00FE, 0x01F7, 0x010A, 0x0001,
+ 0x00F8, 0x01F6, 0x010F, 0x0003,
+ 0x00F2, 0x01F5, 0x0114, 0x0005,
+ 0x00EB, 0x01F4, 0x011B, 0x0006,
+ 0x00E5, 0x01F3, 0x0120, 0x0008,
+ 0x00DF, 0x01F2, 0x0125, 0x000A,
+ 0x00DA, 0x01F0, 0x012A, 0x000C,
+ 0x00D4, 0x01EE, 0x0130, 0x000E,
+ 0x00CE, 0x01ED, 0x0135, 0x0010,
+ 0x00C8, 0x01EB, 0x013A, 0x0013,
+ 0x00C2, 0x01E9, 0x0140, 0x0015,
+ 0x00BD, 0x01E7, 0x0145, 0x0017,
+ 0x00B7, 0x01E5, 0x014A, 0x001A,
+ 0x00B1, 0x01E2, 0x0151, 0x001C,
+ 0x00AC, 0x01E0, 0x0155, 0x001F,
+ 0x00A7, 0x01DD, 0x015A, 0x0022,
+ 0x00A1, 0x01DB, 0x015F, 0x0025,
+ 0x009C, 0x01D8, 0x0165, 0x0027,
+ 0x0097, 0x01D5, 0x016A, 0x002A,
+ 0x0092, 0x01D2, 0x016E, 0x002E,
+ 0x008C, 0x01CF, 0x0174, 0x0031,
+ 0x0087, 0x01CC, 0x0179, 0x0034,
+ 0x0083, 0x01C9, 0x017D, 0x0037,
+ 0x007E, 0x01C5, 0x0182, 0x003B,
+ 0x0079, 0x01C2, 0x0187, 0x003E,
+ 0x0074, 0x01BE, 0x018C, 0x0042,
+ 0x0070, 0x01BA, 0x0190, 0x0046,
+ 0x006B, 0x01B7, 0x0195, 0x0049,
+ 0x0066, 0x01B3, 0x019A, 0x004D,
+ 0x0062, 0x01AF, 0x019E, 0x0051,
+ 0x005E, 0x01AB, 0x01A2, 0x0055,
+ 0x005A, 0x01A6, 0x01A6, 0x005A,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.4_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.400000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_40[132] = {
+ 0x00FB, 0x0209, 0x00FC, 0x0000,
+ 0x00F5, 0x0209, 0x0101, 0x0001,
+ 0x00EE, 0x0208, 0x0108, 0x0002,
+ 0x00E8, 0x0207, 0x010E, 0x0003,
+ 0x00E2, 0x0206, 0x0114, 0x0004,
+ 0x00DB, 0x0205, 0x011A, 0x0006,
+ 0x00D5, 0x0204, 0x0120, 0x0007,
+ 0x00CF, 0x0203, 0x0125, 0x0009,
+ 0x00C9, 0x0201, 0x012C, 0x000A,
+ 0x00C3, 0x01FF, 0x0132, 0x000C,
+ 0x00BD, 0x01FD, 0x0138, 0x000E,
+ 0x00B7, 0x01FB, 0x013E, 0x0010,
+ 0x00B1, 0x01F9, 0x0144, 0x0012,
+ 0x00AC, 0x01F7, 0x0149, 0x0014,
+ 0x00A6, 0x01F4, 0x0150, 0x0016,
+ 0x00A0, 0x01F2, 0x0156, 0x0018,
+ 0x009B, 0x01EF, 0x015C, 0x001A,
+ 0x0095, 0x01EC, 0x0162, 0x001D,
+ 0x0090, 0x01E9, 0x0168, 0x001F,
+ 0x008B, 0x01E6, 0x016D, 0x0022,
+ 0x0085, 0x01E3, 0x0173, 0x0025,
+ 0x0080, 0x01DF, 0x0179, 0x0028,
+ 0x007B, 0x01DC, 0x017E, 0x002B,
+ 0x0076, 0x01D8, 0x0184, 0x002E,
+ 0x0071, 0x01D4, 0x018A, 0x0031,
+ 0x006D, 0x01D1, 0x018E, 0x0034,
+ 0x0068, 0x01CD, 0x0193, 0x0038,
+ 0x0063, 0x01C8, 0x019A, 0x003B,
+ 0x005F, 0x01C4, 0x019E, 0x003F,
+ 0x005B, 0x01C0, 0x01A3, 0x0042,
+ 0x0056, 0x01BB, 0x01A9, 0x0046,
+ 0x0052, 0x01B7, 0x01AD, 0x004A,
+ 0x004E, 0x01B2, 0x01B2, 0x004E,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.5_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.500000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_50[132] = {
+ 0x00E5, 0x0236, 0x00E5, 0x0000,
+ 0x00DE, 0x0235, 0x00ED, 0x0000,
+ 0x00D7, 0x0235, 0x00F4, 0x0000,
+ 0x00D0, 0x0235, 0x00FB, 0x0000,
+ 0x00C9, 0x0234, 0x0102, 0x0001,
+ 0x00C2, 0x0233, 0x010A, 0x0001,
+ 0x00BC, 0x0232, 0x0111, 0x0001,
+ 0x00B5, 0x0230, 0x0119, 0x0002,
+ 0x00AE, 0x022F, 0x0121, 0x0002,
+ 0x00A8, 0x022D, 0x0128, 0x0003,
+ 0x00A2, 0x022B, 0x012F, 0x0004,
+ 0x009B, 0x0229, 0x0137, 0x0005,
+ 0x0095, 0x0226, 0x013F, 0x0006,
+ 0x008F, 0x0224, 0x0146, 0x0007,
+ 0x0089, 0x0221, 0x014E, 0x0008,
+ 0x0083, 0x021E, 0x0155, 0x000A,
+ 0x007E, 0x021B, 0x015C, 0x000B,
+ 0x0078, 0x0217, 0x0164, 0x000D,
+ 0x0072, 0x0213, 0x016D, 0x000E,
+ 0x006D, 0x0210, 0x0173, 0x0010,
+ 0x0068, 0x020C, 0x017A, 0x0012,
+ 0x0063, 0x0207, 0x0182, 0x0014,
+ 0x005E, 0x0203, 0x0189, 0x0016,
+ 0x0059, 0x01FE, 0x0191, 0x0018,
+ 0x0054, 0x01F9, 0x0198, 0x001B,
+ 0x0050, 0x01F4, 0x019F, 0x001D,
+ 0x004B, 0x01EF, 0x01A6, 0x0020,
+ 0x0047, 0x01EA, 0x01AC, 0x0023,
+ 0x0043, 0x01E4, 0x01B3, 0x0026,
+ 0x003F, 0x01DF, 0x01B9, 0x0029,
+ 0x003B, 0x01D9, 0x01C0, 0x002C,
+ 0x0037, 0x01D3, 0x01C6, 0x0030,
+ 0x0033, 0x01CD, 0x01CD, 0x0033,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.6_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.600000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_60[132] = {
+ 0x00C8, 0x026F, 0x00C9, 0x0000,
+ 0x00C0, 0x0270, 0x00D1, 0x0FFF,
+ 0x00B8, 0x0270, 0x00D9, 0x0FFF,
+ 0x00B1, 0x0270, 0x00E1, 0x0FFE,
+ 0x00A9, 0x026F, 0x00EB, 0x0FFD,
+ 0x00A2, 0x026E, 0x00F3, 0x0FFD,
+ 0x009A, 0x026D, 0x00FD, 0x0FFC,
+ 0x0093, 0x026C, 0x0105, 0x0FFC,
+ 0x008C, 0x026A, 0x010F, 0x0FFB,
+ 0x0085, 0x0268, 0x0118, 0x0FFB,
+ 0x007E, 0x0265, 0x0122, 0x0FFB,
+ 0x0078, 0x0263, 0x012A, 0x0FFB,
+ 0x0071, 0x0260, 0x0134, 0x0FFB,
+ 0x006B, 0x025C, 0x013E, 0x0FFB,
+ 0x0065, 0x0259, 0x0147, 0x0FFB,
+ 0x005F, 0x0255, 0x0151, 0x0FFB,
+ 0x0059, 0x0251, 0x015A, 0x0FFC,
+ 0x0054, 0x024D, 0x0163, 0x0FFC,
+ 0x004E, 0x0248, 0x016D, 0x0FFD,
+ 0x0049, 0x0243, 0x0176, 0x0FFE,
+ 0x0044, 0x023E, 0x017F, 0x0FFF,
+ 0x003F, 0x0238, 0x0189, 0x0000,
+ 0x003A, 0x0232, 0x0193, 0x0001,
+ 0x0036, 0x022C, 0x019C, 0x0002,
+ 0x0031, 0x0226, 0x01A5, 0x0004,
+ 0x002D, 0x021F, 0x01AF, 0x0005,
+ 0x0029, 0x0218, 0x01B8, 0x0007,
+ 0x0025, 0x0211, 0x01C1, 0x0009,
+ 0x0022, 0x020A, 0x01C9, 0x000B,
+ 0x001E, 0x0203, 0x01D2, 0x000D,
+ 0x001B, 0x01FB, 0x01DA, 0x0010,
+ 0x0018, 0x01F3, 0x01E3, 0x0012,
+ 0x0015, 0x01EB, 0x01EB, 0x0015,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.7_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.700000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_70[132] = {
+ 0x00A3, 0x02B9, 0x00A4, 0x0000,
+ 0x009A, 0x02BA, 0x00AD, 0x0FFF,
+ 0x0092, 0x02BA, 0x00B6, 0x0FFE,
+ 0x0089, 0x02BA, 0x00C1, 0x0FFC,
+ 0x0081, 0x02B9, 0x00CB, 0x0FFB,
+ 0x0079, 0x02B8, 0x00D5, 0x0FFA,
+ 0x0071, 0x02B7, 0x00DF, 0x0FF9,
+ 0x0069, 0x02B5, 0x00EA, 0x0FF8,
+ 0x0062, 0x02B3, 0x00F4, 0x0FF7,
+ 0x005B, 0x02B0, 0x00FF, 0x0FF6,
+ 0x0054, 0x02AD, 0x010B, 0x0FF4,
+ 0x004D, 0x02A9, 0x0117, 0x0FF3,
+ 0x0046, 0x02A5, 0x0123, 0x0FF2,
+ 0x0040, 0x02A1, 0x012D, 0x0FF2,
+ 0x003A, 0x029C, 0x0139, 0x0FF1,
+ 0x0034, 0x0297, 0x0145, 0x0FF0,
+ 0x002F, 0x0292, 0x0150, 0x0FEF,
+ 0x0029, 0x028C, 0x015C, 0x0FEF,
+ 0x0024, 0x0285, 0x0169, 0x0FEE,
+ 0x001F, 0x027F, 0x0174, 0x0FEE,
+ 0x001B, 0x0278, 0x017F, 0x0FEE,
+ 0x0016, 0x0270, 0x018D, 0x0FED,
+ 0x0012, 0x0268, 0x0199, 0x0FED,
+ 0x000E, 0x0260, 0x01A4, 0x0FEE,
+ 0x000B, 0x0258, 0x01AF, 0x0FEE,
+ 0x0007, 0x024F, 0x01BC, 0x0FEE,
+ 0x0004, 0x0246, 0x01C7, 0x0FEF,
+ 0x0001, 0x023D, 0x01D3, 0x0FEF,
+ 0x0FFE, 0x0233, 0x01DF, 0x0FF0,
+ 0x0FFC, 0x0229, 0x01EA, 0x0FF1,
+ 0x0FFA, 0x021F, 0x01F4, 0x0FF3,
+ 0x0FF8, 0x0215, 0x01FF, 0x0FF4,
+ 0x0FF6, 0x020A, 0x020A, 0x0FF6,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.8_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.800000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_80[132] = {
+ 0x0075, 0x0315, 0x0076, 0x0000,
+ 0x006C, 0x0316, 0x007F, 0x0FFF,
+ 0x0062, 0x0316, 0x008A, 0x0FFE,
+ 0x0059, 0x0315, 0x0096, 0x0FFC,
+ 0x0050, 0x0314, 0x00A1, 0x0FFB,
+ 0x0048, 0x0312, 0x00AD, 0x0FF9,
+ 0x0040, 0x0310, 0x00B8, 0x0FF8,
+ 0x0038, 0x030D, 0x00C5, 0x0FF6,
+ 0x0030, 0x030A, 0x00D1, 0x0FF5,
+ 0x0029, 0x0306, 0x00DE, 0x0FF3,
+ 0x0022, 0x0301, 0x00EB, 0x0FF2,
+ 0x001C, 0x02FC, 0x00F8, 0x0FF0,
+ 0x0015, 0x02F7, 0x0106, 0x0FEE,
+ 0x0010, 0x02F1, 0x0112, 0x0FED,
+ 0x000A, 0x02EA, 0x0121, 0x0FEB,
+ 0x0005, 0x02E3, 0x012F, 0x0FE9,
+ 0x0000, 0x02DB, 0x013D, 0x0FE8,
+ 0x0FFB, 0x02D3, 0x014C, 0x0FE6,
+ 0x0FF7, 0x02CA, 0x015A, 0x0FE5,
+ 0x0FF3, 0x02C1, 0x0169, 0x0FE3,
+ 0x0FF0, 0x02B7, 0x0177, 0x0FE2,
+ 0x0FEC, 0x02AD, 0x0186, 0x0FE1,
+ 0x0FE9, 0x02A2, 0x0196, 0x0FDF,
+ 0x0FE7, 0x0297, 0x01A4, 0x0FDE,
+ 0x0FE4, 0x028C, 0x01B3, 0x0FDD,
+ 0x0FE2, 0x0280, 0x01C2, 0x0FDC,
+ 0x0FE0, 0x0274, 0x01D0, 0x0FDC,
+ 0x0FDF, 0x0268, 0x01DE, 0x0FDB,
+ 0x0FDD, 0x025B, 0x01EE, 0x0FDA,
+ 0x0FDC, 0x024E, 0x01FC, 0x0FDA,
+ 0x0FDB, 0x0241, 0x020A, 0x0FDA,
+ 0x0FDB, 0x0233, 0x0218, 0x0FDA,
+ 0x0FDA, 0x0226, 0x0226, 0x0FDA,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_0.9_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 0.900000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_0_90[132] = {
+ 0x003F, 0x0383, 0x003E, 0x0000,
+ 0x0034, 0x0383, 0x004A, 0x0FFF,
+ 0x002B, 0x0383, 0x0054, 0x0FFE,
+ 0x0021, 0x0381, 0x0061, 0x0FFD,
+ 0x0019, 0x037F, 0x006C, 0x0FFC,
+ 0x0010, 0x037C, 0x0079, 0x0FFB,
+ 0x0008, 0x0378, 0x0086, 0x0FFA,
+ 0x0001, 0x0374, 0x0093, 0x0FF8,
+ 0x0FFA, 0x036E, 0x00A1, 0x0FF7,
+ 0x0FF3, 0x0368, 0x00B0, 0x0FF5,
+ 0x0FED, 0x0361, 0x00BF, 0x0FF3,
+ 0x0FE8, 0x035A, 0x00CD, 0x0FF1,
+ 0x0FE2, 0x0352, 0x00DC, 0x0FF0,
+ 0x0FDE, 0x0349, 0x00EB, 0x0FEE,
+ 0x0FD9, 0x033F, 0x00FC, 0x0FEC,
+ 0x0FD5, 0x0335, 0x010D, 0x0FE9,
+ 0x0FD2, 0x032A, 0x011D, 0x0FE7,
+ 0x0FCF, 0x031E, 0x012E, 0x0FE5,
+ 0x0FCC, 0x0312, 0x013F, 0x0FE3,
+ 0x0FCA, 0x0305, 0x0150, 0x0FE1,
+ 0x0FC8, 0x02F8, 0x0162, 0x0FDE,
+ 0x0FC6, 0x02EA, 0x0174, 0x0FDC,
+ 0x0FC5, 0x02DC, 0x0185, 0x0FDA,
+ 0x0FC4, 0x02CD, 0x0197, 0x0FD8,
+ 0x0FC3, 0x02BE, 0x01AA, 0x0FD5,
+ 0x0FC3, 0x02AF, 0x01BB, 0x0FD3,
+ 0x0FC3, 0x029F, 0x01CD, 0x0FD1,
+ 0x0FC3, 0x028E, 0x01E0, 0x0FCF,
+ 0x0FC3, 0x027E, 0x01F2, 0x0FCD,
+ 0x0FC4, 0x026D, 0x0203, 0x0FCC,
+ 0x0FC5, 0x025C, 0x0215, 0x0FCA,
+ 0x0FC6, 0x024B, 0x0227, 0x0FC8,
+ 0x0FC7, 0x0239, 0x0239, 0x0FC7,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 03-Apr-2024
+// <coeffDescrip> 4t_64p_LanczosEd_p_1_p_10qb_
+// <num_taps> 4
+// <num_phases> 64
+// <scale_ratio> input/output = 1.000000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_4tap_64p_ratio_1_00[132] = {
+ 0x0000, 0x0400, 0x0000, 0x0000,
+ 0x0FF6, 0x03FF, 0x000B, 0x0000,
+ 0x0FED, 0x03FE, 0x0015, 0x0000,
+ 0x0FE4, 0x03FB, 0x0022, 0x0FFF,
+ 0x0FDC, 0x03F7, 0x002E, 0x0FFF,
+ 0x0FD5, 0x03F2, 0x003B, 0x0FFE,
+ 0x0FCE, 0x03EC, 0x0048, 0x0FFE,
+ 0x0FC8, 0x03E5, 0x0056, 0x0FFD,
+ 0x0FC3, 0x03DC, 0x0065, 0x0FFC,
+ 0x0FBE, 0x03D3, 0x0075, 0x0FFA,
+ 0x0FB9, 0x03C9, 0x0085, 0x0FF9,
+ 0x0FB6, 0x03BE, 0x0094, 0x0FF8,
+ 0x0FB2, 0x03B2, 0x00A6, 0x0FF6,
+ 0x0FB0, 0x03A5, 0x00B7, 0x0FF4,
+ 0x0FAD, 0x0397, 0x00CA, 0x0FF2,
+ 0x0FAB, 0x0389, 0x00DC, 0x0FF0,
+ 0x0FAA, 0x0379, 0x00EF, 0x0FEE,
+ 0x0FA9, 0x0369, 0x0102, 0x0FEC,
+ 0x0FA9, 0x0359, 0x0115, 0x0FE9,
+ 0x0FA9, 0x0348, 0x0129, 0x0FE6,
+ 0x0FA9, 0x0336, 0x013D, 0x0FE4,
+ 0x0FA9, 0x0323, 0x0153, 0x0FE1,
+ 0x0FAA, 0x0310, 0x0168, 0x0FDE,
+ 0x0FAC, 0x02FD, 0x017C, 0x0FDB,
+ 0x0FAD, 0x02E9, 0x0192, 0x0FD8,
+ 0x0FAF, 0x02D5, 0x01A7, 0x0FD5,
+ 0x0FB1, 0x02C0, 0x01BD, 0x0FD2,
+ 0x0FB3, 0x02AC, 0x01D2, 0x0FCF,
+ 0x0FB5, 0x0296, 0x01E9, 0x0FCC,
+ 0x0FB8, 0x0281, 0x01FE, 0x0FC9,
+ 0x0FBA, 0x026C, 0x0214, 0x0FC6,
+ 0x0FBD, 0x0256, 0x022A, 0x0FC3,
+ 0x0FC0, 0x0240, 0x0240, 0x0FC0,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.3_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.300000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_30[198] = {
+ 0x004B, 0x0100, 0x0169, 0x0101, 0x004B, 0x0000,
+ 0x0049, 0x00FD, 0x0169, 0x0103, 0x004E, 0x0000,
+ 0x0047, 0x00FA, 0x0169, 0x0106, 0x0050, 0x0000,
+ 0x0045, 0x00F7, 0x0168, 0x0109, 0x0052, 0x0001,
+ 0x0043, 0x00F5, 0x0168, 0x010B, 0x0054, 0x0001,
+ 0x0040, 0x00F2, 0x0168, 0x010E, 0x0057, 0x0001,
+ 0x003E, 0x00EF, 0x0168, 0x0110, 0x0059, 0x0002,
+ 0x003C, 0x00EC, 0x0167, 0x0113, 0x005C, 0x0002,
+ 0x003A, 0x00E9, 0x0167, 0x0116, 0x005E, 0x0002,
+ 0x0038, 0x00E6, 0x0166, 0x0118, 0x0061, 0x0003,
+ 0x0036, 0x00E3, 0x0165, 0x011C, 0x0063, 0x0003,
+ 0x0034, 0x00E0, 0x0165, 0x011D, 0x0066, 0x0004,
+ 0x0033, 0x00DD, 0x0164, 0x0120, 0x0068, 0x0004,
+ 0x0031, 0x00DA, 0x0163, 0x0122, 0x006B, 0x0005,
+ 0x002F, 0x00D7, 0x0163, 0x0125, 0x006D, 0x0005,
+ 0x002D, 0x00D3, 0x0162, 0x0128, 0x0070, 0x0006,
+ 0x002B, 0x00D0, 0x0161, 0x012A, 0x0073, 0x0007,
+ 0x002A, 0x00CD, 0x0160, 0x012D, 0x0075, 0x0007,
+ 0x0028, 0x00CA, 0x015F, 0x012F, 0x0078, 0x0008,
+ 0x0026, 0x00C7, 0x015E, 0x0131, 0x007B, 0x0009,
+ 0x0025, 0x00C4, 0x015D, 0x0133, 0x007E, 0x0009,
+ 0x0023, 0x00C1, 0x015C, 0x0136, 0x0080, 0x000A,
+ 0x0022, 0x00BE, 0x015A, 0x0138, 0x0083, 0x000B,
+ 0x0020, 0x00BB, 0x0159, 0x013A, 0x0086, 0x000C,
+ 0x001F, 0x00B8, 0x0158, 0x013B, 0x0089, 0x000D,
+ 0x001E, 0x00B5, 0x0156, 0x013E, 0x008C, 0x000D,
+ 0x001C, 0x00B2, 0x0155, 0x0140, 0x008F, 0x000E,
+ 0x001B, 0x00AF, 0x0153, 0x0143, 0x0091, 0x000F,
+ 0x0019, 0x00AC, 0x0152, 0x0145, 0x0094, 0x0010,
+ 0x0018, 0x00A9, 0x0150, 0x0147, 0x0097, 0x0011,
+ 0x0017, 0x00A6, 0x014F, 0x0148, 0x009A, 0x0012,
+ 0x0016, 0x00A3, 0x014D, 0x0149, 0x009D, 0x0014,
+ 0x0015, 0x00A0, 0x014B, 0x014B, 0x00A0, 0x0015,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.4_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.400000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_40[198] = {
+ 0x0028, 0x0106, 0x01A3, 0x0107, 0x0028, 0x0000,
+ 0x0026, 0x0102, 0x01A3, 0x010A, 0x002B, 0x0000,
+ 0x0024, 0x00FE, 0x01A3, 0x010F, 0x002D, 0x0FFF,
+ 0x0022, 0x00FA, 0x01A3, 0x0113, 0x002F, 0x0FFF,
+ 0x0021, 0x00F6, 0x01A3, 0x0116, 0x0031, 0x0FFF,
+ 0x001F, 0x00F2, 0x01A2, 0x011B, 0x0034, 0x0FFE,
+ 0x001D, 0x00EE, 0x01A2, 0x011F, 0x0036, 0x0FFE,
+ 0x001B, 0x00EA, 0x01A1, 0x0123, 0x0039, 0x0FFE,
+ 0x0019, 0x00E6, 0x01A1, 0x0127, 0x003B, 0x0FFE,
+ 0x0018, 0x00E2, 0x01A0, 0x012A, 0x003E, 0x0FFE,
+ 0x0016, 0x00DE, 0x01A0, 0x012E, 0x0041, 0x0FFD,
+ 0x0015, 0x00DA, 0x019F, 0x0132, 0x0043, 0x0FFD,
+ 0x0013, 0x00D6, 0x019E, 0x0136, 0x0046, 0x0FFD,
+ 0x0012, 0x00D2, 0x019D, 0x0139, 0x0049, 0x0FFD,
+ 0x0010, 0x00CE, 0x019C, 0x013D, 0x004C, 0x0FFD,
+ 0x000F, 0x00CA, 0x019A, 0x0141, 0x004F, 0x0FFD,
+ 0x000E, 0x00C6, 0x0199, 0x0144, 0x0052, 0x0FFD,
+ 0x000D, 0x00C2, 0x0197, 0x0148, 0x0055, 0x0FFD,
+ 0x000B, 0x00BE, 0x0196, 0x014C, 0x0058, 0x0FFD,
+ 0x000A, 0x00BA, 0x0195, 0x014F, 0x005B, 0x0FFD,
+ 0x0009, 0x00B6, 0x0193, 0x0153, 0x005E, 0x0FFD,
+ 0x0008, 0x00B2, 0x0191, 0x0157, 0x0061, 0x0FFD,
+ 0x0007, 0x00AE, 0x0190, 0x015A, 0x0064, 0x0FFD,
+ 0x0006, 0x00AA, 0x018E, 0x015D, 0x0068, 0x0FFD,
+ 0x0005, 0x00A6, 0x018C, 0x0161, 0x006B, 0x0FFD,
+ 0x0005, 0x00A2, 0x0189, 0x0164, 0x006F, 0x0FFD,
+ 0x0004, 0x009E, 0x0187, 0x0167, 0x0072, 0x0FFE,
+ 0x0003, 0x009A, 0x0185, 0x016B, 0x0075, 0x0FFE,
+ 0x0002, 0x0096, 0x0183, 0x016E, 0x0079, 0x0FFE,
+ 0x0002, 0x0093, 0x0180, 0x016F, 0x007D, 0x0FFF,
+ 0x0001, 0x008F, 0x017E, 0x0173, 0x0080, 0x0FFF,
+ 0x0001, 0x008B, 0x017B, 0x0175, 0x0084, 0x0000,
+ 0x0000, 0x0087, 0x0179, 0x0179, 0x0087, 0x0000,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.5_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.500000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_50[198] = {
+ 0x0000, 0x0107, 0x01F3, 0x0106, 0x0000, 0x0000,
+ 0x0FFE, 0x0101, 0x01F3, 0x010D, 0x0002, 0x0FFF,
+ 0x0FFD, 0x00FB, 0x01F3, 0x0113, 0x0003, 0x0FFF,
+ 0x0FFC, 0x00F6, 0x01F3, 0x0118, 0x0005, 0x0FFE,
+ 0x0FFA, 0x00F0, 0x01F3, 0x011E, 0x0007, 0x0FFE,
+ 0x0FF9, 0x00EB, 0x01F2, 0x0124, 0x0009, 0x0FFD,
+ 0x0FF8, 0x00E5, 0x01F2, 0x0129, 0x000B, 0x0FFD,
+ 0x0FF7, 0x00E0, 0x01F1, 0x012F, 0x000D, 0x0FFC,
+ 0x0FF6, 0x00DA, 0x01F0, 0x0135, 0x0010, 0x0FFB,
+ 0x0FF5, 0x00D4, 0x01EF, 0x013B, 0x0012, 0x0FFB,
+ 0x0FF4, 0x00CF, 0x01EE, 0x0141, 0x0014, 0x0FFA,
+ 0x0FF3, 0x00C9, 0x01ED, 0x0147, 0x0017, 0x0FF9,
+ 0x0FF2, 0x00C4, 0x01EB, 0x014C, 0x001A, 0x0FF9,
+ 0x0FF1, 0x00BF, 0x01EA, 0x0152, 0x001C, 0x0FF8,
+ 0x0FF1, 0x00B9, 0x01E8, 0x0157, 0x001F, 0x0FF8,
+ 0x0FF0, 0x00B4, 0x01E6, 0x015D, 0x0022, 0x0FF7,
+ 0x0FF0, 0x00AE, 0x01E4, 0x0163, 0x0025, 0x0FF6,
+ 0x0FEF, 0x00A9, 0x01E2, 0x0168, 0x0028, 0x0FF6,
+ 0x0FEF, 0x00A4, 0x01DF, 0x016E, 0x002B, 0x0FF5,
+ 0x0FEF, 0x009F, 0x01DD, 0x0172, 0x002E, 0x0FF5,
+ 0x0FEE, 0x009A, 0x01DA, 0x0178, 0x0032, 0x0FF4,
+ 0x0FEE, 0x0094, 0x01D8, 0x017E, 0x0035, 0x0FF3,
+ 0x0FEE, 0x008F, 0x01D5, 0x0182, 0x0039, 0x0FF3,
+ 0x0FEE, 0x008A, 0x01D2, 0x0188, 0x003C, 0x0FF2,
+ 0x0FEE, 0x0085, 0x01CF, 0x018C, 0x0040, 0x0FF2,
+ 0x0FEE, 0x0081, 0x01CB, 0x0191, 0x0044, 0x0FF1,
+ 0x0FEE, 0x007C, 0x01C8, 0x0196, 0x0047, 0x0FF1,
+ 0x0FEE, 0x0077, 0x01C4, 0x019C, 0x004B, 0x0FF0,
+ 0x0FEE, 0x0072, 0x01C1, 0x01A0, 0x004F, 0x0FF0,
+ 0x0FEE, 0x006E, 0x01BD, 0x01A4, 0x0053, 0x0FF0,
+ 0x0FEE, 0x0069, 0x01B9, 0x01A9, 0x0058, 0x0FEF,
+ 0x0FEE, 0x0065, 0x01B5, 0x01AD, 0x005C, 0x0FEF,
+ 0x0FEF, 0x0060, 0x01B1, 0x01B1, 0x0060, 0x0FEF,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.6_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.600000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_60[198] = {
+ 0x0FD9, 0x00FB, 0x0258, 0x00FB, 0x0FD9, 0x0000,
+ 0x0FD9, 0x00F3, 0x0258, 0x0102, 0x0FDA, 0x0000,
+ 0x0FD8, 0x00EB, 0x0258, 0x010B, 0x0FDB, 0x0FFF,
+ 0x0FD8, 0x00E3, 0x0258, 0x0112, 0x0FDC, 0x0FFF,
+ 0x0FD8, 0x00DC, 0x0257, 0x011B, 0x0FDC, 0x0FFE,
+ 0x0FD7, 0x00D4, 0x0256, 0x0123, 0x0FDE, 0x0FFE,
+ 0x0FD7, 0x00CD, 0x0255, 0x012B, 0x0FDF, 0x0FFD,
+ 0x0FD7, 0x00C5, 0x0254, 0x0133, 0x0FE0, 0x0FFD,
+ 0x0FD7, 0x00BE, 0x0252, 0x013C, 0x0FE1, 0x0FFC,
+ 0x0FD7, 0x00B6, 0x0251, 0x0143, 0x0FE3, 0x0FFC,
+ 0x0FD8, 0x00AF, 0x024F, 0x014B, 0x0FE4, 0x0FFB,
+ 0x0FD8, 0x00A8, 0x024C, 0x0154, 0x0FE6, 0x0FFA,
+ 0x0FD8, 0x00A1, 0x024A, 0x015B, 0x0FE8, 0x0FFA,
+ 0x0FD9, 0x009A, 0x0247, 0x0163, 0x0FEA, 0x0FF9,
+ 0x0FD9, 0x0093, 0x0244, 0x016C, 0x0FEC, 0x0FF8,
+ 0x0FD9, 0x008C, 0x0241, 0x0174, 0x0FEF, 0x0FF7,
+ 0x0FDA, 0x0085, 0x023E, 0x017B, 0x0FF1, 0x0FF7,
+ 0x0FDB, 0x007F, 0x023A, 0x0183, 0x0FF3, 0x0FF6,
+ 0x0FDB, 0x0078, 0x0237, 0x018B, 0x0FF6, 0x0FF5,
+ 0x0FDC, 0x0072, 0x0233, 0x0192, 0x0FF9, 0x0FF4,
+ 0x0FDD, 0x006C, 0x022F, 0x0199, 0x0FFC, 0x0FF3,
+ 0x0FDD, 0x0065, 0x022A, 0x01A3, 0x0FFF, 0x0FF2,
+ 0x0FDE, 0x005F, 0x0226, 0x01AA, 0x0002, 0x0FF1,
+ 0x0FDF, 0x005A, 0x0221, 0x01B0, 0x0006, 0x0FF0,
+ 0x0FE0, 0x0054, 0x021C, 0x01B7, 0x0009, 0x0FF0,
+ 0x0FE1, 0x004E, 0x0217, 0x01BE, 0x000D, 0x0FEF,
+ 0x0FE2, 0x0048, 0x0212, 0x01C6, 0x0010, 0x0FEE,
+ 0x0FE3, 0x0043, 0x020C, 0x01CD, 0x0014, 0x0FED,
+ 0x0FE4, 0x003E, 0x0207, 0x01D3, 0x0018, 0x0FEC,
+ 0x0FE5, 0x0039, 0x0200, 0x01DA, 0x001D, 0x0FEB,
+ 0x0FE6, 0x0034, 0x01FA, 0x01E1, 0x0021, 0x0FEA,
+ 0x0FE7, 0x002F, 0x01F5, 0x01E7, 0x0025, 0x0FE9,
+ 0x0FE8, 0x002A, 0x01EE, 0x01EE, 0x002A, 0x0FE8,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.7_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.700000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_70[198] = {
+ 0x0FC0, 0x00DA, 0x02CC, 0x00DA, 0x0FC0, 0x0000,
+ 0x0FC1, 0x00D0, 0x02CC, 0x00E4, 0x0FBF, 0x0000,
+ 0x0FC2, 0x00C6, 0x02CB, 0x00EF, 0x0FBE, 0x0000,
+ 0x0FC3, 0x00BC, 0x02CA, 0x00F9, 0x0FBE, 0x0000,
+ 0x0FC4, 0x00B2, 0x02C9, 0x0104, 0x0FBD, 0x0000,
+ 0x0FC5, 0x00A8, 0x02C7, 0x010F, 0x0FBD, 0x0000,
+ 0x0FC7, 0x009F, 0x02C5, 0x0119, 0x0FBC, 0x0000,
+ 0x0FC8, 0x0095, 0x02C3, 0x0124, 0x0FBC, 0x0000,
+ 0x0FC9, 0x008C, 0x02C0, 0x012F, 0x0FBC, 0x0000,
+ 0x0FCB, 0x0083, 0x02BD, 0x0139, 0x0FBC, 0x0000,
+ 0x0FCC, 0x007A, 0x02BA, 0x0144, 0x0FBC, 0x0000,
+ 0x0FCE, 0x0072, 0x02B6, 0x014D, 0x0FBD, 0x0000,
+ 0x0FD0, 0x0069, 0x02B2, 0x0159, 0x0FBD, 0x0FFF,
+ 0x0FD1, 0x0061, 0x02AD, 0x0164, 0x0FBE, 0x0FFF,
+ 0x0FD3, 0x0059, 0x02A9, 0x016E, 0x0FBF, 0x0FFE,
+ 0x0FD4, 0x0051, 0x02A4, 0x017A, 0x0FBF, 0x0FFE,
+ 0x0FD6, 0x0049, 0x029E, 0x0184, 0x0FC1, 0x0FFE,
+ 0x0FD8, 0x0042, 0x0299, 0x018E, 0x0FC2, 0x0FFD,
+ 0x0FD9, 0x003A, 0x0293, 0x019B, 0x0FC3, 0x0FFC,
+ 0x0FDB, 0x0033, 0x028D, 0x01A4, 0x0FC5, 0x0FFC,
+ 0x0FDC, 0x002D, 0x0286, 0x01AF, 0x0FC7, 0x0FFB,
+ 0x0FDE, 0x0026, 0x0280, 0x01BA, 0x0FC8, 0x0FFA,
+ 0x0FE0, 0x001F, 0x0279, 0x01C4, 0x0FCB, 0x0FF9,
+ 0x0FE1, 0x0019, 0x0272, 0x01CE, 0x0FCD, 0x0FF9,
+ 0x0FE3, 0x0013, 0x026A, 0x01D9, 0x0FCF, 0x0FF8,
+ 0x0FE4, 0x000D, 0x0263, 0x01E3, 0x0FD2, 0x0FF7,
+ 0x0FE6, 0x0008, 0x025B, 0x01EC, 0x0FD5, 0x0FF6,
+ 0x0FE7, 0x0002, 0x0253, 0x01F7, 0x0FD8, 0x0FF5,
+ 0x0FE9, 0x0FFD, 0x024A, 0x0202, 0x0FDB, 0x0FF3,
+ 0x0FEA, 0x0FF8, 0x0242, 0x020B, 0x0FDF, 0x0FF2,
+ 0x0FEC, 0x0FF3, 0x0239, 0x0215, 0x0FE2, 0x0FF1,
+ 0x0FED, 0x0FEF, 0x0230, 0x021E, 0x0FE6, 0x0FF0,
+ 0x0FEF, 0x0FEB, 0x0226, 0x0226, 0x0FEB, 0x0FEF,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.8_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.800000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_80[198] = {
+ 0x0FBF, 0x00A1, 0x0340, 0x00A1, 0x0FBF, 0x0000,
+ 0x0FC1, 0x0095, 0x0340, 0x00AD, 0x0FBC, 0x0001,
+ 0x0FC4, 0x0089, 0x033E, 0x00BA, 0x0FBA, 0x0001,
+ 0x0FC6, 0x007D, 0x033D, 0x00C6, 0x0FB8, 0x0002,
+ 0x0FC9, 0x0072, 0x033A, 0x00D3, 0x0FB6, 0x0002,
+ 0x0FCC, 0x0067, 0x0338, 0x00DF, 0x0FB3, 0x0003,
+ 0x0FCE, 0x005C, 0x0334, 0x00EE, 0x0FB1, 0x0003,
+ 0x0FD1, 0x0051, 0x0331, 0x00FA, 0x0FAF, 0x0004,
+ 0x0FD3, 0x0047, 0x032D, 0x0108, 0x0FAD, 0x0004,
+ 0x0FD6, 0x003D, 0x0328, 0x0116, 0x0FAB, 0x0004,
+ 0x0FD8, 0x0033, 0x0323, 0x0123, 0x0FAA, 0x0005,
+ 0x0FDB, 0x002A, 0x031D, 0x0131, 0x0FA8, 0x0005,
+ 0x0FDD, 0x0021, 0x0317, 0x013F, 0x0FA7, 0x0005,
+ 0x0FDF, 0x0018, 0x0311, 0x014D, 0x0FA5, 0x0006,
+ 0x0FE2, 0x0010, 0x030A, 0x015A, 0x0FA4, 0x0006,
+ 0x0FE4, 0x0008, 0x0302, 0x0169, 0x0FA3, 0x0006,
+ 0x0FE6, 0x0000, 0x02FB, 0x0177, 0x0FA2, 0x0006,
+ 0x0FE8, 0x0FF9, 0x02F3, 0x0185, 0x0FA1, 0x0006,
+ 0x0FEB, 0x0FF1, 0x02EA, 0x0193, 0x0FA1, 0x0006,
+ 0x0FED, 0x0FEB, 0x02E1, 0x01A1, 0x0FA0, 0x0006,
+ 0x0FEE, 0x0FE4, 0x02D8, 0x01B0, 0x0FA0, 0x0006,
+ 0x0FF0, 0x0FDE, 0x02CE, 0x01BE, 0x0FA0, 0x0006,
+ 0x0FF2, 0x0FD8, 0x02C5, 0x01CB, 0x0FA0, 0x0006,
+ 0x0FF4, 0x0FD3, 0x02BA, 0x01D8, 0x0FA1, 0x0006,
+ 0x0FF6, 0x0FCD, 0x02B0, 0x01E7, 0x0FA1, 0x0005,
+ 0x0FF7, 0x0FC8, 0x02A5, 0x01F5, 0x0FA2, 0x0005,
+ 0x0FF9, 0x0FC4, 0x029A, 0x0202, 0x0FA3, 0x0004,
+ 0x0FFA, 0x0FC0, 0x028E, 0x0210, 0x0FA4, 0x0004,
+ 0x0FFB, 0x0FBC, 0x0283, 0x021D, 0x0FA6, 0x0003,
+ 0x0FFD, 0x0FB8, 0x0276, 0x022A, 0x0FA8, 0x0003,
+ 0x0FFE, 0x0FB4, 0x026B, 0x0237, 0x0FAA, 0x0002,
+ 0x0FFF, 0x0FB1, 0x025E, 0x0245, 0x0FAC, 0x0001,
+ 0x0000, 0x0FAE, 0x0252, 0x0252, 0x0FAE, 0x0000,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_0.9_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 0.900000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_0_90[198] = {
+ 0x0FD8, 0x0055, 0x03A7, 0x0054, 0x0FD8, 0x0000,
+ 0x0FDB, 0x0047, 0x03A7, 0x0063, 0x0FD4, 0x0000,
+ 0x0FDF, 0x003B, 0x03A5, 0x006F, 0x0FD1, 0x0001,
+ 0x0FE2, 0x002E, 0x03A3, 0x007E, 0x0FCD, 0x0002,
+ 0x0FE5, 0x0022, 0x03A0, 0x008D, 0x0FCA, 0x0002,
+ 0x0FE8, 0x0017, 0x039D, 0x009B, 0x0FC6, 0x0003,
+ 0x0FEB, 0x000C, 0x0398, 0x00AC, 0x0FC2, 0x0003,
+ 0x0FEE, 0x0001, 0x0394, 0x00BA, 0x0FBF, 0x0004,
+ 0x0FF1, 0x0FF7, 0x038E, 0x00CA, 0x0FBB, 0x0005,
+ 0x0FF4, 0x0FED, 0x0388, 0x00DA, 0x0FB8, 0x0005,
+ 0x0FF6, 0x0FE4, 0x0381, 0x00EB, 0x0FB4, 0x0006,
+ 0x0FF9, 0x0FDB, 0x037A, 0x00FA, 0x0FB1, 0x0007,
+ 0x0FFB, 0x0FD3, 0x0372, 0x010B, 0x0FAD, 0x0008,
+ 0x0FFD, 0x0FCB, 0x0369, 0x011D, 0x0FAA, 0x0008,
+ 0x0000, 0x0FC3, 0x0360, 0x012E, 0x0FA6, 0x0009,
+ 0x0002, 0x0FBC, 0x0356, 0x013F, 0x0FA3, 0x000A,
+ 0x0003, 0x0FB6, 0x034C, 0x0150, 0x0FA0, 0x000B,
+ 0x0005, 0x0FB0, 0x0341, 0x0162, 0x0F9D, 0x000B,
+ 0x0007, 0x0FAA, 0x0336, 0x0173, 0x0F9A, 0x000C,
+ 0x0008, 0x0FA5, 0x032A, 0x0185, 0x0F97, 0x000D,
+ 0x000A, 0x0FA0, 0x031E, 0x0197, 0x0F94, 0x000D,
+ 0x000B, 0x0F9B, 0x0311, 0x01A9, 0x0F92, 0x000E,
+ 0x000C, 0x0F97, 0x0303, 0x01BC, 0x0F8F, 0x000F,
+ 0x000D, 0x0F94, 0x02F6, 0x01CD, 0x0F8D, 0x000F,
+ 0x000E, 0x0F91, 0x02E8, 0x01DE, 0x0F8B, 0x0010,
+ 0x000F, 0x0F8E, 0x02D9, 0x01F1, 0x0F89, 0x0010,
+ 0x0010, 0x0F8B, 0x02CA, 0x0202, 0x0F88, 0x0011,
+ 0x0010, 0x0F89, 0x02BB, 0x0214, 0x0F87, 0x0011,
+ 0x0011, 0x0F87, 0x02AB, 0x0226, 0x0F86, 0x0011,
+ 0x0011, 0x0F86, 0x029C, 0x0236, 0x0F85, 0x0012,
+ 0x0011, 0x0F85, 0x028B, 0x0249, 0x0F84, 0x0012,
+ 0x0012, 0x0F84, 0x027B, 0x0259, 0x0F84, 0x0012,
+ 0x0012, 0x0F84, 0x026A, 0x026A, 0x0F84, 0x0012,
+};
+
+//========================================================
+// <using> gen_scaler_coeffs_cnf_file.m
+// <using> make_test_script.m
+// <date> 02-Apr-2024
+// <coeffDescrip> 6t_64p_LanczosEd_p_1_p_10qb_
+// <num_taps> 6
+// <num_phases> 64
+// <scale_ratio> input/output = 1.000000000000
+// <CoefType> LanczosEd
+// <CoefQuant> S1.10
+//========================================================
+static const uint16_t easf_filter_6tap_64p_ratio_1_00[198] = {
+ 0x0000, 0x0000, 0x0400, 0x0000, 0x0000, 0x0000,
+ 0x0003, 0x0FF3, 0x0400, 0x000D, 0x0FFD, 0x0000,
+ 0x0006, 0x0FE7, 0x03FE, 0x001C, 0x0FF9, 0x0000,
+ 0x0009, 0x0FDB, 0x03FC, 0x002B, 0x0FF5, 0x0000,
+ 0x000C, 0x0FD0, 0x03F9, 0x003A, 0x0FF1, 0x0000,
+ 0x000E, 0x0FC5, 0x03F5, 0x004A, 0x0FED, 0x0001,
+ 0x0011, 0x0FBB, 0x03F0, 0x005A, 0x0FE9, 0x0001,
+ 0x0013, 0x0FB2, 0x03EB, 0x006A, 0x0FE5, 0x0001,
+ 0x0015, 0x0FA9, 0x03E4, 0x007B, 0x0FE1, 0x0002,
+ 0x0017, 0x0FA1, 0x03DD, 0x008D, 0x0FDC, 0x0002,
+ 0x0018, 0x0F99, 0x03D4, 0x00A0, 0x0FD8, 0x0003,
+ 0x001A, 0x0F92, 0x03CB, 0x00B2, 0x0FD3, 0x0004,
+ 0x001B, 0x0F8C, 0x03C1, 0x00C6, 0x0FCE, 0x0004,
+ 0x001C, 0x0F86, 0x03B7, 0x00D9, 0x0FC9, 0x0005,
+ 0x001D, 0x0F80, 0x03AB, 0x00EE, 0x0FC4, 0x0006,
+ 0x001E, 0x0F7C, 0x039F, 0x0101, 0x0FBF, 0x0007,
+ 0x001F, 0x0F78, 0x0392, 0x0115, 0x0FBA, 0x0008,
+ 0x001F, 0x0F74, 0x0385, 0x012B, 0x0FB5, 0x0008,
+ 0x0020, 0x0F71, 0x0376, 0x0140, 0x0FB0, 0x0009,
+ 0x0020, 0x0F6E, 0x0367, 0x0155, 0x0FAB, 0x000B,
+ 0x0020, 0x0F6C, 0x0357, 0x016B, 0x0FA6, 0x000C,
+ 0x0020, 0x0F6A, 0x0347, 0x0180, 0x0FA2, 0x000D,
+ 0x0020, 0x0F69, 0x0336, 0x0196, 0x0F9D, 0x000E,
+ 0x0020, 0x0F69, 0x0325, 0x01AB, 0x0F98, 0x000F,
+ 0x001F, 0x0F68, 0x0313, 0x01C3, 0x0F93, 0x0010,
+ 0x001F, 0x0F69, 0x0300, 0x01D8, 0x0F8F, 0x0011,
+ 0x001E, 0x0F69, 0x02ED, 0x01EF, 0x0F8B, 0x0012,
+ 0x001D, 0x0F6A, 0x02D9, 0x0205, 0x0F87, 0x0014,
+ 0x001D, 0x0F6C, 0x02C5, 0x021A, 0x0F83, 0x0015,
+ 0x001C, 0x0F6E, 0x02B1, 0x0230, 0x0F7F, 0x0016,
+ 0x001B, 0x0F70, 0x029C, 0x0247, 0x0F7B, 0x0017,
+ 0x001A, 0x0F72, 0x0287, 0x025D, 0x0F78, 0x0018,
+ 0x0019, 0x0F75, 0x0272, 0x0272, 0x0F75, 0x0019,
+};
+
+/*
+ * Scaler coefficient tables converted from S1.10 to S1.12 format,
+ * populated at init time by spl_init_easf_filter_coeffs().
+ */
+static uint16_t easf_filter_3tap_64p_ratio_0_30_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_40_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_50_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_60_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_70_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_80_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_0_90_s1_12[99];
+static uint16_t easf_filter_3tap_64p_ratio_1_00_s1_12[99];
+static uint16_t easf_filter_4tap_64p_ratio_0_30_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_40_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_50_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_60_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_70_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_80_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_0_90_s1_12[132];
+static uint16_t easf_filter_4tap_64p_ratio_1_00_s1_12[132];
+static uint16_t easf_filter_6tap_64p_ratio_0_30_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_40_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_50_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_60_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_70_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_80_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_0_90_s1_12[198];
+static uint16_t easf_filter_6tap_64p_ratio_1_00_s1_12[198];
+
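+/*
+ * Each lookup entry is { ratio numerator, ratio denominator, register
+ * value }, in ascending ratio order.  The first entry whose ratio
+ * exceeds the given scale ratio supplies the value; the {-1, -1}
+ * sentinel terminates the table and provides the default for all
+ * larger ratios.
+ */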
+struct scale_ratio_to_reg_value_lookup easf_v_bf3_mode_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x0000},
+ {1, 1, 0x0000},
+ {-1, -1, 0x0002},
+};
+
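+/* The horizontal table mirrors the vertical one; presumably kept separate to allow per-axis tuning */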
+struct scale_ratio_to_reg_value_lookup easf_h_bf3_mode_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x0000},
+ {1, 1, 0x0000},
+ {-1, -1, 0x0002},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_6tap_lookup[] = {
+ {3, 10, 0x4100},
+ {4, 10, 0x4100},
+ {5, 10, 0x4100},
+ {6, 10, 0x4100},
+ {7, 10, 0x4100},
+ {8, 10, 0x4100},
+ {9, 10, 0x4100},
+ {1, 1, 0x4100},
+ {-1, -1, 0x4100},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_6tap_lookup[] = {
+ {3, 10, 0x4000},
+ {4, 10, 0x4000},
+ {5, 10, 0x4000},
+ {6, 10, 0x4000},
+ {7, 10, 0x4000},
+ {8, 10, 0x4000},
+ {9, 10, 0x4000},
+ {1, 1, 0x4000},
+ {-1, -1, 0x4000},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring6_6tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x251F},
+ {5, 10, 0x291F},
+ {6, 10, 0xA51F},
+ {7, 10, 0xA51F},
+ {8, 10, 0xAA66},
+ {9, 10, 0xA51F},
+ {1, 1, 0xA640},
+ {-1, -1, 0xA640},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring4_6tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x9600},
+ {5, 10, 0xA460},
+ {6, 10, 0xA8E0},
+ {7, 10, 0xAC00},
+ {8, 10, 0xAD20},
+ {9, 10, 0xAFC0},
+ {1, 1, 0xB058},
+ {-1, -1, 0xB058},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain6_4tap_lookup[] = {
+ {3, 10, 0x4100},
+ {4, 10, 0x4100},
+ {5, 10, 0x4100},
+ {6, 10, 0x4100},
+ {7, 10, 0x4100},
+ {8, 10, 0x4100},
+ {9, 10, 0x4100},
+ {1, 1, 0x4100},
+ {-1, -1, 0x4100},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_reducer_gain4_4tap_lookup[] = {
+ {3, 10, 0x4000},
+ {4, 10, 0x4000},
+ {5, 10, 0x4000},
+ {6, 10, 0x4000},
+ {7, 10, 0x4000},
+ {8, 10, 0x4000},
+ {9, 10, 0x4000},
+ {1, 1, 0x4000},
+ {-1, -1, 0x4000},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring6_4tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x0000},
+ {1, 1, 0x0000},
+ {-1, -1, 0x0000},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_gain_ring4_4tap_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x9900},
+ {7, 10, 0xA100},
+ {8, 10, 0xA8C0},
+ {9, 10, 0xAB20},
+ {1, 1, 0xAC00},
+ {-1, -1, 0xAC00},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_uptilt_offset_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x4100},
+ {9, 10, 0x9F00},
+ {1, 1, 0xA4C0},
+ {-1, -1, 0xA8D8},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt_maxval_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x4000},
+ {9, 10, 0x24FE},
+ {1, 1, 0x2D64},
+ {-1, -1, 0x3ADB},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_dntilt_slope_lookup[] = {
+ {3, 10, 0x3800},
+ {4, 10, 0x3800},
+ {5, 10, 0x3800},
+ {6, 10, 0x3800},
+ {7, 10, 0x3800},
+ {8, 10, 0x3886},
+ {9, 10, 0x3940},
+ {1, 1, 0x3A4E},
+ {-1, -1, 0x3B66},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt1_slope_lookup[] = {
+ {3, 10, 0x3800},
+ {4, 10, 0x3800},
+ {5, 10, 0x3800},
+ {6, 10, 0x3800},
+ {7, 10, 0x3800},
+ {8, 10, 0x36F4},
+ {9, 10, 0x359C},
+ {1, 1, 0x3360},
+ {-1, -1, 0x2F20},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_slope_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x359C},
+ {1, 1, 0x31F0},
+ {-1, -1, 0x1F00},
+};
+
+struct scale_ratio_to_reg_value_lookup easf_3tap_uptilt2_offset_lookup[] = {
+ {3, 10, 0x0000},
+ {4, 10, 0x0000},
+ {5, 10, 0x0000},
+ {6, 10, 0x0000},
+ {7, 10, 0x0000},
+ {8, 10, 0x0000},
+ {9, 10, 0x9F00},
+ {1, 1, 0xA400},
+ {-1, -1, 0x9E00},
+};
+
+void spl_init_easf_filter_coeffs(void)
+{
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_30,
+ easf_filter_3tap_64p_ratio_0_30_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_40,
+ easf_filter_3tap_64p_ratio_0_40_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_50,
+ easf_filter_3tap_64p_ratio_0_50_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_60,
+ easf_filter_3tap_64p_ratio_0_60_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_70,
+ easf_filter_3tap_64p_ratio_0_70_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_80,
+ easf_filter_3tap_64p_ratio_0_80_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_0_90,
+ easf_filter_3tap_64p_ratio_0_90_s1_12, 3);
+ convert_filter_s1_10_to_s1_12(easf_filter_3tap_64p_ratio_1_00,
+ easf_filter_3tap_64p_ratio_1_00_s1_12, 3);
+
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_30,
+ easf_filter_4tap_64p_ratio_0_30_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_40,
+ easf_filter_4tap_64p_ratio_0_40_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_50,
+ easf_filter_4tap_64p_ratio_0_50_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_60,
+ easf_filter_4tap_64p_ratio_0_60_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_70,
+ easf_filter_4tap_64p_ratio_0_70_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_80,
+ easf_filter_4tap_64p_ratio_0_80_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_0_90,
+ easf_filter_4tap_64p_ratio_0_90_s1_12, 4);
+ convert_filter_s1_10_to_s1_12(easf_filter_4tap_64p_ratio_1_00,
+ easf_filter_4tap_64p_ratio_1_00_s1_12, 4);
+
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_30,
+ easf_filter_6tap_64p_ratio_0_30_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_40,
+ easf_filter_6tap_64p_ratio_0_40_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_50,
+ easf_filter_6tap_64p_ratio_0_50_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_60,
+ easf_filter_6tap_64p_ratio_0_60_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_70,
+ easf_filter_6tap_64p_ratio_0_70_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_80,
+ easf_filter_6tap_64p_ratio_0_80_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_0_90,
+ easf_filter_6tap_64p_ratio_0_90_s1_12, 6);
+ convert_filter_s1_10_to_s1_12(easf_filter_6tap_64p_ratio_1_00,
+ easf_filter_6tap_64p_ratio_1_00_s1_12, 6);
+}
+
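+/*
+ * Pick the pre-converted S1.12 table generated for the upper bound of
+ * the ratio bucket (ratio here is output/input); anything >= 0.9 uses
+ * the 1.00 table.
+ */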
+uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_3tap_64p_ratio_0_30_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_3tap_64p_ratio_0_40_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_3tap_64p_ratio_0_50_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_3tap_64p_ratio_0_60_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_3tap_64p_ratio_0_70_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_3tap_64p_ratio_0_80_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_3tap_64p_ratio_0_90_s1_12;
+ else
+ return easf_filter_3tap_64p_ratio_1_00_s1_12;
+}
+
+uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_4tap_64p_ratio_0_30_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_4tap_64p_ratio_0_40_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_4tap_64p_ratio_0_50_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_4tap_64p_ratio_0_60_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_4tap_64p_ratio_0_70_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_4tap_64p_ratio_0_80_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_4tap_64p_ratio_0_90_s1_12;
+ else
+ return easf_filter_4tap_64p_ratio_1_00_s1_12;
+}
+
+uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio)
+{
+ if (ratio.value < spl_fixpt_from_fraction(3, 10).value)
+ return easf_filter_6tap_64p_ratio_0_30_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(4, 10).value)
+ return easf_filter_6tap_64p_ratio_0_40_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(5, 10).value)
+ return easf_filter_6tap_64p_ratio_0_50_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(6, 10).value)
+ return easf_filter_6tap_64p_ratio_0_60_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(7, 10).value)
+ return easf_filter_6tap_64p_ratio_0_70_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(8, 10).value)
+ return easf_filter_6tap_64p_ratio_0_80_s1_12;
+ else if (ratio.value < spl_fixpt_from_fraction(9, 10).value)
+ return easf_filter_6tap_64p_ratio_0_90_s1_12;
+ else
+ return easf_filter_6tap_64p_ratio_1_00_s1_12;
+}
+
+uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
+{
+ if (taps == 6) {
+ return spl_get_easf_filter_6tap_64p(ratio);
+ } else if (taps == 4) {
+ return spl_get_easf_filter_4tap_64p(ratio);
+ } else if (taps == 3) {
+ return spl_get_easf_filter_3tap_64p(ratio);
+ } else {
+ /* should never happen, bug */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
+void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data, bool enable_easf_v,
+ bool enable_easf_h)
+{
+ /*
+ * The old (non-EASF) coefficients were generated for scaling ratio =
+ * input / output; the new EASF coefficients are generated for
+ * ratio = output / input, hence the reciprocal ratios used below.
+ */
+ if (enable_easf_h) {
+ dscl_prog_data->filter_h = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.h_taps, data->recip_ratios.horz);
+
+ dscl_prog_data->filter_h_c = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.h_taps_c, data->recip_ratios.horz_c);
+ } else {
+ dscl_prog_data->filter_h = spl_dscl_get_filter_coeffs_64p(
+ data->taps.h_taps, data->ratios.horz);
+
+ dscl_prog_data->filter_h_c = spl_dscl_get_filter_coeffs_64p(
+ data->taps.h_taps_c, data->ratios.horz_c);
+ }
+ if (enable_easf_v) {
+ dscl_prog_data->filter_v = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.v_taps, data->recip_ratios.vert);
+
+ dscl_prog_data->filter_v_c = spl_dscl_get_easf_filter_coeffs_64p(
+ data->taps.v_taps_c, data->recip_ratios.vert_c);
+ } else {
+ dscl_prog_data->filter_v = spl_dscl_get_filter_coeffs_64p(
+ data->taps.v_taps, data->ratios.vert);
+
+ dscl_prog_data->filter_v_c = spl_dscl_get_filter_coeffs_64p(
+ data->taps.v_taps_c, data->ratios.vert_c);
+ }
+}
+
+static uint32_t spl_easf_get_scale_ratio_to_reg_value(struct spl_fixed31_32 ratio,
+ struct scale_ratio_to_reg_value_lookup *lookup_table_base_ptr,
+ unsigned int num_entries)
+{
+ unsigned int count = 0;
+ uint32_t value = 0;
+ struct scale_ratio_to_reg_value_lookup *lookup_table_index_ptr;
+
+ lookup_table_index_ptr = (lookup_table_base_ptr + num_entries - 1);
+ value = lookup_table_index_ptr->reg_value;
+
+ while (count < num_entries) {
+ lookup_table_index_ptr = (lookup_table_base_ptr + count);
+ if (lookup_table_index_ptr->numer < 0)
+ break;
+
+ if (ratio.value < spl_fixpt_from_fraction(
+ lookup_table_index_ptr->numer,
+ lookup_table_index_ptr->denom).value) {
+ value = lookup_table_index_ptr->reg_value;
+ break;
+ }
+
+ count++;
+ }
+ return value;
+}
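+
+/*
+ * Example: with easf_gain_ring4_6tap_lookup and ratio = 0.55, the first
+ * entry satisfying 0.55 < numer/denom is {6, 10, 0xA8E0}, so 0xA8E0 is
+ * returned; ratios >= 1.0 take the {-1, -1} sentinel's default value.
+ */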
+uint32_t spl_get_v_bf3_mode(struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries = sizeof(easf_v_bf3_mode_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_v_bf3_mode_lookup, num_entries);
+ return value;
+}
+
+uint32_t spl_get_h_bf3_mode(struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries = sizeof(easf_h_bf3_mode_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_h_bf3_mode_lookup, num_entries);
+ return value;
+}
+
+uint32_t spl_get_reducer_gain6(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_reducer_gain6_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain6_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_reducer_gain6_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain6_6tap_lookup, num_entries);
+ } else {
+ value = 0;
+ }
+ return value;
+}
+
+uint32_t spl_get_reducer_gain4(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_reducer_gain4_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain4_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_reducer_gain4_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_reducer_gain4_6tap_lookup, num_entries);
+ } else {
+ value = 0;
+ }
+ return value;
+}
+
+uint32_t spl_get_gainRing6(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_gain_ring6_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring6_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_gain_ring6_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring6_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_gainRing4(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 4) {
+ num_entries = sizeof(easf_gain_ring4_4tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring4_4tap_lookup, num_entries);
+ } else if (taps == 6) {
+ num_entries = sizeof(easf_gain_ring4_6tap_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_gain_ring4_6tap_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_dntilt_uptilt_offset(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_dntilt_uptilt_offset_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_dntilt_uptilt_offset_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_uptilt_maxval(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt_maxval_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt_maxval_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_dntilt_slope(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_dntilt_slope_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_dntilt_slope_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt1_slope_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt1_slope_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt2_slope_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt2_slope_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
+uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio)
+{
+ uint32_t value;
+ unsigned int num_entries;
+
+ if (taps == 3) {
+ num_entries = sizeof(easf_3tap_uptilt2_offset_lookup) /
+ sizeof(struct scale_ratio_to_reg_value_lookup);
+ value = spl_easf_get_scale_ratio_to_reg_value(ratio,
+ easf_3tap_uptilt2_offset_lookup, num_entries);
+ } else
+ value = 0;
+ return value;
+}
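A hypothetical caller sketch (not part of the patch) showing how these
tap-gated getters are meant to be used; each one gates on the tap count first,
then maps the scale ratio through its lookup table:

	uint32_t gain6 = spl_get_reducer_gain6(data->taps.v_taps, data->ratios.vert);
	uint32_t ring6 = spl_get_gainRing6(data->taps.v_taps, data->ratios.vert);
	/* unsupported tap counts return 0, so callers need no extra guard */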
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h
new file mode 100644
index 000000000000..8bb2b8108e38
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_easf_filters.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __DC_SPL_SCL_EASF_FILTERS_H__
+#define __DC_SPL_SCL_EASF_FILTERS_H__
+
+#include "dc_spl_types.h"
+
+struct scale_ratio_to_reg_value_lookup {
+ int numer;
+ int denom;
+ const uint32_t reg_value;
+};
+
+void spl_init_easf_filter_coeffs(void);
+uint16_t *spl_get_easf_filter_3tap_64p(struct spl_fixed31_32 ratio);
+uint16_t *spl_get_easf_filter_4tap_64p(struct spl_fixed31_32 ratio);
+uint16_t *spl_get_easf_filter_6tap_64p(struct spl_fixed31_32 ratio);
+uint16_t *spl_dscl_get_easf_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
+void spl_set_filters_data(struct dscl_prog_data *dscl_prog_data,
+ const struct spl_scaler_data *data, bool enable_easf_v,
+ bool enable_easf_h);
+
+uint32_t spl_get_v_bf3_mode(struct spl_fixed31_32 ratio);
+uint32_t spl_get_h_bf3_mode(struct spl_fixed31_32 ratio);
+uint32_t spl_get_reducer_gain6(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_reducer_gain4(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_gainRing6(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_gainRing4(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_dntilt_uptilt_offset(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt_maxval(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_dntilt_slope(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt1_slope(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt2_slope(int taps, struct spl_fixed31_32 ratio);
+uint32_t spl_get_3tap_uptilt2_offset(int taps, struct spl_fixed31_32 ratio);
+
+#endif /* __DC_SPL_SCL_EASF_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
index e2baaf584139..b02c7b0b262b 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.c
@@ -2,6 +2,7 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
+#include "spl_debug.h"
#include "dc_spl_scl_filters.h"
//=========================================
// <num_taps> = 2
@@ -1317,97 +1318,97 @@ static const uint16_t filter_8tap_64p_183[264] = {
0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
};
-const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_3tap_16p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_3tap_16p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_3tap_16p_149;
else
return filter_3tap_16p_183;
}
-const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_3tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_3tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_3tap_64p_149;
else
return filter_3tap_64p_183;
}
-const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_4tap_16p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_4tap_16p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_4tap_16p_149;
else
return filter_4tap_16p_183;
}
-const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_4tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_4tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_4tap_64p_149;
else
return filter_4tap_64p_183;
}
-const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_5tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_5tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_5tap_64p_149;
else
return filter_5tap_64p_183;
}
-const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_6tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_6tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_6tap_64p_149;
else
return filter_6tap_64p_183;
}
-const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_7tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_7tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_7tap_64p_149;
else
return filter_7tap_64p_183;
}
-const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio)
+const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio)
{
- if (ratio.value < dc_fixpt_one.value)
+ if (ratio.value < spl_fixpt_one.value)
return filter_8tap_64p_upscale;
- else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(4, 3).value)
return filter_8tap_64p_116;
- else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
+ else if (ratio.value < spl_fixpt_from_fraction(5, 3).value)
return filter_8tap_64p_149;
else
return filter_8tap_64p_183;
@@ -1422,3 +1423,29 @@ const uint16_t *spl_get_filter_2tap_64p(void)
{
return filter_2tap_64p;
}
+
+const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio)
+{
+ if (taps == 8)
+ return spl_get_filter_8tap_64p(ratio);
+ else if (taps == 7)
+ return spl_get_filter_7tap_64p(ratio);
+ else if (taps == 6)
+ return spl_get_filter_6tap_64p(ratio);
+ else if (taps == 5)
+ return spl_get_filter_5tap_64p(ratio);
+ else if (taps == 4)
+ return spl_get_filter_4tap_64p(ratio);
+ else if (taps == 3)
+ return spl_get_filter_3tap_64p(ratio);
+ else if (taps == 2)
+ return spl_get_filter_2tap_64p();
+ else if (taps == 1)
+ return NULL;
+ else {
+		/* should never happen; indicates a bug in the caller */
+ SPL_BREAK_TO_DEBUGGER();
+ return NULL;
+ }
+}
+
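A short usage sketch for the new dispatch helper above (values illustrative):

	struct spl_fixed31_32 ratio = spl_fixpt_from_fraction(3, 2);
	const uint16_t *coeffs = spl_dscl_get_filter_coeffs_64p(4, ratio);
	/* 3/2 lies in the [4/3, 5/3) band, so this selects filter_4tap_64p_149;
	 * taps == 1 returns NULL because scaler bypass needs no coefficients
	 */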
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
index 6d96aca53b24..48202bc4f81e 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters.h
@@ -7,53 +7,16 @@
#include "dc_spl_types.h"
-const uint16_t *spl_get_filter_3tap_16p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_3tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_16p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_5tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_6tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_7tap_64p(struct fixed31_32 ratio);
-const uint16_t *spl_get_filter_8tap_64p(struct fixed31_32 ratio);
+const uint16_t *spl_get_filter_3tap_16p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_3tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_4tap_16p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_4tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_5tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_6tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_7tap_64p(struct spl_fixed31_32 ratio);
+const uint16_t *spl_get_filter_8tap_64p(struct spl_fixed31_32 ratio);
const uint16_t *spl_get_filter_2tap_16p(void);
const uint16_t *spl_get_filter_2tap_64p(void);
-const uint16_t *spl_get_filter_3tap_16p_upscale(void);
-const uint16_t *spl_get_filter_3tap_16p_116(void);
-const uint16_t *spl_get_filter_3tap_16p_149(void);
-const uint16_t *spl_get_filter_3tap_16p_183(void);
+const uint16_t *spl_dscl_get_filter_coeffs_64p(int taps, struct spl_fixed31_32 ratio);
-const uint16_t *spl_get_filter_4tap_16p_upscale(void);
-const uint16_t *spl_get_filter_4tap_16p_116(void);
-const uint16_t *spl_get_filter_4tap_16p_149(void);
-const uint16_t *spl_get_filter_4tap_16p_183(void);
-
-const uint16_t *spl_get_filter_3tap_64p_upscale(void);
-const uint16_t *spl_get_filter_3tap_64p_116(void);
-const uint16_t *spl_get_filter_3tap_64p_149(void);
-const uint16_t *spl_get_filter_3tap_64p_183(void);
-
-const uint16_t *spl_get_filter_4tap_64p_upscale(void);
-const uint16_t *spl_get_filter_4tap_64p_116(void);
-const uint16_t *spl_get_filter_4tap_64p_149(void);
-const uint16_t *spl_get_filter_4tap_64p_183(void);
-
-const uint16_t *spl_get_filter_5tap_64p_upscale(void);
-const uint16_t *spl_get_filter_5tap_64p_116(void);
-const uint16_t *spl_get_filter_5tap_64p_149(void);
-const uint16_t *spl_get_filter_5tap_64p_183(void);
-
-const uint16_t *spl_get_filter_6tap_64p_upscale(void);
-const uint16_t *spl_get_filter_6tap_64p_116(void);
-const uint16_t *spl_get_filter_6tap_64p_149(void);
-const uint16_t *spl_get_filter_6tap_64p_183(void);
-
-const uint16_t *spl_get_filter_7tap_64p_upscale(void);
-const uint16_t *spl_get_filter_7tap_64p_116(void);
-const uint16_t *spl_get_filter_7tap_64p_149(void);
-const uint16_t *spl_get_filter_7tap_64p_183(void);
-
-const uint16_t *spl_get_filter_8tap_64p_upscale(void);
-const uint16_t *spl_get_filter_8tap_64p_116(void);
-const uint16_t *spl_get_filter_8tap_64p_149(void);
-const uint16_t *spl_get_filter_8tap_64p_183(void);
#endif /* __DC_SPL_SCL_FILTERS_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters_old.c b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters_old.c
deleted file mode 100644
index bb0e1b80ec3c..000000000000
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_scl_filters_old.c
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
-
diff --git a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
index 36d10b0f2eed..85b19ebe2c57 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/dc_spl_types.h
@@ -2,14 +2,16 @@
//
// Copyright 2024 Advanced Micro Devices, Inc.
-#include "os_types.h" // swap
-#ifndef ASSERT
-#define ASSERT(_bool) ((void *)0)
-#endif
-#include "include/fixed31_32.h" // fixed31_32 and related functions
#ifndef __DC_SPL_TYPES_H__
#define __DC_SPL_TYPES_H__
+#include "spl_os_types.h" // swap
+#ifndef SPL_ASSERT
+#define SPL_ASSERT(_bool) ((void *)0)
+#endif
+#include "spl_fixpt31_32.h" // fixed31_32 and related functions
+#include "spl_custom_float.h" // custom float and related functions
+
struct spl_size {
uint32_t width;
uint32_t height;
@@ -22,16 +24,16 @@ struct spl_rect {
};
struct spl_ratios {
- struct fixed31_32 horz;
- struct fixed31_32 vert;
- struct fixed31_32 horz_c;
- struct fixed31_32 vert_c;
+ struct spl_fixed31_32 horz;
+ struct spl_fixed31_32 vert;
+ struct spl_fixed31_32 horz_c;
+ struct spl_fixed31_32 vert_c;
};
struct spl_inits {
- struct fixed31_32 h;
- struct fixed31_32 h_c;
- struct fixed31_32 v;
- struct fixed31_32 v_c;
+ struct spl_fixed31_32 h;
+ struct spl_fixed31_32 h_c;
+ struct spl_fixed31_32 v;
+ struct spl_fixed31_32 v_c;
};
struct spl_taps {
@@ -64,6 +66,8 @@ enum spl_pixel_format {
SPL_PIXEL_FORMAT_420BPP10,
/*end of pixel format definition*/
SPL_PIXEL_FORMAT_INVALID,
+ SPL_PIXEL_FORMAT_422BPP8,
+ SPL_PIXEL_FORMAT_422BPP10,
SPL_PIXEL_FORMAT_GRPH_BEGIN = SPL_PIXEL_FORMAT_INDEX8,
SPL_PIXEL_FORMAT_GRPH_END = SPL_PIXEL_FORMAT_FP16,
SPL_PIXEL_FORMAT_VIDEO_BEGIN = SPL_PIXEL_FORMAT_420BPP8,
@@ -135,6 +139,7 @@ struct spl_scaler_data {
struct spl_rect viewport_c;
struct spl_rect recout;
struct spl_ratios ratios;
+ struct spl_ratios recip_ratios;
struct spl_inits inits;
};
@@ -402,13 +407,19 @@ struct dscl_prog_data {
/* blur and scale filter */
const uint16_t *filter_blur_scale_v;
const uint16_t *filter_blur_scale_h;
+ int sharpness_level; /* Track sharpness level */
};
/* SPL input and output definitions */
-// SPL outputs struct
-struct spl_out {
+// SPL scratch struct
+struct spl_scratch {
// Pack all SPL outputs in scl_data
struct spl_scaler_data scl_data;
+};
+
+/* SPL input and output definitions */
+// SPL outputs struct
+struct spl_out {
// Pack all output need to program hw registers
struct dscl_prog_data *dscl_prog_data;
};
@@ -450,14 +461,26 @@ struct basic_out {
bool alpha_en;
bool use_two_pixels_per_container;
};
-enum explicit_sharpness {
- SHARPNESS_LOW = 0,
- SHARPNESS_MID,
- SHARPNESS_HIGH
-};
-struct adaptive_sharpness {
+enum sharpness_setting {
+ SHARPNESS_HW_OFF = 0,
+ SHARPNESS_ZERO,
+ SHARPNESS_CUSTOM
+};
+struct spl_sharpness_range {
+ int sdr_rgb_min;
+ int sdr_rgb_max;
+ int sdr_rgb_mid;
+ int sdr_yuv_min;
+ int sdr_yuv_max;
+ int sdr_yuv_mid;
+ int hdr_rgb_min;
+ int hdr_rgb_max;
+ int hdr_rgb_mid;
+};
+struct adaptive_sharpness {
bool enable;
- enum explicit_sharpness sharpness;
+ int sharpness_level;
+ struct spl_sharpness_range sharpness_range;
};
enum linear_light_scaling { // convert it in translation logic
LLS_PREF_DONT_CARE = 0,
@@ -491,6 +514,11 @@ struct spl_in {
bool prefer_easf;
bool disable_easf;
struct spl_debug debug;
+ bool is_fullscreen;
+ bool is_hdr_on;
+ int h_active;
+ int v_active;
+ int hdr_multx100;
};
// end of SPL inputs
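A minimal sketch of how a caller might fill the new spl_in fields (values are
illustrative; the rest of the spl_in setup is omitted):

	struct spl_in spl_in = { 0 };

	spl_in.is_fullscreen = true;
	spl_in.is_hdr_on = false;
	spl_in.h_active = 3840;		/* active timing, not viewport */
	spl_in.v_active = 2160;
	spl_in.hdr_multx100 = 0;	/* HDR multiplier x100; 0 for SDR */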
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c
new file mode 100644
index 000000000000..be2f34d034c5
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "spl_debug.h"
+#include "spl_custom_float.h"
+
+static bool spl_build_custom_float(struct spl_fixed31_32 value,
+ const struct spl_custom_float_format *format,
+ bool *negative,
+ uint32_t *mantissa,
+ uint32_t *exponenta)
+{
+ uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1;
+
+ const struct spl_fixed31_32 mantissa_constant_plus_max_fraction =
+ spl_fixpt_from_fraction((1LL << (format->mantissa_bits + 1)) - 1,
+ 1LL << format->mantissa_bits);
+
+ struct spl_fixed31_32 mantiss;
+
+ if (spl_fixpt_eq(value, spl_fixpt_zero)) {
+ *negative = false;
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ if (spl_fixpt_lt(value, spl_fixpt_zero)) {
+ *negative = format->sign;
+ value = spl_fixpt_neg(value);
+ } else {
+ *negative = false;
+ }
+
+ if (spl_fixpt_lt(value, spl_fixpt_one)) {
+ uint32_t i = 1;
+
+ do {
+ value = spl_fixpt_shl(value, 1);
+ ++i;
+ } while (spl_fixpt_lt(value, spl_fixpt_one));
+
+ --i;
+
+ if (exp_offset <= i) {
+ *mantissa = 0;
+ *exponenta = 0;
+ return true;
+ }
+
+ *exponenta = exp_offset - i;
+ } else if (spl_fixpt_le(mantissa_constant_plus_max_fraction, value)) {
+ uint32_t i = 1;
+
+ do {
+ value = spl_fixpt_shr(value, 1);
+ ++i;
+ } while (spl_fixpt_lt(mantissa_constant_plus_max_fraction, value));
+
+ *exponenta = exp_offset + i - 1;
+ } else {
+ *exponenta = exp_offset;
+ }
+
+ mantiss = spl_fixpt_sub(value, spl_fixpt_one);
+
+ if (spl_fixpt_lt(mantiss, spl_fixpt_zero) ||
+ spl_fixpt_lt(spl_fixpt_one, mantiss))
+ mantiss = spl_fixpt_zero;
+ else
+ mantiss = spl_fixpt_shl(mantiss, format->mantissa_bits);
+
+ *mantissa = spl_fixpt_floor(mantiss);
+
+ return true;
+}
+
+static bool spl_setup_custom_float(const struct spl_custom_float_format *format,
+ bool negative,
+ uint32_t mantissa,
+ uint32_t exponenta,
+ uint32_t *result)
+{
+ uint32_t i = 0;
+ uint32_t j = 0;
+ uint32_t value = 0;
+
+ /* verification code:
+ * once calculation is ok we can remove it
+ */
+
+ const uint32_t mantissa_mask =
+ (1 << (format->mantissa_bits + 1)) - 1;
+
+ const uint32_t exponenta_mask =
+ (1 << (format->exponenta_bits + 1)) - 1;
+
+ if (mantissa & ~mantissa_mask) {
+ SPL_BREAK_TO_DEBUGGER();
+ mantissa = mantissa_mask;
+ }
+
+ if (exponenta & ~exponenta_mask) {
+ SPL_BREAK_TO_DEBUGGER();
+ exponenta = exponenta_mask;
+ }
+
+ /* end of verification code */
+
+ while (i < format->mantissa_bits) {
+ uint32_t mask = 1 << i;
+
+ if (mantissa & mask)
+ value |= mask;
+
+ ++i;
+ }
+
+ while (j < format->exponenta_bits) {
+ uint32_t mask = 1 << j;
+
+ if (exponenta & mask)
+ value |= mask << i;
+
+ ++j;
+ }
+
+ if (negative && format->sign)
+ value |= 1 << (i + j);
+
+ *result = value;
+
+ return true;
+}
+
+bool spl_convert_to_custom_float_format(struct spl_fixed31_32 value,
+ const struct spl_custom_float_format *format,
+ uint32_t *result)
+{
+ uint32_t mantissa;
+ uint32_t exponenta;
+ bool negative;
+
+ return spl_build_custom_float(value, format, &negative, &mantissa, &exponenta) &&
+ spl_setup_custom_float(format,
+ negative,
+ mantissa,
+ exponenta,
+ result);
+}
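A worked example for the conversion above, assuming a hypothetical signed
custom-float register format with 6 exponent bits and 12 mantissa bits:

	struct spl_custom_float_format fmt = {
		.mantissa_bits = 12,
		.exponenta_bits = 6,
		.sign = true,
	};
	uint32_t reg = 0;

	spl_convert_to_custom_float_format(spl_fixpt_one, &fmt, &reg);
	/* the exponent bias is (1 << 5) - 1 = 31 and the mantissa of 1.0 is 0,
	 * so reg == 31 << 12 == 0x1F000
	 */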
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h
new file mode 100644
index 000000000000..cdc4e107b9de
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_custom_float.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef SPL_CUSTOM_FLOAT_H_
+#define SPL_CUSTOM_FLOAT_H_
+
+#include "spl_os_types.h"
+#include "spl_fixpt31_32.h"
+
+struct spl_custom_float_format {
+ uint32_t mantissa_bits;
+ uint32_t exponenta_bits;
+ bool sign;
+};
+
+struct spl_custom_float_value {
+ uint32_t mantissa;
+ uint32_t exponenta;
+ uint32_t value;
+ bool negative;
+};
+
+bool spl_convert_to_custom_float_format(
+ struct spl_fixed31_32 value,
+ const struct spl_custom_float_format *format,
+ uint32_t *result);
+
+#endif //SPL_CUSTOM_FLOAT_H_
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
new file mode 100644
index 000000000000..5696dafd0894
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_debug.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef SPL_DEBUG_H
+#define SPL_DEBUG_H
+
+#ifdef SPL_ASSERT
+#undef SPL_ASSERT
+#endif
+#define SPL_ASSERT(b)
+
+#define SPL_ASSERT_CRITICAL(expr) do {if (expr)/* Do nothing */; } while (0)
+
+#ifdef SPL_DALMSG
+#undef SPL_DALMSG
+#endif
+#define SPL_DALMSG(b)
+
+#ifdef SPL_DAL_ASSERT_MSG
+#undef SPL_DAL_ASSERT_MSG
+#endif
+#define SPL_DAL_ASSERT_MSG(b, m)
+
+#endif // SPL_DEBUG_H
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
new file mode 100644
index 000000000000..a95565df5487
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: MIT
+//
+// Copyright 2024 Advanced Micro Devices, Inc.
+
+#include "spl_fixpt31_32.h"
+
+static const struct spl_fixed31_32 spl_fixpt_two_pi = { 26986075409LL };
+static const struct spl_fixed31_32 spl_fixpt_ln2 = { 2977044471LL };
+static const struct spl_fixed31_32 spl_fixpt_ln2_div_2 = { 1488522236LL };
+
+static inline unsigned long long abs_i64(
+ long long arg)
+{
+ if (arg > 0)
+ return (unsigned long long)arg;
+ else
+ return (unsigned long long)(-arg);
+}
+
+/*
+ * @brief
+ * result = dividend / divisor
+ * *remainder = dividend % divisor
+ */
+static inline unsigned long long complete_integer_division_u64(
+ unsigned long long dividend,
+ unsigned long long divisor,
+ unsigned long long *remainder)
+{
+ unsigned long long result;
+
+ ASSERT(divisor);
+
+ result = spl_div64_u64_rem(dividend, divisor, remainder);
+
+ return result;
+}
+
+
+#define FRACTIONAL_PART_MASK \
+ ((1ULL << FIXED31_32_BITS_PER_FRACTIONAL_PART) - 1)
+
+#define GET_INTEGER_PART(x) \
+ ((x) >> FIXED31_32_BITS_PER_FRACTIONAL_PART)
+
+#define GET_FRACTIONAL_PART(x) \
+ (FRACTIONAL_PART_MASK & (x))
+
+struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator)
+{
+ struct spl_fixed31_32 res;
+
+ bool arg1_negative = numerator < 0;
+ bool arg2_negative = denominator < 0;
+
+ unsigned long long arg1_value = arg1_negative ? -numerator : numerator;
+ unsigned long long arg2_value = arg2_negative ? -denominator : denominator;
+
+ unsigned long long remainder;
+
+ /* determine integer part */
+
+ unsigned long long res_value = complete_integer_division_u64(
+ arg1_value, arg2_value, &remainder);
+
+ ASSERT(res_value <= LONG_MAX);
+
+ /* determine fractional part */
+ {
+ unsigned int i = FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ do {
+ remainder <<= 1;
+
+ res_value <<= 1;
+
+ if (remainder >= arg2_value) {
+ res_value |= 1;
+ remainder -= arg2_value;
+ }
+ } while (--i != 0);
+ }
+
+ /* round up LSB */
+ {
+ unsigned long long summand = (remainder << 1) >= arg2_value;
+
+ ASSERT(res_value <= LLONG_MAX - summand);
+
+ res_value += summand;
+ }
+
+ res.value = (long long)res_value;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
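A worked example of the long-division loop above:

	struct spl_fixed31_32 third = spl_fixpt_from_fraction(1, 3);
	/* integer division gives 0 remainder 1; 32 shift-and-subtract steps
	 * emit the binary fraction 0b0101... so third.value == 0x55555555;
	 * the final round-up fires only when 2 * remainder >= denominator
	 */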
+
+struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ struct spl_fixed31_32 res;
+
+ bool arg1_negative = arg1.value < 0;
+ bool arg2_negative = arg2.value < 0;
+
+ unsigned long long arg1_value = arg1_negative ? -arg1.value : arg1.value;
+ unsigned long long arg2_value = arg2_negative ? -arg2.value : arg2.value;
+
+ unsigned long long arg1_int = GET_INTEGER_PART(arg1_value);
+ unsigned long long arg2_int = GET_INTEGER_PART(arg2_value);
+
+ unsigned long long arg1_fra = GET_FRACTIONAL_PART(arg1_value);
+ unsigned long long arg2_fra = GET_FRACTIONAL_PART(arg2_value);
+
+ unsigned long long tmp;
+
+ res.value = arg1_int * arg2_int;
+
+ ASSERT(res.value <= (long long)LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg1_int * arg2_fra;
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg2_int * arg1_fra;
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg1_fra * arg2_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (unsigned long long)spl_fixpt_half.value);
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ if (arg1_negative ^ arg2_negative)
+ res.value = -res.value;
+
+ return res;
+}
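The multiply splits each operand into integer and fractional halves and sums
the four partial products; a worked example:

	struct spl_fixed31_32 p = spl_fixpt_mul(spl_fixpt_from_int(2),
						spl_fixpt_from_fraction(3, 2));
	/* 2.0 * 1.5: int*int = 2, cross term 2 * 0.5 = 1.0, fra*fra = 0;
	 * total 3.0, so p.value == 0x300000000
	 */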
+
+struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 res;
+
+ unsigned long long arg_value = abs_i64(arg.value);
+
+ unsigned long long arg_int = GET_INTEGER_PART(arg_value);
+
+ unsigned long long arg_fra = GET_FRACTIONAL_PART(arg_value);
+
+ unsigned long long tmp;
+
+ res.value = arg_int * arg_int;
+
+ ASSERT(res.value <= (long long)LONG_MAX);
+
+ res.value <<= FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ tmp = arg_int * arg_fra;
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
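+	/* the arg_int * arg_fra cross term is added twice: (a + b)^2 = a*a + 2*a*b + b*b */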
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ tmp = arg_fra * arg_fra;
+
+ tmp = (tmp >> FIXED31_32_BITS_PER_FRACTIONAL_PART) +
+ (tmp >= (unsigned long long)spl_fixpt_half.value);
+
+ ASSERT(tmp <= (unsigned long long)(LLONG_MAX - res.value));
+
+ res.value += tmp;
+
+ return res;
+}
+
+struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg)
+{
+ /*
+ * @note
+ * Newton's method would be a faster way to compute the reciprocal
+ */
+
+ ASSERT(arg.value);
+
+ return spl_fixpt_from_fraction(
+ spl_fixpt_one.value,
+ arg.value);
+}
+
+struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 square;
+
+ struct spl_fixed31_32 res = spl_fixpt_one;
+
+ int n = 27;
+
+ struct spl_fixed31_32 arg_norm = arg;
+
+ if (spl_fixpt_le(
+ spl_fixpt_two_pi,
+ spl_fixpt_abs(arg))) {
+ arg_norm = spl_fixpt_sub(
+ arg_norm,
+ spl_fixpt_mul_int(
+ spl_fixpt_two_pi,
+ (int)spl_div64_s64(
+ arg_norm.value,
+ spl_fixpt_two_pi.value)));
+ }
+
+ square = spl_fixpt_sqr(arg_norm);
+
+ do {
+ res = spl_fixpt_sub(
+ spl_fixpt_one,
+ spl_fixpt_div_int(
+ spl_fixpt_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n > 2);
+
+ if (arg.value != arg_norm.value)
+ res = spl_fixpt_div(
+ spl_fixpt_mul(res, arg_norm),
+ arg);
+
+ return res;
+}
+
+struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg)
+{
+ return spl_fixpt_mul(
+ arg,
+ spl_fixpt_sinc(arg));
+}
+
+struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg)
+{
+ /* TODO implement argument normalization */
+
+ const struct spl_fixed31_32 square = spl_fixpt_sqr(arg);
+
+ struct spl_fixed31_32 res = spl_fixpt_one;
+
+ int n = 26;
+
+ do {
+ res = spl_fixpt_sub(
+ spl_fixpt_one,
+ spl_fixpt_div_int(
+ spl_fixpt_mul(
+ square,
+ res),
+ n * (n - 1)));
+
+ n -= 2;
+ } while (n != 0);
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = exp(arg),
+ * where abs(arg) < 1
+ *
+ * Calculated as Taylor series.
+ */
+static struct spl_fixed31_32 fixed31_32_exp_from_taylor_series(struct spl_fixed31_32 arg)
+{
+ unsigned int n = 9;
+
+ struct spl_fixed31_32 res = spl_fixpt_from_fraction(
+ n + 2,
+ n + 1);
+ /* TODO find correct res */
+
+ ASSERT(spl_fixpt_lt(arg, spl_fixpt_one));
+
+ do
+ res = spl_fixpt_add(
+ spl_fixpt_one,
+ spl_fixpt_div_int(
+ spl_fixpt_mul(
+ arg,
+ res),
+ n));
+ while (--n != 1);
+
+ return spl_fixpt_add(
+ spl_fixpt_one,
+ spl_fixpt_mul(
+ arg,
+ res));
+}
+
+struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg)
+{
+ /*
+ * @brief
+ * Main equation is:
+ * exp(x) = exp(r + m * ln(2)) = (1 << m) * exp(r),
+ * where m = round(x / ln(2)), r = x - m * ln(2)
+ */
+
+ if (spl_fixpt_le(
+ spl_fixpt_ln2_div_2,
+ spl_fixpt_abs(arg))) {
+ int m = spl_fixpt_round(
+ spl_fixpt_div(
+ arg,
+ spl_fixpt_ln2));
+
+ struct spl_fixed31_32 r = spl_fixpt_sub(
+ arg,
+ spl_fixpt_mul_int(
+ spl_fixpt_ln2,
+ m));
+
+ ASSERT(m != 0);
+
+ ASSERT(spl_fixpt_lt(
+ spl_fixpt_abs(r),
+ spl_fixpt_one));
+
+ if (m > 0)
+ return spl_fixpt_shl(
+ fixed31_32_exp_from_taylor_series(r),
+ (unsigned char)m);
+ else
+ return spl_fixpt_div_int(
+ fixed31_32_exp_from_taylor_series(r),
+ 1LL << -m);
+ } else if (arg.value != 0)
+ return fixed31_32_exp_from_taylor_series(arg);
+ else
+ return spl_fixpt_one;
+}
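A numeric check of the range reduction above: for arg = 2.0,
m = round(2 / ln 2) = 3 and r = 2 - 3 * ln 2 ~= -0.0794, so the result is
exp(r) << 3 ~= 0.9236 * 8 ~= 7.389, matching e^2.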
+
+struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 res = spl_fixpt_neg(spl_fixpt_one);
+ /* TODO improve 1st estimation */
+
+ struct spl_fixed31_32 error;
+
+ ASSERT(arg.value > 0);
+ /* TODO if arg is negative, return NaN */
+ /* TODO if arg is zero, return -INF */
+
+ do {
+ struct spl_fixed31_32 res1 = spl_fixpt_add(
+ spl_fixpt_sub(
+ res,
+ spl_fixpt_one),
+ spl_fixpt_div(
+ arg,
+ spl_fixpt_exp(res)));
+
+ error = spl_fixpt_sub(
+ res,
+ res1);
+
+ res = res1;
+ /* TODO determine max_allowed_error based on quality of exp() */
+ } while (abs_i64(error.value) > 100ULL);
+
+ return res;
+}
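The update above is Newton's method applied to f(y) = exp(y) - arg:

	y' = y - (exp(y) - arg) / exp(y) = y - 1 + arg / exp(y)

iterated until the step size drops below roughly 100 units of the 31.32
fraction.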
+
+
+/* Generic helper to translate a fixed point value into a register format
+ * consisting of integer_bits integer bits followed by fractional_bits
+ * fractional bits. For example, spl_fixpt_u2d19() uses it to produce a
+ * 2-bit integer part and a 19-bit fractional part packed into 32 bits.
+ * It is used in hw programming (scaler).
+ */
+
+static inline unsigned int ux_dy(
+ long long value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ /* 1. create mask of integer part */
+ unsigned int result = (1 << integer_bits) - 1;
+ /* 2. mask out fractional part */
+ unsigned int fractional_part = FRACTIONAL_PART_MASK & value;
+ /* 3. shrink fixed point integer part to be of integer_bits width*/
+ result &= GET_INTEGER_PART(value);
+ /* 4. make space for fractional part to be filled in after integer */
+ result <<= fractional_bits;
+ /* 5. shrink fixed point fractional part to of fractional_bits width*/
+ fractional_part >>= FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits;
+ /* 6. merge the result */
+ return result | fractional_part;
+}
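A worked example for ux_dy: packing 1.5 into u2d19 format. The integer part 1
lands in the 2-bit field above the fraction (1 << 19 = 0x80000), the fraction
0.5 keeps its top 19 bits (0x80000000 >> 13 = 0x40000), and the merged result
is 0xC0000.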
+
+static inline unsigned int clamp_ux_dy(
+ long long value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits,
+ unsigned int min_clamp)
+{
+ unsigned int truncated_val = ux_dy(value, integer_bits, fractional_bits);
+
+ if (value >= (1LL << (integer_bits + FIXED31_32_BITS_PER_FRACTIONAL_PART)))
+ return (1 << (integer_bits + fractional_bits)) - 1;
+ else if (truncated_val > min_clamp)
+ return truncated_val;
+ else
+ return min_clamp;
+}
+
+unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 4, 19);
+}
+
+unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 3, 19);
+}
+
+unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 2, 19);
+}
+
+unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg)
+{
+ return ux_dy(arg.value, 0, 19);
+}
+
+unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 14, 1);
+}
+
+unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg)
+{
+ return clamp_ux_dy(arg.value, 0, 10, 1);
+}
+
+int spl_fixpt_s4d19(struct spl_fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return -(int)ux_dy(spl_fixpt_abs(arg).value, 4, 19);
+ else
+ return ux_dy(arg.value, 4, 19);
+}
+
+struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct spl_fixed31_32 fixpt_value = spl_fixpt_zero;
+ struct spl_fixed31_32 fixpt_int_value = spl_fixpt_zero;
+ long long frac_mask = ((long long)1 << (long long)integer_bits) - 1;
+
+ fixpt_value.value = (long long)value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ frac_mask = frac_mask << fractional_bits;
+ fixpt_int_value.value = value & frac_mask;
+ fixpt_int_value.value <<= (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ fixpt_value.value |= fixpt_int_value.value;
+ return fixpt_value;
+}
+
+struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits)
+{
+ struct spl_fixed31_32 fixpt_value = spl_fixpt_from_int(int_value);
+
+ fixpt_value.value |= (long long)frac_value << (FIXED31_32_BITS_PER_FRACTIONAL_PART - fractional_bits);
+ return fixpt_value;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
new file mode 100644
index 000000000000..8a045e2f8699
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_fixpt31_32.h
@@ -0,0 +1,525 @@
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+
+#ifndef __SPL_FIXED31_32_H__
+#define __SPL_FIXED31_32_H__
+
+#include "os_types.h"
+#include "spl_os_types.h" // swap
+#ifndef ASSERT
+#define ASSERT(_bool) ((void *)0)
+#endif
+
+#ifndef LLONG_MAX
+#define LLONG_MAX 9223372036854775807ll
+#endif
+#ifndef LLONG_MIN
+#define LLONG_MIN (-LLONG_MAX - 1ll)
+#endif
+
+#define FIXED31_32_BITS_PER_FRACTIONAL_PART 32
+
+/*
+ * @brief
+ * Arithmetic operations on real numbers
+ * represented as fixed-point numbers.
+ * There are: 1 bit for sign,
+ * 31 bits for the integer part,
+ * 32 bits for the fractional part.
+ *
+ * @note
+ * Currently, overflows and underflows are asserted;
+ * no special result returned.
+ */
+
+struct spl_fixed31_32 {
+ long long value;
+};
+
+
+/*
+ * @brief
+ * Useful constants
+ */
+
+static const struct spl_fixed31_32 spl_fixpt_zero = { 0 };
+static const struct spl_fixed31_32 spl_fixpt_epsilon = { 1LL };
+static const struct spl_fixed31_32 spl_fixpt_half = { 0x80000000LL };
+static const struct spl_fixed31_32 spl_fixpt_one = { 0x100000000LL };
+
+/*
+ * @brief
+ * Initialization routines
+ */
+
+/*
+ * @brief
+ * result = numerator / denominator
+ */
+struct spl_fixed31_32 spl_fixpt_from_fraction(long long numerator, long long denominator);
+
+/*
+ * @brief
+ * result = arg
+ */
+static inline struct spl_fixed31_32 spl_fixpt_from_int(int arg)
+{
+ struct spl_fixed31_32 res;
+
+ res.value = (long long) arg << FIXED31_32_BITS_PER_FRACTIONAL_PART;
+
+ return res;
+}
+
+/*
+ * @brief
+ * Unary operators
+ */
+
+/*
+ * @brief
+ * result = -arg
+ */
+static inline struct spl_fixed31_32 spl_fixpt_neg(struct spl_fixed31_32 arg)
+{
+ struct spl_fixed31_32 res;
+
+ res.value = -arg.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = abs(arg) := (arg >= 0) ? arg : -arg
+ */
+static inline struct spl_fixed31_32 spl_fixpt_abs(struct spl_fixed31_32 arg)
+{
+ if (arg.value < 0)
+ return spl_fixpt_neg(arg);
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary relational operators
+ */
+
+/*
+ * @brief
+ * result = arg1 < arg2
+ */
+static inline bool spl_fixpt_lt(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return arg1.value < arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 <= arg2
+ */
+static inline bool spl_fixpt_le(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return arg1.value <= arg2.value;
+}
+
+/*
+ * @brief
+ * result = arg1 == arg2
+ */
+static inline bool spl_fixpt_eq(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return arg1.value == arg2.value;
+}
+
+/*
+ * @brief
+ * result = min(arg1, arg2) := (arg1 <= arg2) ? arg1 : arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_min(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg1;
+ else
+ return arg2;
+}
+
+/*
+ * @brief
+ * result = max(arg1, arg2) := (arg1 <= arg2) ? arg2 : arg1
+ */
+static inline struct spl_fixed31_32 spl_fixpt_max(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ if (arg1.value <= arg2.value)
+ return arg2;
+ else
+ return arg1;
+}
+
+/*
+ * @brief
+ * | min_value, when arg <= min_value
+ * result = | arg, when min_value < arg < max_value
+ * | max_value, when arg >= max_value
+ */
+static inline struct spl_fixed31_32 spl_fixpt_clamp(
+ struct spl_fixed31_32 arg,
+ struct spl_fixed31_32 min_value,
+ struct spl_fixed31_32 max_value)
+{
+ if (spl_fixpt_le(arg, min_value))
+ return min_value;
+ else if (spl_fixpt_le(max_value, arg))
+ return max_value;
+ else
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary shift operators
+ */
+
+/*
+ * @brief
+ * result = arg << shift
+ */
+static inline struct spl_fixed31_32 spl_fixpt_shl(struct spl_fixed31_32 arg, unsigned char shift)
+{
+ ASSERT(((arg.value >= 0) && (arg.value <= LLONG_MAX >> shift)) ||
+ ((arg.value < 0) && (arg.value >= ~(LLONG_MAX >> shift))));
+
+ arg.value = arg.value << shift;
+
+ return arg;
+}
+
+/*
+ * @brief
+ * result = arg >> shift
+ */
+static inline struct spl_fixed31_32 spl_fixpt_shr(struct spl_fixed31_32 arg, unsigned char shift)
+{
+ bool negative = arg.value < 0;
+
+ if (negative)
+ arg.value = -arg.value;
+ arg.value = arg.value >> shift;
+ if (negative)
+ arg.value = -arg.value;
+ return arg;
+}
+
+/*
+ * @brief
+ * Binary additive operators
+ */
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_add(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ struct spl_fixed31_32 res;
+
+ ASSERT(((arg1.value >= 0) && (LLONG_MAX - arg1.value >= arg2.value)) ||
+ ((arg1.value < 0) && (LLONG_MIN - arg1.value <= arg2.value)));
+
+ res.value = arg1.value + arg2.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = arg1 + arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_add_int(struct spl_fixed31_32 arg1, int arg2)
+{
+ return spl_fixpt_add(arg1, spl_fixpt_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_sub(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ struct spl_fixed31_32 res;
+
+ ASSERT(((arg2.value >= 0) && (LLONG_MIN + arg2.value <= arg1.value)) ||
+ ((arg2.value < 0) && (LLONG_MAX + arg2.value >= arg1.value)));
+
+ res.value = arg1.value - arg2.value;
+
+ return res;
+}
+
+/*
+ * @brief
+ * result = arg1 - arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_sub_int(struct spl_fixed31_32 arg1, int arg2)
+{
+ return spl_fixpt_sub(arg1, spl_fixpt_from_int(arg2));
+}
+
+
+/*
+ * @brief
+ * Binary multiplicative operators
+ */
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+struct spl_fixed31_32 spl_fixpt_mul(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2);
+
+
+/*
+ * @brief
+ * result = arg1 * arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_mul_int(struct spl_fixed31_32 arg1, int arg2)
+{
+ return spl_fixpt_mul(arg1, spl_fixpt_from_int(arg2));
+}
+
+/*
+ * @brief
+ * result = square(arg) := arg * arg
+ */
+struct spl_fixed31_32 spl_fixpt_sqr(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_div_int(struct spl_fixed31_32 arg1, long long arg2)
+{
+ return spl_fixpt_from_fraction(arg1.value, spl_fixpt_from_int((int)arg2).value);
+}
+
+/*
+ * @brief
+ * result = arg1 / arg2
+ */
+static inline struct spl_fixed31_32 spl_fixpt_div(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ return spl_fixpt_from_fraction(arg1.value, arg2.value);
+}
+
+/*
+ * @brief
+ * Reciprocal function
+ */
+
+/*
+ * @brief
+ * result = reciprocal(arg) := 1 / arg
+ *
+ * @note
+ * No special actions taken in case argument is zero.
+ */
+struct spl_fixed31_32 spl_fixpt_recip(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * Trigonometric functions
+ */
+
+/*
+ * @brief
+ * result = sinc(arg) := sin(arg) / arg
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct spl_fixed31_32 spl_fixpt_sinc(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = sin(arg)
+ *
+ * @note
+ * Argument specified in radians,
+ * internally it's normalized to [-2pi...2pi] range.
+ */
+struct spl_fixed31_32 spl_fixpt_sin(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = cos(arg)
+ *
+ * @note
+ * Argument specified in radians
+ * and should be in [-2pi...2pi] range -
+ * passing arguments outside that range
+ * will cause incorrect result!
+ */
+struct spl_fixed31_32 spl_fixpt_cos(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * Transcendent functions
+ */
+
+/*
+ * @brief
+ * result = exp(arg)
+ *
+ * @note
+ * Currently, function is verified for abs(arg) <= 1.
+ */
+struct spl_fixed31_32 spl_fixpt_exp(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * result = log(arg)
+ *
+ * @note
+ * Currently, abs(arg) should be less than 1.
+ * No normalization is done.
+ * Currently, no special actions taken
+ * in case of invalid argument(s). Take care!
+ */
+struct spl_fixed31_32 spl_fixpt_log(struct spl_fixed31_32 arg);
+
+/*
+ * @brief
+ * Power function
+ */
+
+/*
+ * @brief
+ * result = pow(arg1, arg2)
+ *
+ * @note
+ * Currently, abs(arg1) should be less than 1. Take care!
+ */
+static inline struct spl_fixed31_32 spl_fixpt_pow(struct spl_fixed31_32 arg1, struct spl_fixed31_32 arg2)
+{
+ if (arg1.value == 0)
+ return arg2.value == 0 ? spl_fixpt_one : spl_fixpt_zero;
+
+ return spl_fixpt_exp(
+ spl_fixpt_mul(
+ spl_fixpt_log(arg1),
+ arg2));
+}
+
+/*
+ * @brief
+ * Rounding functions
+ */
+
+/*
+ * @brief
+ * result = floor(arg) := greatest integer lower than or equal to arg
+ */
+static inline int spl_fixpt_floor(struct spl_fixed31_32 arg)
+{
+ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+ if (arg.value >= 0)
+ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ else
+ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
+
+/*
+ * @brief
+ * result = round(arg) := integer nearest to arg
+ */
+static inline int spl_fixpt_round(struct spl_fixed31_32 arg)
+{
+ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+ const long long summand = spl_fixpt_half.value;
+
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ else
+ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
+
+/*
+ * @brief
+ * result = ceil(arg) := lowest integer greater than or equal to arg
+ */
+static inline int spl_fixpt_ceil(struct spl_fixed31_32 arg)
+{
+ unsigned long long arg_value = arg.value > 0 ? arg.value : -arg.value;
+
+ const long long summand = spl_fixpt_one.value -
+ spl_fixpt_epsilon.value;
+
+ ASSERT(LLONG_MAX - (long long)arg_value >= summand);
+
+ arg_value += summand;
+
+ if (arg.value >= 0)
+ return (int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ else
+ return -(int)(arg_value >> FIXED31_32_BITS_PER_FRACTIONAL_PART);
+}
+
+/* The following functions are used in scaler hw programming to convert a
+ * fixed point value to a format with x bits for the integer part and y bits
+ * for the fractional part; e.g. u2d19 has 2 integer bits and 19 fractional
+ * bits, and u0d19 has 0 integer bits and 19 fractional bits.
+ */
+
+unsigned int spl_fixpt_u4d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_u3d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_u2d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_u0d19(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_clamp_u0d14(struct spl_fixed31_32 arg);
+
+unsigned int spl_fixpt_clamp_u0d10(struct spl_fixed31_32 arg);
+
+int spl_fixpt_s4d19(struct spl_fixed31_32 arg);
+
+static inline struct spl_fixed31_32 spl_fixpt_truncate(struct spl_fixed31_32 arg, unsigned int frac_bits)
+{
+ bool negative = arg.value < 0;
+
+ if (frac_bits >= FIXED31_32_BITS_PER_FRACTIONAL_PART) {
+ ASSERT(frac_bits == FIXED31_32_BITS_PER_FRACTIONAL_PART);
+ return arg;
+ }
+
+ if (negative)
+ arg.value = -arg.value;
+ arg.value &= (~0ULL) << (FIXED31_32_BITS_PER_FRACTIONAL_PART - frac_bits);
+ if (negative)
+ arg.value = -arg.value;
+ return arg;
+}
+
+struct spl_fixed31_32 spl_fixpt_from_ux_dy(unsigned int value, unsigned int integer_bits, unsigned int fractional_bits);
+struct spl_fixed31_32 spl_fixpt_from_int_dy(unsigned int int_value,
+ unsigned int frac_value,
+ unsigned int integer_bits,
+ unsigned int fractional_bits);
+
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
index 7ebea91c84f6..709706ed4f2c 100644
--- a/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/spl/spl_os_types.h
@@ -1,28 +1,7 @@
-/*
- * Copyright 2012-16 Advanced Micro Devices, Inc.
- * Copyright 2019 Raptor Engineering, LLC
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- *
- * Authors: AMD
- *
- */
+/* SPDX-License-Identifier: MIT */
+
+/* Copyright 2024 Advanced Micro Devices, Inc. */
+/* Copyright 2019 Raptor Engineering, LLC */
#ifndef _SPL_OS_TYPES_H_
#define _SPL_OS_TYPES_H_
@@ -39,7 +18,6 @@
* general debug capabilities
*
*/
-// TODO: need backport
#define SPL_BREAK_TO_DEBUGGER() ASSERT(0)
static inline uint64_t spl_div_u64_rem(uint64_t dividend, uint32_t divisor, uint32_t *remainder)
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index c5f99cbff0b6..e20c220aa8b4 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -111,7 +111,7 @@
#define DMUB_MAX_PHANTOM_PLANES ((DMUB_MAX_PLANES) / 2)
/* Trace buffer offset for entry */
-#define TRACE_BUFFER_ENTRY_OFFSET 16
+#define TRACE_BUFFER_ENTRY_OFFSET 16
/**
* Maximum number of dirty rects supported by FW.
@@ -1879,7 +1879,12 @@ enum dmub_cmd_idle_opt_type {
/**
* DCN hardware notify idle.
*/
- DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE = 2
+ DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE = 2,
+
+ /**
+ * DCN hardware notify power state.
+ */
+ DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE = 3,
};
/**
@@ -1907,6 +1912,33 @@ struct dmub_rb_cmd_idle_opt_dcn_notify_idle {
};
/**
+ * enum dmub_idle_opt_dc_power_state - DC power states.
+ */
+enum dmub_idle_opt_dc_power_state {
+ DMUB_IDLE_OPT_DC_POWER_STATE_UNKNOWN = 0,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D0 = 1,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D1 = 2,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D2 = 4,
+ DMUB_IDLE_OPT_DC_POWER_STATE_D3 = 8,
+};
+
+/**
+ * struct dmub_idle_opt_set_dc_power_state_data - Data passed to FW in a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
+ */
+struct dmub_idle_opt_set_dc_power_state_data {
+ uint8_t power_state; /**< power state */
+ uint8_t pad[3]; /**< padding */
+};
+
+/**
+ * struct dmub_rb_cmd_idle_opt_set_dc_power_state - Data passed to FW in a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
+ */
+struct dmub_rb_cmd_idle_opt_set_dc_power_state {
+ struct dmub_cmd_header header; /**< header */
+ struct dmub_idle_opt_set_dc_power_state_data data;
+};
+
+/**
* struct dmub_clocks - Clock update notification.
*/
struct dmub_clocks {
@@ -5298,6 +5330,10 @@ union dmub_rb_cmd {
* Definition of a DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE command.
*/
struct dmub_rb_cmd_idle_opt_dcn_notify_idle idle_opt_notify_idle;
+ /**
+ * Definition of a DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE command.
+ */
+ struct dmub_rb_cmd_idle_opt_set_dc_power_state idle_opt_set_dc_power_state;
/*
* Definition of a DMUB_CMD__REPLAY_COPY_SETTINGS command.
*/
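A hedged sketch of how a driver might issue the new command (the header
type/sub_type fields follow the existing DMUB command pattern; anything not
shown in this hunk is an assumption):

	union dmub_rb_cmd cmd = { 0 };

	cmd.idle_opt_set_dc_power_state.header.type = DMUB_CMD__IDLE_OPT;
	cmd.idle_opt_set_dc_power_state.header.sub_type =
		DMUB_CMD__IDLE_OPT_SET_DC_POWER_STATE;
	cmd.idle_opt_set_dc_power_state.data.power_state =
		DMUB_IDLE_OPT_DC_POWER_STATE_D3;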
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index f5b725f10a7c..745fd052840d 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -61,7 +61,7 @@ enum amd_apu_flags {
* acquires the list of IP blocks for the GPU in use on initialization.
* It can then operate on this list to perform standard driver operations
* such as: init, fini, suspend, resume, etc.
-*
+*
*
* IP block implementations are named using the following convention:
* <functionality>_v<version> (E.g.: gfx_v6_0).
@@ -251,19 +251,92 @@ enum DC_FEATURE_MASK {
DC_REPLAY_MASK = (1 << 9), //0x200, disabled by default for dcn < 3.1.4
};
+/**
+ * enum DC_DEBUG_MASK - Bits that are useful for debugging the Display Core IP
+ */
enum DC_DEBUG_MASK {
+ /**
+ * @DC_DISABLE_PIPE_SPLIT: If set, disable pipe-splitting
+ */
DC_DISABLE_PIPE_SPLIT = 0x1,
+
+ /**
+ * @DC_DISABLE_STUTTER: If set, disable memory stutter mode
+ */
DC_DISABLE_STUTTER = 0x2,
+
+ /**
+ * @DC_DISABLE_DSC: If set, disable display stream compression
+ */
DC_DISABLE_DSC = 0x4,
+
+ /**
+ * @DC_DISABLE_CLOCK_GATING: If set, disable clock gating optimizations
+ */
DC_DISABLE_CLOCK_GATING = 0x8,
+
+ /**
+ * @DC_DISABLE_PSR: If set, disable Panel self refresh v1 and PSR-SU
+ */
DC_DISABLE_PSR = 0x10,
+
+ /**
+ * @DC_FORCE_SUBVP_MCLK_SWITCH: If set, force mclk switch in subvp, even
+ * if mclk switch in vblank is possible
+ */
DC_FORCE_SUBVP_MCLK_SWITCH = 0x20,
+
+ /**
+ * @DC_DISABLE_MPO: If set, disable multi-plane offloading
+ */
DC_DISABLE_MPO = 0x40,
+
+ /**
+ * @DC_ENABLE_DPIA_TRACE: If set, enable trace logging for DPIA
+ */
DC_ENABLE_DPIA_TRACE = 0x80,
+
+ /**
+ * @DC_ENABLE_DML2: If set, force usage of DML2, even if the DCN version
+ * does not default to it.
+ */
DC_ENABLE_DML2 = 0x100,
+
+ /**
+ * @DC_DISABLE_PSR_SU: If set, disable PSR SU
+ */
DC_DISABLE_PSR_SU = 0x200,
+
+ /**
+ * @DC_DISABLE_REPLAY: If set, disable Panel Replay
+ */
DC_DISABLE_REPLAY = 0x400,
+
+ /**
+ * @DC_DISABLE_IPS: If set, disable all Idle Power States, all the time.
+ * If more than one IPS debug bit is set, the lowest bit takes
+ * precedence. For example, if DC_FORCE_IPS_ENABLE and
+ * DC_DISABLE_IPS_DYNAMIC are set, then DC_DISABLE_IPS_DYNAMIC takes
+ * precedence.
+ */
DC_DISABLE_IPS = 0x800,
+
+ /**
+ * @DC_DISABLE_IPS_DYNAMIC: If set, disable all IPS, all the time,
+ * *except* when driver goes into suspend.
+ */
+ DC_DISABLE_IPS_DYNAMIC = 0x1000,
+
+ /**
+ * @DC_DISABLE_IPS2_DYNAMIC: If set, disable IPS2 (IPS1 allowed) if
+ * there is an enabled display. Otherwise, enable all IPS.
+ */
+ DC_DISABLE_IPS2_DYNAMIC = 0x2000,
+
+ /**
+ * @DC_FORCE_IPS_ENABLE: If set, force enable all IPS, all the time.
+ */
+ DC_FORCE_IPS_ENABLE = 0x4000,
};
enum amd_dpm_forced_level;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index 9d7454b3c314..bb3bc68dfc39 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -1257,7 +1257,6 @@ static int smu_sw_init(void *handle)
atomic_set(&smu->smu_power.power_gate.vpe_gated, 1);
atomic_set(&smu->smu_power.power_gate.umsch_mm_gated, 1);
- smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
@@ -1265,6 +1264,7 @@ static int smu_sw_init(void *handle)
smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;
+ smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
@@ -2224,8 +2224,9 @@ static int smu_bump_power_profile_mode(struct smu_context *smu,
}
static int smu_adjust_power_state_dynamic(struct smu_context *smu,
- enum amd_dpm_forced_level level,
- bool skip_display_settings)
+ enum amd_dpm_forced_level level,
+ bool skip_display_settings,
+ bool force_update)
{
int ret = 0;
int index = 0;
@@ -2254,7 +2255,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
}
}
- if (smu_dpm_ctx->dpm_level != level) {
+ if (force_update || smu_dpm_ctx->dpm_level != level) {
ret = smu_asic_set_performance_level(smu, level);
if (ret) {
dev_err(smu->adev->dev, "Failed to set performance level!");
@@ -2271,7 +2272,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
workload[0] = smu->workload_setting[index];
- if (smu->power_profile_mode != workload[0])
+ if (force_update || smu->power_profile_mode != workload[0])
smu_bump_power_profile_mode(smu, workload, 0);
}
@@ -2292,11 +2293,13 @@ static int smu_handle_task(struct smu_context *smu,
ret = smu_pre_display_config_changed(smu);
if (ret)
return ret;
- ret = smu_adjust_power_state_dynamic(smu, level, false);
+ ret = smu_adjust_power_state_dynamic(smu, level, false, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
+ ret = smu_adjust_power_state_dynamic(smu, level, true, true);
+ break;
case AMD_PP_TASK_READJUST_POWER_STATE:
- ret = smu_adjust_power_state_dynamic(smu, level, true);
+ ret = smu_adjust_power_state_dynamic(smu, level, true, false);
break;
default:
break;
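A condensed, self-contained sketch (hypothetical names, not the driver's API) of the force_update semantics introduced above: on AMD_PP_TASK_COMPLETE_INIT the level is written even when the cached value already matches, so the SMU is guaranteed to be programmed once after init, while the normal readjust path still skips redundant writes.

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_smu { int cached_level; };

    static int set_performance_level(struct fake_smu *smu, int level)
    {
            printf("programming SMU level %d\n", level);
            smu->cached_level = level;
            return 0;
    }

    static int adjust_power_state(struct fake_smu *smu, int level, bool force_update)
    {
            if (force_update || smu->cached_level != level)
                    return set_performance_level(smu, level);
            return 0; /* skip redundant writes on the normal path */
    }

    int main(void)
    {
            struct fake_smu smu = { .cached_level = 0 };

            adjust_power_state(&smu, 0, true);  /* COMPLETE_INIT: forced write */
            adjust_power_state(&smu, 0, false); /* READJUST: skipped, no output */
            return 0;
    }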
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index a7d0231727e8..7bc95c404377 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2378,7 +2378,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
size += sysfs_emit_at(buf, size, " ");
for (i = 0; i <= PP_SMC_POWER_PROFILE_WINDOW3D; i++)
- size += sysfs_emit_at(buf, size, "%-14s%s", amdgpu_pp_profile_name[i],
+ size += sysfs_emit_at(buf, size, "%d %-14s%s", i, amdgpu_pp_profile_name[i],
(i == smu->power_profile_mode) ? "* " : " ");
size += sysfs_emit_at(buf, size, "\n");
@@ -2408,7 +2408,7 @@ static int smu_v13_0_7_get_power_profile_mode(struct smu_context *smu, char *buf
do { \
size += sysfs_emit_at(buf, size, "%-30s", #field); \
for (j = 0; j <= PP_SMC_POWER_PROFILE_WINDOW3D; j++) \
- size += sysfs_emit_at(buf, size, "%-16d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
+ size += sysfs_emit_at(buf, size, "%-18d", activity_monitor_external[j].DpmActivityMonitorCoeffInt.field); \
size += sysfs_emit_at(buf, size, "\n"); \
} while (0)
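A minimal reconstruction (assumed buffer size, and a two-entry subset of amdgpu_pp_profile_name) of how the new index column changes the header row that lands in pp_power_profile_mode:

    #include <stdio.h>

    int main(void)
    {
            const char *names[] = { "BOOTUP_DEFAULT", "3D_FULL_SCREEN" };
            char buf[128];
            int size = 0, cur = 1, i;

            for (i = 0; i < 2; i++)
                    size += snprintf(buf + size, sizeof(buf) - size, "%d %-14s%s",
                                     i, names[i], (i == cur) ? "* " : "  ");
            puts(buf); /* "0 BOOTUP_DEFAULT  1 3D_FULL_SCREEN* " */
            return 0;
    }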
diff --git a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
index fe46b0ebefea..e5eb5d672bcd 100644
--- a/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
+++ b/drivers/gpu/drm/arm/display/komeda/komeda_kms.c
@@ -160,6 +160,7 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
struct drm_plane *plane;
struct list_head zorder_list;
int order = 0, err;
+ u32 slave_zpos = 0;
DRM_DEBUG_ATOMIC("[CRTC:%d:%s] calculating normalized zpos values\n",
crtc->base.id, crtc->name);
@@ -199,10 +200,13 @@ static int komeda_crtc_normalize_zpos(struct drm_crtc *crtc,
plane_st->zpos, plane_st->normalized_zpos);
/* calculate max slave zorder */
- if (has_bit(drm_plane_index(plane), kcrtc->slave_planes))
+ if (has_bit(drm_plane_index(plane), kcrtc->slave_planes)) {
+ slave_zpos = plane_st->normalized_zpos;
+ if (to_kplane_st(plane_st)->layer_split)
+ slave_zpos++;
kcrtc_st->max_slave_zorder =
- max(plane_st->normalized_zpos,
- kcrtc_st->max_slave_zorder);
+ max(slave_zpos, kcrtc_st->max_slave_zorder);
+ }
}
crtc_st->zpos_changed = true;
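Illustrative helper (not part of the patch) capturing the rule this hunk encodes: a layer-split slave plane consumes one extra hardware layer, so its effective top z-position is its normalized zpos plus one.

    #include <stdbool.h>

    static unsigned int slave_top_zpos(unsigned int normalized_zpos,
                                       bool layer_split)
    {
            return layer_split ? normalized_zpos + 1 : normalized_zpos;
    }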
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index c0ab5b620b57..683cb33805b2 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -397,6 +397,7 @@ config DRM_TI_SN65DSI86
depends on OF
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_KMS_HELPER
select REGMAP_I2C
select DRM_PANEL
diff --git a/drivers/gpu/drm/display/Kconfig b/drivers/gpu/drm/display/Kconfig
index a2e42014ffe0..0f07cf1483ff 100644
--- a/drivers/gpu/drm/display/Kconfig
+++ b/drivers/gpu/drm/display/Kconfig
@@ -1,19 +1,26 @@
# SPDX-License-Identifier: MIT
+config DRM_DISPLAY_DP_AUX_BUS
+ tristate
+ depends on DRM
+ depends on OF || COMPILE_TEST
+
config DRM_DISPLAY_HELPER
tristate
depends on DRM
help
DRM helpers for display adapters.
-config DRM_DISPLAY_DP_AUX_BUS
- tristate
- depends on DRM
- depends on OF || COMPILE_TEST
+if DRM_DISPLAY_HELPER
+
+config DRM_BRIDGE_CONNECTOR
+ bool
+ select DRM_DISPLAY_HDMI_STATE_HELPER
+ help
+ DRM connector implementation terminating DRM bridge chains.
config DRM_DISPLAY_DP_AUX_CEC
bool "Enable DisplayPort CEC-Tunneling-over-AUX HDMI support"
- depends on DRM && DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
select CEC_CORE
help
@@ -25,7 +32,6 @@ config DRM_DISPLAY_DP_AUX_CEC
config DRM_DISPLAY_DP_AUX_CHARDEV
bool "DRM DP AUX Interface"
- depends on DRM && DRM_DISPLAY_HELPER
select DRM_DISPLAY_DP_HELPER
help
Choose this option to enable a /dev/drm_dp_auxN node that allows to
@@ -34,7 +40,6 @@ config DRM_DISPLAY_DP_AUX_CHARDEV
config DRM_DISPLAY_DP_HELPER
bool
- depends on DRM_DISPLAY_HELPER
help
DRM display helpers for DisplayPort.
@@ -67,19 +72,18 @@ config DRM_DISPLAY_DSC_HELPER
config DRM_DISPLAY_HDCP_HELPER
bool
- depends on DRM_DISPLAY_HELPER
help
DRM display helpers for HDCP.
config DRM_DISPLAY_HDMI_HELPER
bool
- depends on DRM_DISPLAY_HELPER
help
DRM display helpers for HDMI.
config DRM_DISPLAY_HDMI_STATE_HELPER
bool
- depends on DRM_DISPLAY_HELPER
select DRM_DISPLAY_HDMI_HELPER
help
DRM KMS state helpers for HDMI.
+
+endif # DRM_DISPLAY_HELPER
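The if/endif block gives every enclosed symbol an implicit dependency on DRM_DISPLAY_HELPER, which is why the per-symbol "depends on" lines are dropped above. Sketch of the equivalence with a hypothetical symbol FOO:

    config FOO
            bool
            depends on DRM_DISPLAY_HELPER

    # ...is equivalent to placing the entry inside the guarded block:

    if DRM_DISPLAY_HELPER

    config FOO
            bool

    endif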
diff --git a/drivers/gpu/drm/display/Makefile b/drivers/gpu/drm/display/Makefile
index a023f72fa139..629c834c3192 100644
--- a/drivers/gpu/drm/display/Makefile
+++ b/drivers/gpu/drm/display/Makefile
@@ -3,6 +3,8 @@
obj-$(CONFIG_DRM_DISPLAY_DP_AUX_BUS) += drm_dp_aux_bus.o
drm_display_helper-y := drm_display_helper_mod.o
+drm_display_helper-$(CONFIG_DRM_BRIDGE_CONNECTOR) += \
+ drm_bridge_connector.o
drm_display_helper-$(CONFIG_DRM_DISPLAY_DP_HELPER) += \
drm_dp_dual_mode_helper.o \
drm_dp_helper.o \
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c
index a4fbf1eb7ac5..3da5b8bf8259 100644
--- a/drivers/gpu/drm/drm_bridge_connector.c
+++ b/drivers/gpu/drm/display/drm_bridge_connector.c
@@ -216,8 +216,19 @@ static void drm_bridge_connector_debugfs_init(struct drm_connector *connector,
}
}
+static void drm_bridge_connector_reset(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ drm_atomic_helper_connector_reset(connector);
+ if (bridge_connector->bridge_hdmi)
+ __drm_atomic_helper_connector_hdmi_reset(connector,
+ connector->state);
+}
+
static const struct drm_connector_funcs drm_bridge_connector_funcs = {
- .reset = drm_atomic_helper_connector_reset,
+ .reset = drm_bridge_connector_reset,
.detect = drm_bridge_connector_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
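A condensed sketch (hypothetical types, not the DRM API) of the layered reset above: common connector state is reset first, and HDMI defaults are applied on top only when an HDMI bridge terminates the chain.

    #include <stdbool.h>

    struct conn_state { bool base_reset; bool hdmi_reset; };

    static void bridge_connector_reset_sketch(struct conn_state *s,
                                              bool has_hdmi_bridge)
    {
            s->base_reset = true;           /* common KMS connector state */
            if (has_hdmi_bridge)
                    s->hdmi_reset = true;   /* HDMI-specific state defaults */
    }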
diff --git a/drivers/gpu/drm/drm_fbdev_dma.c b/drivers/gpu/drm/drm_fbdev_dma.c
index 7ef5a48c8029..b0602c4f3628 100644
--- a/drivers/gpu/drm/drm_fbdev_dma.c
+++ b/drivers/gpu/drm/drm_fbdev_dma.c
@@ -36,20 +36,11 @@ static int drm_fbdev_dma_fb_release(struct fb_info *info, int user)
return 0;
}
-FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
- drm_fb_helper_damage_range,
- drm_fb_helper_damage_area);
-
static int drm_fbdev_dma_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
struct drm_fb_helper *fb_helper = info->par;
- struct drm_framebuffer *fb = fb_helper->fb;
- struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
- if (!dma->map_noncoherent)
- vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
-
- return fb_deferred_io_mmap(info, vma);
+ return drm_gem_prime_mmap(fb_helper->buffer->gem, vma);
}
static void drm_fbdev_dma_fb_destroy(struct fb_info *info)
@@ -73,10 +64,37 @@ static const struct fb_ops drm_fbdev_dma_fb_ops = {
.owner = THIS_MODULE,
.fb_open = drm_fbdev_dma_fb_open,
.fb_release = drm_fbdev_dma_fb_release,
+ __FB_DEFAULT_DMAMEM_OPS_RDWR,
+ DRM_FB_HELPER_DEFAULT_OPS,
+ __FB_DEFAULT_DMAMEM_OPS_DRAW,
+ .fb_mmap = drm_fbdev_dma_fb_mmap,
+ .fb_destroy = drm_fbdev_dma_fb_destroy,
+};
+
+FB_GEN_DEFAULT_DEFERRED_DMAMEM_OPS(drm_fbdev_dma,
+ drm_fb_helper_damage_range,
+ drm_fb_helper_damage_area);
+
+static int drm_fbdev_dma_deferred_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
+{
+ struct drm_fb_helper *fb_helper = info->par;
+ struct drm_framebuffer *fb = fb_helper->fb;
+ struct drm_gem_dma_object *dma = drm_fb_dma_get_gem_obj(fb, 0);
+
+ if (!dma->map_noncoherent)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+
+ return fb_deferred_io_mmap(info, vma);
+}
+
+static const struct fb_ops drm_fbdev_dma_deferred_fb_ops = {
+ .owner = THIS_MODULE,
+ .fb_open = drm_fbdev_dma_fb_open,
+ .fb_release = drm_fbdev_dma_fb_release,
__FB_DEFAULT_DEFERRED_OPS_RDWR(drm_fbdev_dma),
DRM_FB_HELPER_DEFAULT_OPS,
__FB_DEFAULT_DEFERRED_OPS_DRAW(drm_fbdev_dma),
- .fb_mmap = drm_fbdev_dma_fb_mmap,
+ .fb_mmap = drm_fbdev_dma_deferred_fb_mmap,
.fb_destroy = drm_fbdev_dma_fb_destroy,
};
@@ -89,6 +107,7 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
{
struct drm_client_dev *client = &fb_helper->client;
struct drm_device *dev = fb_helper->dev;
+ bool use_deferred_io = false;
struct drm_client_buffer *buffer;
struct drm_gem_dma_object *dma_obj;
struct drm_framebuffer *fb;
@@ -111,6 +130,15 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
fb = buffer->fb;
+ /*
+ * Deferred I/O requires struct page for framebuffer memory,
+ * which is not guaranteed for all DMA ranges. We thus only
+ * install deferred I/O if we have a framebuffer that requires
+ * it.
+ */
+ if (fb->funcs->dirty)
+ use_deferred_io = true;
+
ret = drm_client_buffer_vmap(buffer, &map);
if (ret) {
goto err_drm_client_buffer_delete;
@@ -130,7 +158,10 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
drm_fb_helper_fill_info(info, fb_helper, sizes);
- info->fbops = &drm_fbdev_dma_fb_ops;
+ if (use_deferred_io)
+ info->fbops = &drm_fbdev_dma_deferred_fb_ops;
+ else
+ info->fbops = &drm_fbdev_dma_fb_ops;
/* screen */
info->flags |= FBINFO_VIRTFB; /* system memory */
@@ -144,14 +175,28 @@ static int drm_fbdev_dma_helper_fb_probe(struct drm_fb_helper *fb_helper,
}
info->fix.smem_len = info->screen_size;
- /* deferred I/O */
- fb_helper->fbdefio.delay = HZ / 20;
- fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+ /*
+ * Only set up deferred I/O if the screen buffer supports
+ * it. If this disagrees with the previous test for ->dirty,
+ * mmap on the /dev/fb file might not work correctly.
+ */
+ if (!is_vmalloc_addr(info->screen_buffer) && info->fix.smem_start) {
+ unsigned long pfn = info->fix.smem_start >> PAGE_SHIFT;
- info->fbdefio = &fb_helper->fbdefio;
- ret = fb_deferred_io_init(info);
- if (ret)
- goto err_drm_fb_helper_release_info;
+ if (drm_WARN_ON(dev, !pfn_to_page(pfn)))
+ use_deferred_io = false;
+ }
+
+ /* deferred I/O */
+ if (use_deferred_io) {
+ fb_helper->fbdefio.delay = HZ / 20;
+ fb_helper->fbdefio.deferred_io = drm_fb_helper_deferred_io;
+
+ info->fbdefio = &fb_helper->fbdefio;
+ ret = fb_deferred_io_init(info);
+ if (ret)
+ goto err_drm_fb_helper_release_info;
+ }
return 0;
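The two checks in the probe path condense to the following decision (hypothetical helper, not driver API): deferred I/O is installed only when the framebuffer wants dirty tracking and its memory is backed by struct page.

    #include <stdbool.h>

    static bool want_deferred_io(bool has_dirty_cb, bool vmalloc_backed,
                                 bool pfn_has_struct_page)
    {
            if (!has_dirty_cb)
                    return false;           /* no dirty tracking requested */
            if (vmalloc_backed)
                    return true;            /* vmalloc memory always has pages */
            return pfn_has_struct_page;     /* DMA carveouts may not */
    }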
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 81d501efd013..23646e55f142 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -254,10 +254,6 @@ static inline int exynos_drm_check_fimc_device(struct device *dev)
}
#endif
-int exynos_atomic_commit(struct drm_device *dev, struct drm_atomic_state *state,
- bool nonblock);
-
-
extern struct platform_driver fimd_driver;
extern struct platform_driver exynos5433_decon_driver;
extern struct platform_driver decon_driver;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 142184c8c3bc..4d7ea65b7dd8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1125,7 +1125,7 @@ static void fimc_abort(struct exynos_drm_ipp *ipp,
}
}
-static struct exynos_drm_ipp_funcs ipp_funcs = {
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = fimc_commit,
.abort = fimc_abort,
};
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 1b111e2c3347..59fa22050717 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1162,7 +1162,7 @@ static void gsc_abort(struct exynos_drm_ipp *ipp,
}
}
-static struct exynos_drm_ipp_funcs ipp_funcs = {
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = gsc_commit,
.abort = gsc_abort,
};
@@ -1174,7 +1174,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
struct exynos_drm_ipp *ipp = &ctx->ipp;
ctx->drm_dev = drm_dev;
- ctx->drm_dev = drm_dev;
+ ipp->drm_dev = drm_dev;
exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index a9d469896824..2788105ac780 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -403,7 +403,7 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
return 0;
}
-static struct exynos_drm_ipp_funcs ipp_funcs = {
+static const struct exynos_drm_ipp_funcs ipp_funcs = {
.commit = scaler_commit,
};
diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c
index c2128b46bdbd..526c8c4d7b53 100644
--- a/drivers/gpu/drm/i915/display/g4x_dp.c
+++ b/drivers/gpu/drm/i915/display/g4x_dp.c
@@ -89,6 +89,7 @@ void g4x_dp_set_clock(struct intel_encoder *encoder,
static void intel_dp_prepare(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
enum port port = encoder->port;
@@ -118,7 +119,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ intel_dp->DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
@@ -140,7 +141,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
} else if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- intel_de_rmw(dev_priv, TRANS_DP_CTL(crtc->pipe),
+ intel_de_rmw(display, TRANS_DP_CTL(crtc->pipe),
TRANS_DP_ENH_FRAMING,
pipe_config->enhanced_framing ?
TRANS_DP_ENH_FRAMING : 0);
@@ -166,9 +167,10 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
+ bool cur_state = intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(dev_priv, cur_state != state,
"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
@@ -179,7 +181,8 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
- bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
+ struct intel_display *display = &dev_priv->display;
+ bool cur_state = intel_de_read(display, DP_A) & DP_PLL_ENABLE;
I915_STATE_WARN(dev_priv, cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -191,6 +194,7 @@ static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
static void ilk_edp_pll_on(struct intel_dp *intel_dp,
const struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -198,7 +202,7 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
+ drm_dbg_kms(display->drm, "enabling eDP PLL for clock %d\n",
pipe_config->port_clock);
intel_dp->DP &= ~DP_PLL_FREQ_MASK;
@@ -208,8 +212,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
+ intel_de_write(display, DP_A, intel_dp->DP);
+ intel_de_posting_read(display, DP_A);
udelay(500);
/*
@@ -223,14 +227,15 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
intel_dp->DP |= DP_PLL_ENABLE;
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
+ intel_de_write(display, DP_A, intel_dp->DP);
+ intel_de_posting_read(display, DP_A);
udelay(200);
}
static void ilk_edp_pll_off(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -238,22 +243,23 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
- drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
+ drm_dbg_kms(display->drm, "disabling eDP PLL\n");
intel_dp->DP &= ~DP_PLL_ENABLE;
- intel_de_write(dev_priv, DP_A, intel_dp->DP);
- intel_de_posting_read(dev_priv, DP_A);
+ intel_de_write(display, DP_A, intel_dp->DP);
+ intel_de_posting_read(display, DP_A);
udelay(200);
}
static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
enum port port, enum pipe *pipe)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe p;
- for_each_pipe(dev_priv, p) {
- u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
+ for_each_pipe(display, p) {
+ u32 val = intel_de_read(display, TRANS_DP_CTL(p));
if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
*pipe = p;
@@ -261,7 +267,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
}
}
- drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
+ drm_dbg_kms(display->drm, "No pipe for DP port %c found\n",
port_name(port));
/* must initialize pipe to something for the asserts */
@@ -274,10 +280,11 @@ bool g4x_dp_port_enabled(struct drm_i915_private *dev_priv,
i915_reg_t dp_reg, enum port port,
enum pipe *pipe)
{
+ struct intel_display *display = &dev_priv->display;
bool ret;
u32 val;
- val = intel_de_read(dev_priv, dp_reg);
+ val = intel_de_read(display, dp_reg);
ret = val & DP_PORT_EN;
@@ -333,6 +340,7 @@ static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state)
static void intel_dp_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u32 tmp, flags = 0;
@@ -344,12 +352,12 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
else
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
- tmp = intel_de_read(dev_priv, intel_dp->output_reg);
+ tmp = intel_de_read(display, intel_dp->output_reg);
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp = intel_de_read(dev_priv,
+ u32 trans_dp = intel_de_read(display,
TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_ENH_FRAMING)
@@ -390,7 +398,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
g4x_dp_get_m_n(pipe_config);
if (port == PORT_A) {
- if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(display, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -410,17 +418,18 @@ static void
intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
enum port port = encoder->port;
- if (drm_WARN_ON(&dev_priv->drm,
- (intel_de_read(dev_priv, intel_dp->output_reg) &
+ if (drm_WARN_ON(display->drm,
+ (intel_de_read(display, intel_dp->output_reg) &
DP_PORT_EN) == 0))
return;
- drm_dbg_kms(&dev_priv->drm, "\n");
+ drm_dbg_kms(display->drm, "\n");
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -430,12 +439,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE;
}
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
intel_dp->DP &= ~DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -454,12 +463,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
intel_dp->DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
intel_dp->DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
DP_LINK_TRAIN_PAT_1;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
intel_dp->DP &= ~DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -480,7 +489,7 @@ static void g4x_dp_audio_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (!crtc_state->has_audio)
@@ -488,7 +497,7 @@ static void g4x_dp_audio_enable(struct intel_encoder *encoder,
/* Enable audio presence detect */
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
intel_audio_codec_enable(encoder, crtc_state, conn_state);
}
@@ -497,7 +506,7 @@ static void g4x_dp_audio_disable(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (!old_crtc_state->has_audio)
@@ -507,7 +516,7 @@ static void g4x_dp_audio_disable(struct intel_encoder *encoder,
/* Disable audio presence detect */
intel_dp->DP &= ~DP_AUDIO_OUTPUT_ENABLE;
- intel_de_write(i915, intel_dp->output_reg, intel_dp->DP);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
}
static void intel_disable_dp(struct intel_atomic_state *state,
@@ -596,7 +605,7 @@ cpt_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT;
@@ -615,8 +624,8 @@ cpt_set_link_train(struct intel_dp *intel_dp,
return;
}
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
static void
@@ -624,7 +633,7 @@ g4x_set_link_train(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
u8 dp_train_pat)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_dp->DP &= ~DP_LINK_TRAIN_MASK;
@@ -643,14 +652,14 @@ g4x_set_link_train(struct intel_dp *intel_dp,
return;
}
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
static void intel_dp_enable_port(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
/* enable with pattern 1 (as per spec) */
@@ -665,8 +674,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
*/
intel_dp->DP |= DP_PORT_EN;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
static void intel_enable_dp(struct intel_atomic_state *state,
@@ -674,12 +683,13 @@ static void intel_enable_dp(struct intel_atomic_state *state,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
+ u32 dp_reg = intel_de_read(display, intel_dp->output_reg);
intel_wakeref_t wakeref;
- if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
+ if (drm_WARN_ON(display->drm, dp_reg & DP_PORT_EN))
return;
with_intel_pps_lock(intel_dp, wakeref) {
@@ -1026,21 +1036,21 @@ static void
g4x_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = g4x_signal_levels(train_set);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~(DP_VOLTAGE_MASK | DP_PRE_EMPHASIS_MASK);
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
/* SNB CPU eDP voltage swing and pre-emphasis control */
@@ -1074,21 +1084,21 @@ static void
snb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = snb_cpu_edp_signal_levels(train_set);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
/* IVB CPU eDP voltage swing and pre-emphasis control */
@@ -1126,21 +1136,21 @@ static void
ivb_cpu_edp_set_signal_levels(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
u8 train_set = intel_dp->train_set[0];
u32 signal_levels;
signal_levels = ivb_cpu_edp_signal_levels(train_set);
- drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ drm_dbg_kms(display->drm, "Using signal levels %08x\n",
signal_levels);
intel_dp->DP &= ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB;
intel_dp->DP |= signal_levels;
- intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
}
/*
@@ -1185,15 +1195,15 @@ intel_dp_hotplug(struct intel_encoder *encoder,
static bool ibx_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.pch_hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.pch_hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, SDEISR) & bit;
+ return intel_de_read(display, SDEISR) & bit;
}
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 bit;
switch (encoder->hpd_pin) {
@@ -1211,15 +1221,15 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return intel_de_read(dev_priv, PORT_HOTPLUG_STAT(dev_priv)) & bit;
+ return intel_de_read(display, PORT_HOTPLUG_STAT(display)) & bit;
}
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 bit = dev_priv->display.hotplug.hpd[encoder->hpd_pin];
+ struct intel_display *display = to_intel_display(encoder);
+ u32 bit = display->hotplug.hpd[encoder->hpd_pin];
- return intel_de_read(dev_priv, DEISR) & bit;
+ return intel_de_read(display, DEISR) & bit;
}
static void g4x_dp_suspend_complete(struct intel_encoder *encoder)
@@ -1241,7 +1251,8 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
enum pipe pipe;
@@ -1254,10 +1265,11 @@ enum pipe vlv_active_pipe(struct intel_dp *intel_dp)
static void intel_dp_encoder_reset(struct drm_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder->dev);
struct drm_i915_private *dev_priv = to_i915(encoder->dev);
struct intel_dp *intel_dp = enc_to_intel_dp(to_intel_encoder(encoder));
- intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
+ intel_dp->DP = intel_de_read(display, intel_dp->output_reg);
intel_dp->reset_link_params = true;
@@ -1293,7 +1305,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
/* FIXME bail? */
if (!devdata)
- drm_dbg_kms(&dev_priv->drm, "No VBT child device for DP-%c\n",
+ drm_dbg_kms(display->drm, "No VBT child device for DP-%c\n",
port_name(port));
dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL);
@@ -1313,7 +1325,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
mutex_init(&dig_port->hdcp_mutex);
- if (drm_encoder_init(&dev_priv->drm, &intel_encoder->base,
+ if (drm_encoder_init(display->drm, &intel_encoder->base,
&intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS,
"DP %c", port_name(port)))
goto err_encoder_init;
@@ -1397,7 +1409,7 @@ bool g4x_dp_init(struct drm_i915_private *dev_priv,
dig_port->hpd_pulse = intel_dp_hpd_pulse;
- if (HAS_GMCH(dev_priv)) {
+ if (HAS_GMCH(display)) {
dig_port->connected = g4x_digital_port_connected;
} else {
if (port == PORT_A)
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 82ee778b2efe..186cf4833f71 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -228,7 +228,7 @@ bool intel_alpm_compute_params(struct intel_dp *intel_dp,
int tfw_exit_latency = 20; /* eDP spec */
int phy_wake = 4; /* eDP spec */
int preamble = 8; /* eDP spec */
- int precharge = intel_dp_aux_fw_sync_len() - preamble;
+ int precharge = intel_dp_aux_fw_sync_len(intel_dp) - preamble;
u8 max_wake_lines;
io_wake_time = max(precharge, io_buffer_wake_time(crtc_state)) +
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index 9b8508a503f7..f5e7eefab2f1 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -980,7 +980,8 @@ retry:
static unsigned long i915_audio_component_get_power(struct device *kdev)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t ret;
/* Catch potential impedance mismatches before they occur! */
@@ -1012,7 +1013,8 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
static void i915_audio_component_put_power(struct device *kdev,
unsigned long cookie)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
/* Stop forcing CDCLK to 2*BCLK if no need for audio to be powered. */
if (--i915->display.audio.power_refcount == 0)
@@ -1025,7 +1027,8 @@ static void i915_audio_component_put_power(struct device *kdev,
static void i915_audio_component_codec_wake_override(struct device *kdev,
bool enable)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long cookie;
if (DISPLAY_VER(i915) < 9)
@@ -1053,7 +1056,8 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
/* Get CDCLK in kHz */
static int i915_audio_component_get_cdclk_freq(struct device *kdev)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (drm_WARN_ON_ONCE(&i915->drm, !HAS_DDI(i915)))
return -ENODEV;
@@ -1112,7 +1116,8 @@ static struct intel_audio_state *find_audio_state(struct drm_i915_private *i915,
static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
int cpu_transcoder, int rate)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = i915->display.audio.component;
const struct intel_audio_state *audio_state;
struct intel_encoder *encoder;
@@ -1154,7 +1159,8 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
int cpu_transcoder, bool *enabled,
unsigned char *buf, int max_bytes)
{
- struct drm_i915_private *i915 = kdev_to_i915(kdev);
+ struct intel_display *display = to_intel_display(kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
const struct intel_audio_state *audio_state;
int ret = 0;
@@ -1189,24 +1195,25 @@ static const struct drm_audio_component_ops i915_audio_component_ops = {
.get_eld = i915_audio_component_get_eld,
};
-static int i915_audio_component_bind(struct device *i915_kdev,
+static int i915_audio_component_bind(struct device *drv_kdev,
struct device *hda_kdev, void *data)
{
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
int i;
if (drm_WARN_ON(&i915->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
if (drm_WARN_ON(&i915->drm,
- !device_link_add(hda_kdev, i915_kdev,
+ !device_link_add(hda_kdev, drv_kdev,
DL_FLAG_STATELESS)))
return -ENOMEM;
drm_modeset_lock_all(&i915->drm);
acomp->base.ops = &i915_audio_component_ops;
- acomp->base.dev = i915_kdev;
+ acomp->base.dev = drv_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
@@ -1216,11 +1223,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
return 0;
}
-static void i915_audio_component_unbind(struct device *i915_kdev,
+static void i915_audio_component_unbind(struct device *drv_kdev,
struct device *hda_kdev, void *data)
{
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct i915_audio_component *acomp = data;
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
drm_modeset_lock_all(&i915->drm);
acomp->base.ops = NULL;
@@ -1228,7 +1236,7 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
i915->display.audio.component = NULL;
drm_modeset_unlock_all(&i915->drm);
- device_link_remove(hda_kdev, i915_kdev);
+ device_link_remove(hda_kdev, drv_kdev);
if (i915->display.audio.power_refcount)
drm_err(&i915->drm, "audio power refcount %d after unbind\n",
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 25ff3ff0ab95..00fbe9f8c03a 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -1400,7 +1400,7 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder,
static int translate_signal_level(struct intel_dp *intel_dp,
u8 signal_levels)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int i;
for (i = 0; i < ARRAY_SIZE(index_to_dp_signal_levels); i++) {
@@ -1408,7 +1408,7 @@ static int translate_signal_level(struct intel_dp *intel_dp,
return i;
}
- drm_WARN(&i915->drm, 1,
+ drm_WARN(display->drm, 1,
"Unsupported voltage swing/pre-emphasis level: 0x%x\n",
signal_levels);
@@ -2211,14 +2211,14 @@ static void intel_dp_sink_set_msa_timing_par_ignore_state(struct intel_dp *intel
const struct intel_crtc_state *crtc_state,
bool enable)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!crtc_state->vrr.enable)
return;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_DOWNSPREAD_CTRL,
enable ? DP_MSA_TIMING_PAR_IGNORE_EN : 0) <= 0)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Failed to %s MSA_TIMING_PAR_IGNORE in the sink\n",
str_enable_disable(enable));
}
@@ -2227,20 +2227,20 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool enable)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!crtc_state->fec_enable)
return;
if (drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_CONFIGURATION,
enable ? DP_FEC_READY : 0) <= 0)
- drm_dbg_kms(&i915->drm, "Failed to set FEC_READY to %s in the sink\n",
+ drm_dbg_kms(display->drm, "Failed to set FEC_READY to %s in the sink\n",
enable ? "enabled" : "disabled");
if (enable &&
drm_dp_dpcd_writeb(&intel_dp->aux, DP_FEC_STATUS,
DP_FEC_DECODE_EN_DETECTED | DP_FEC_DECODE_DIS_DETECTED) <= 0)
- drm_dbg_kms(&i915->drm, "Failed to clear FEC detected flags\n");
+ drm_dbg_kms(display->drm, "Failed to clear FEC detected flags\n");
}
static int read_fec_detected_status(struct drm_dp_aux *aux)
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 78ce402a5cd0..b4ef4d59da1a 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -7793,7 +7793,7 @@ void intel_setup_outputs(struct drm_i915_private *dev_priv)
struct intel_encoder *encoder;
bool dpd_is_edp = false;
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
if (!HAS_DISPLAY(dev_priv))
return;
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
index 74f527647aa9..f5f618199d39 100644
--- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -1071,7 +1071,7 @@ void intel_display_debugfs_register(struct drm_i915_private *i915)
intel_fbc_debugfs_register(display);
intel_hpd_debugfs_register(i915);
intel_opregion_debugfs_register(display);
- intel_psr_debugfs_register(i915);
+ intel_psr_debugfs_register(display);
intel_wm_debugfs_register(i915);
intel_display_debugfs_params(display);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c
index eced20d2ce6e..069426d9260b 100644
--- a/drivers/gpu/drm/i915/display/intel_display_driver.c
+++ b/drivers/gpu/drm/i915/display/intel_display_driver.c
@@ -428,7 +428,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_panel_sanitize_ssc(i915);
- intel_pps_setup(i915);
+ intel_pps_setup(display);
intel_gmbus_setup(i915);
@@ -459,7 +459,7 @@ int intel_display_driver_probe_nogem(struct drm_i915_private *i915)
intel_vga_disable(i915);
intel_setup_outputs(i915);
- ret = intel_dp_tunnel_mgr_init(i915);
+ ret = intel_dp_tunnel_mgr_init(display);
if (ret)
goto err_hdcp;
@@ -580,6 +580,8 @@ void intel_display_driver_remove(struct drm_i915_private *i915)
/* part #2: call after irq uninstall */
void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
{
+ struct intel_display *display = &i915->display;
+
if (!HAS_DISPLAY(i915))
return;
@@ -600,7 +602,7 @@ void intel_display_driver_remove_noirq(struct drm_i915_private *i915)
intel_mode_config_cleanup(i915);
- intel_dp_tunnel_mgr_cleanup(i915);
+ intel_dp_tunnel_mgr_cleanup(display);
intel_overlay_cleanup(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c
index d85c33eabc47..73369847ed66 100644
--- a/drivers/gpu/drm/i915/display/intel_display_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_display_irq.c
@@ -576,6 +576,7 @@ void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK;
@@ -589,7 +590,7 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (pch_iir & SDE_GMBUS)
intel_gmbus_irq_handler(dev_priv);
@@ -664,6 +665,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
{
+ struct intel_display *display = &dev_priv->display;
enum pipe pipe;
u32 hotplug_trigger = pch_iir & SDE_HOTPLUG_MASK_CPT;
@@ -677,7 +679,7 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
}
if (pch_iir & SDE_AUX_MASK_CPT)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (pch_iir & SDE_GMBUS_CPT)
intel_gmbus_irq_handler(dev_priv);
@@ -709,7 +711,7 @@ void ilk_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
ilk_hpd_irq_handler(dev_priv, hotplug_trigger);
if (de_iir & DE_AUX_CHANNEL_A)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (de_iir & DE_GSE)
intel_opregion_asle_intr(display);
@@ -775,7 +777,7 @@ void ivb_display_irq_handler(struct drm_i915_private *dev_priv, u32 de_iir)
}
if (de_iir & DE_AUX_CHANNEL_A_IVB)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
if (de_iir & DE_GSE_IVB)
intel_opregion_asle_intr(display);
@@ -1065,6 +1067,7 @@ static void gen8_read_and_ack_pch_irqs(struct drm_i915_private *i915, u32 *pch_i
void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
{
+ struct intel_display *display = &dev_priv->display;
u32 iir;
enum pipe pipe;
@@ -1100,7 +1103,7 @@ void gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
intel_uncore_write(&dev_priv->uncore, GEN8_DE_PORT_IIR, iir);
if (iir & gen8_de_port_aux_mask(dev_priv)) {
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
found = true;
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c
index adf5d1fbccb5..46e9eff12c23 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power_well.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c
@@ -861,6 +861,8 @@ void skl_enable_dc6(struct drm_i915_private *dev_priv)
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
assert_can_enable_dc9(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
@@ -870,19 +872,21 @@ void bxt_enable_dc9(struct drm_i915_private *dev_priv)
* because PPS registers are always on.
*/
if (!HAS_PCH_SPLIT(dev_priv))
- intel_pps_reset_all(dev_priv);
+ intel_pps_reset_all(display);
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC9);
}
void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
assert_can_disable_dc9(dev_priv);
drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
}
static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1184,6 +1188,7 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
struct intel_encoder *encoder;
enum pipe pipe;
@@ -1229,11 +1234,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
intel_vga_redisable_power_on(dev_priv);
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
}
static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
{
+ struct intel_display *display = &dev_priv->display;
+
spin_lock_irq(&dev_priv->irq_lock);
valleyview_disable_display_irqs(dev_priv);
spin_unlock_irq(&dev_priv->irq_lock);
@@ -1241,7 +1248,7 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv)
/* make sure we're done processing display irqs */
intel_synchronize_irq(dev_priv);
- intel_pps_reset_all(dev_priv);
+ intel_pps_reset_all(display);
/* Prevent us from re-enabling polling on accident in late suspend */
if (!dev_priv->drm.dev->power.is_suspended)
diff --git a/drivers/gpu/drm/i915/display/intel_display_reset.c b/drivers/gpu/drm/i915/display/intel_display_reset.c
index c2c347b22448..49e2e650ebcd 100644
--- a/drivers/gpu/drm/i915/display/intel_display_reset.c
+++ b/drivers/gpu/drm/i915/display/intel_display_reset.c
@@ -83,7 +83,8 @@ void intel_display_reset_prepare(struct drm_i915_private *dev_priv)
void intel_display_reset_finish(struct drm_i915_private *i915)
{
- struct drm_modeset_acquire_ctx *ctx = &i915->display.restore.reset_ctx;
+ struct intel_display *display = &i915->display;
+ struct drm_modeset_acquire_ctx *ctx = &display->restore.reset_ctx;
struct drm_atomic_state *state;
int ret;
@@ -94,7 +95,7 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
if (!test_bit(I915_RESET_MODESET, &to_gt(i915)->reset.flags))
return;
- state = fetch_and_zero(&i915->display.restore.modeset_state);
+ state = fetch_and_zero(&display->restore.modeset_state);
if (!state)
goto unlock;
@@ -112,7 +113,7 @@ void intel_display_reset_finish(struct drm_i915_private *i915)
* The display has been reset as well,
* so need a full re-initialization.
*/
- intel_pps_unlock_regs_wa(i915);
+ intel_pps_unlock_regs_wa(display);
intel_display_driver_init_hw(i915);
intel_clock_gating_init(i915);
intel_hpd_init(i915);
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 868ff8976ed9..f29e5dc3db91 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -1907,6 +1907,10 @@ struct intel_dp {
} alpm_parameters;
u8 alpm_dpcd;
+
+ struct {
+ unsigned long mask;
+ } quirks;
};
enum lspcon_vendor {
@@ -2081,8 +2085,6 @@ dp_to_lspcon(struct intel_dp *intel_dp)
return &dp_to_dig_port(intel_dp)->lspcon;
}
-#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
-
static inline struct intel_digital_port *
hdmi_to_dig_port(struct intel_hdmi *intel_hdmi)
{
@@ -2205,7 +2207,11 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
* intel_display pointer.
*/
#define __drm_device_to_intel_display(p) \
- (&to_i915(p)->display)
+ ((p) ? &to_i915(p)->display : NULL)
+#define __device_to_intel_display(p) \
+ __drm_device_to_intel_display(dev_get_drvdata(p))
+#define __pci_dev_to_intel_display(p) \
+ __drm_device_to_intel_display(pci_get_drvdata(p))
#define __intel_atomic_state_to_intel_display(p) \
__drm_device_to_intel_display((p)->base.dev)
#define __intel_connector_to_intel_display(p) \
@@ -2231,6 +2237,8 @@ to_intel_frontbuffer(struct drm_framebuffer *fb)
#define to_intel_display(p) \
_Generic(*p, \
__assoc(drm_device, p), \
+ __assoc(device, p), \
+ __assoc(pci_dev, p), \
__assoc(intel_atomic_state, p), \
__assoc(intel_connector, p), \
__assoc(intel_crtc, p), \
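to_intel_display() relies on C11 _Generic selection over the argument's static type. A standalone sketch of the pattern with hypothetical types (not the driver's converters):

    #include <stdio.h>

    struct ctx { int id; };
    struct widget { struct ctx *ctx; };

    static struct ctx *widget_to_ctx(struct widget *w) { return w->ctx; }
    static struct ctx *ctx_to_ctx(struct ctx *c) { return c; }

    /* Pick the converter by the pointed-to type, then call it. */
    #define to_ctx(p) _Generic(*(p),                  \
                    struct widget: widget_to_ctx,     \
                    struct ctx: ctx_to_ctx)(p)

    int main(void)
    {
            struct ctx c = { .id = 42 };
            struct widget w = { .ctx = &c };

            printf("%d %d\n", to_ctx(&w)->id, to_ctx(&c)->id); /* 42 42 */
            return 0;
    }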
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 789c2f78826d..a1fcedfd404b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -84,11 +84,14 @@
#include "intel_pch_display.h"
#include "intel_pps.h"
#include "intel_psr.h"
+#include "intel_quirks.h"
#include "intel_tc.h"
#include "intel_vdsc.h"
#include "intel_vrr.h"
#include "intel_crtc_state_dump.h"
+#define dp_to_i915(__intel_dp) to_i915(dp_to_dig_port(__intel_dp)->base.base.dev)
+
/* DP DSC throughput values used for slice count calculations KPixels/s */
#define DP_DSC_PEAK_PIXEL_RATE 2720000
#define DP_DSC_MAX_ENC_THROUGHPUT_0 340000
@@ -4053,6 +4056,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
+ intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
/*
* Read the eDP display control registers.
@@ -4165,6 +4169,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
drm_dp_read_desc(&intel_dp->aux, &intel_dp->desc,
drm_dp_is_branch(intel_dp->dpcd));
+ intel_init_dpcd_quirks(intel_dp, &intel_dp->desc.ident);
+
intel_dp_update_sink_caps(intel_dp);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c
index cbc817bb0cc3..04a7acd7f73c 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c
@@ -13,16 +13,17 @@
#include "intel_dp_aux.h"
#include "intel_dp_aux_regs.h"
#include "intel_pps.h"
+#include "intel_quirks.h"
#include "intel_tc.h"
#define AUX_CH_NAME_BUFSIZE 6
-static const char *aux_ch_name(struct drm_i915_private *i915,
+static const char *aux_ch_name(struct intel_display *display,
char *buf, int size, enum aux_ch aux_ch)
{
- if (DISPLAY_VER(i915) >= 13 && aux_ch >= AUX_CH_D_XELPD)
+ if (DISPLAY_VER(display) >= 13 && aux_ch >= AUX_CH_D_XELPD)
snprintf(buf, size, "%c", 'A' + aux_ch - AUX_CH_D_XELPD + AUX_CH_D);
- else if (DISPLAY_VER(i915) >= 12 && aux_ch >= AUX_CH_USBC1)
+ else if (DISPLAY_VER(display) >= 12 && aux_ch >= AUX_CH_USBC1)
snprintf(buf, size, "USBC%c", '1' + aux_ch - AUX_CH_USBC1);
else
snprintf(buf, size, "%c", 'A' + aux_ch);
@@ -55,17 +56,18 @@ static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes)
static u32
intel_dp_aux_wait_done(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
i915_reg_t ch_ctl = intel_dp->aux_ch_ctl_reg(intel_dp);
const unsigned int timeout_ms = 10;
u32 status;
int ret;
- ret = intel_de_wait_custom(i915, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY, 0,
+ ret = intel_de_wait_custom(display, ch_ctl, DP_AUX_CH_CTL_SEND_BUSY,
+ 0,
2, timeout_ms, &status);
if (ret == -ETIMEDOUT)
- drm_err(&i915->drm,
+ drm_err(display->drm,
"%s: did not complete or timeout within %ums (status 0x%08x)\n",
intel_dp->aux.name, timeout_ms, status);
@@ -74,7 +76,7 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (index)
return 0;
@@ -83,12 +85,12 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2000 and use that
*/
- return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(i915)->rawclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(DISPLAY_RUNTIME_INFO(display)->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 freq;
@@ -101,15 +103,16 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- freq = i915->display.cdclk.hw.cdclk;
+ freq = display->cdclk.hw.cdclk;
else
- freq = DISPLAY_RUNTIME_INFO(i915)->rawclk_freq;
+ freq = DISPLAY_RUNTIME_INFO(display)->rawclk_freq;
return DIV_ROUND_CLOSEST(freq, 2000);
}
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (dig_port->aux_ch != AUX_CH_A && HAS_PCH_LPT_H(i915)) {
@@ -142,16 +145,21 @@ static int intel_dp_aux_sync_len(void)
return precharge + preamble;
}
-int intel_dp_aux_fw_sync_len(void)
+int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp)
{
+ int precharge = 10; /* 10-16 */
+ int preamble = 8;
+
/*
* We faced some glitches on Dell Precision 5490 MTL laptop with panel:
* "Manufacturer: AUO, Model: 63898" when using HW default 18. Using 20
* is fixing these problems with the panel. It is still within range
- * mentioned in eDP specification.
+ * mentioned in the eDP specification. Increasing the Fast Wake sync
+ * length for everyone caused problems with other panels, so apply the
+ * longer length only as a quirk for this specific laptop.
*/
- int precharge = 12; /* 10-16 */
- int preamble = 8;
+ if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
+ precharge += 2;
return precharge + preamble;
}
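Quick arithmetic check of the values in this hunk (illustrative only): the default fast-wake sync length is 10 + 8 = 18, the "HW default 18" from the comment, and the quirk's +2 precharge yields the 20 the affected panel needs.

    #include <assert.h>

    int main(void)
    {
            int precharge = 10, preamble = 8;

            assert(precharge + preamble == 18);     /* HW default */
            assert(precharge + 2 + preamble == 20); /* QUIRK_FW_SYNC_LEN */
            return 0;
    }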
@@ -195,8 +203,8 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
int send_bytes,
u32 unused)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
u32 ret;
/*
@@ -211,7 +219,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
DP_AUX_CH_CTL_TIME_OUT_MAX |
DP_AUX_CH_CTL_RECEIVE_ERROR |
DP_AUX_CH_CTL_MESSAGE_SIZE(send_bytes) |
- DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len()) |
+ DP_AUX_CH_CTL_FW_SYNC_PULSE_SKL(intel_dp_aux_fw_sync_len(intel_dp)) |
DP_AUX_CH_CTL_SYNC_PULSE_SKL(intel_dp_aux_sync_len());
if (intel_tc_port_in_tbt_alt_mode(dig_port))
@@ -221,7 +229,7 @@ static u32 skl_get_aux_send_ctl(struct intel_dp *intel_dp,
* Power request bit is already set during aux power well enable.
* Preserve the bit across aux transactions.
*/
- if (DISPLAY_VER(i915) >= 14)
+ if (DISPLAY_VER(display) >= 14)
ret |= XELPDP_DP_AUX_CH_CTL_POWER_REQUEST;
return ret;
@@ -233,6 +241,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
u8 *recv, int recv_size,
u32 aux_send_ctl_flags)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
@@ -291,7 +300,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
/* Try to wait for any previous AUX channel activity */
for (try = 0; try < 3; try++) {
- status = intel_de_read_notrace(i915, ch_ctl);
+ status = intel_de_read_notrace(display, ch_ctl);
if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
break;
msleep(1);
@@ -300,10 +309,10 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (try == 3) {
- const u32 status = intel_de_read(i915, ch_ctl);
+ const u32 status = intel_de_read(display, ch_ctl);
if (status != intel_dp->aux_busy_last_status) {
- drm_WARN(&i915->drm, 1,
+ drm_WARN(display->drm, 1,
"%s: not started (status 0x%08x)\n",
intel_dp->aux.name, status);
intel_dp->aux_busy_last_status = status;
@@ -314,7 +323,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
/* Only 5 data registers! */
- if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
+ if (drm_WARN_ON(display->drm, send_bytes > 20 || recv_size > 20)) {
ret = -E2BIG;
goto out;
}
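The 20-byte cap follows from the hardware exposing only five 32-bit data registers per AUX channel. A self-contained sketch of the MSB-first packing and unpacking that the transfer loops below depend on (an illustration of what intel_dp_aux_pack()/intel_dp_aux_unpack() are expected to do, not the driver's code):

#include <stdint.h>

/* Pack up to 4 bytes into one register value, most significant byte first. */
static uint32_t aux_pack(const uint8_t *src, int src_bytes)
{
	uint32_t v = 0;
	int i;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= (uint32_t)src[i] << ((3 - i) * 8);
	return v;
}

/* Inverse: scatter a register value back into up to 4 bytes. */
static void aux_unpack(uint32_t v, uint8_t *dst, int dst_bytes)
{
	int i;

	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = v >> ((3 - i) * 8);
}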
@@ -330,17 +339,17 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
for (try = 0; try < 5; try++) {
/* Load the send data into the aux channel data registers */
for (i = 0; i < send_bytes; i += 4)
- intel_de_write(i915, ch_data[i >> 2],
+ intel_de_write(display, ch_data[i >> 2],
intel_dp_aux_pack(send + i,
send_bytes - i));
/* Send the command and wait for it to complete */
- intel_de_write(i915, ch_ctl, send_ctl);
+ intel_de_write(display, ch_ctl, send_ctl);
status = intel_dp_aux_wait_done(intel_dp);
/* Clear done status and any errors */
- intel_de_write(i915, ch_ctl,
+ intel_de_write(display, ch_ctl,
status | DP_AUX_CH_CTL_DONE |
DP_AUX_CH_CTL_TIME_OUT_ERROR |
DP_AUX_CH_CTL_RECEIVE_ERROR);
@@ -364,7 +373,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ drm_err(display->drm, "%s: not done (status 0x%08x)\n",
intel_dp->aux.name, status);
ret = -EBUSY;
goto out;
@@ -376,7 +385,7 @@ done:
* not connected.
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ drm_err(display->drm, "%s: receive error (status 0x%08x)\n",
intel_dp->aux.name, status);
ret = -EIO;
goto out;
@@ -387,7 +396,7 @@ done:
* -- don't fill the kernel log with these
*/
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ drm_dbg_kms(display->drm, "%s: timeout (status 0x%08x)\n",
intel_dp->aux.name, status);
ret = -ETIMEDOUT;
goto out;
@@ -402,7 +411,7 @@ done:
* drm layer takes care for the necessary retries.
*/
if (recv_bytes == 0 || recv_bytes > 20) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"%s: Forbidden recv_bytes = %d on aux transaction\n",
intel_dp->aux.name, recv_bytes);
ret = -EBUSY;
@@ -413,7 +422,7 @@ done:
recv_bytes = recv_size;
for (i = 0; i < recv_bytes; i += 4)
- intel_dp_aux_unpack(intel_de_read(i915, ch_data[i >> 2]),
+ intel_dp_aux_unpack(intel_de_read(display, ch_data[i >> 2]),
recv + i, recv_bytes - i);
ret = recv_bytes;
@@ -462,7 +471,7 @@ static ssize_t
intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
{
struct intel_dp *intel_dp = container_of(aux, struct intel_dp, aux);
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 txbuf[20], rxbuf[20];
size_t txsize, rxsize;
u32 flags = intel_dp_aux_xfer_flags(msg);
@@ -477,10 +486,10 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE + msg->size : BARE_ADDRESS_SIZE;
rxsize = 2; /* 0 or 1 data bytes */
- if (drm_WARN_ON(&i915->drm, txsize > 20))
+ if (drm_WARN_ON(display->drm, txsize > 20))
return -E2BIG;
- drm_WARN_ON(&i915->drm, !msg->buffer != !msg->size);
+ drm_WARN_ON(display->drm, !msg->buffer != !msg->size);
if (msg->buffer)
memcpy(txbuf + HEADER_SIZE, msg->buffer, msg->size);
@@ -505,7 +514,7 @@ intel_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
txsize = msg->size ? HEADER_SIZE : BARE_ADDRESS_SIZE;
rxsize = msg->size + 1;
- if (drm_WARN_ON(&i915->drm, rxsize > 20))
+ if (drm_WARN_ON(display->drm, rxsize > 20))
return -E2BIG;
ret = intel_dp_aux_xfer(intel_dp, txbuf, txsize,
@@ -715,7 +724,7 @@ static i915_reg_t tgl_aux_data_reg(struct intel_dp *intel_dp, int index)
static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -726,16 +735,16 @@ static i915_reg_t xelpdp_aux_ctl_reg(struct intel_dp *intel_dp)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_CTL(i915, aux_ch);
+ return XELPDP_DP_AUX_CH_CTL(display, aux_ch);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_CTL(i915, AUX_CH_A);
+ return XELPDP_DP_AUX_CH_CTL(display, AUX_CH_A);
}
}
static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum aux_ch aux_ch = dig_port->aux_ch;
@@ -746,10 +755,10 @@ static i915_reg_t xelpdp_aux_data_reg(struct intel_dp *intel_dp, int index)
case AUX_CH_USBC2:
case AUX_CH_USBC3:
case AUX_CH_USBC4:
- return XELPDP_DP_AUX_CH_DATA(i915, aux_ch, index);
+ return XELPDP_DP_AUX_CH_DATA(display, aux_ch, index);
default:
MISSING_CASE(aux_ch);
- return XELPDP_DP_AUX_CH_DATA(i915, AUX_CH_A, index);
+ return XELPDP_DP_AUX_CH_DATA(display, AUX_CH_A, index);
}
}
@@ -763,19 +772,20 @@ void intel_dp_aux_fini(struct intel_dp *intel_dp)
void intel_dp_aux_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
enum aux_ch aux_ch = dig_port->aux_ch;
char buf[AUX_CH_NAME_BUFSIZE];
- if (DISPLAY_VER(i915) >= 14) {
+ if (DISPLAY_VER(display) >= 14) {
intel_dp->aux_ch_ctl_reg = xelpdp_aux_ctl_reg;
intel_dp->aux_ch_data_reg = xelpdp_aux_data_reg;
- } else if (DISPLAY_VER(i915) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
intel_dp->aux_ch_ctl_reg = tgl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = tgl_aux_data_reg;
- } else if (DISPLAY_VER(i915) >= 9) {
+ } else if (DISPLAY_VER(display) >= 9) {
intel_dp->aux_ch_ctl_reg = skl_aux_ctl_reg;
intel_dp->aux_ch_data_reg = skl_aux_data_reg;
} else if (HAS_PCH_SPLIT(i915)) {
@@ -789,7 +799,7 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
intel_dp->aux_ch_data_reg = g4x_aux_data_reg;
}
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_clock_divider = skl_get_aux_clock_divider;
else if (IS_BROADWELL(i915) || IS_HASWELL(i915))
intel_dp->get_aux_clock_divider = hsw_get_aux_clock_divider;
@@ -798,17 +808,17 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
else
intel_dp->get_aux_clock_divider = g4x_get_aux_clock_divider;
- if (DISPLAY_VER(i915) >= 9)
+ if (DISPLAY_VER(display) >= 9)
intel_dp->get_aux_send_ctl = skl_get_aux_send_ctl;
else
intel_dp->get_aux_send_ctl = g4x_get_aux_send_ctl;
- intel_dp->aux.drm_dev = &i915->drm;
+ intel_dp->aux.drm_dev = display->drm;
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %s/%s",
- aux_ch_name(i915, buf, sizeof(buf), aux_ch),
+ aux_ch_name(display, buf, sizeof(buf), aux_ch),
encoder->base.name);
intel_dp->aux.transfer = intel_dp_aux_transfer;
@@ -817,10 +827,10 @@ void intel_dp_aux_init(struct intel_dp *intel_dp)
static enum aux_ch default_aux_ch(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
/* SKL has DDI E but no AUX E */
- if (DISPLAY_VER(i915) == 9 && encoder->port == PORT_E)
+ if (DISPLAY_VER(display) == 9 && encoder->port == PORT_E)
return AUX_CH_A;
return (enum aux_ch)encoder->port;
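The default mapping is effectively an identity between ports and AUX channels, with SKL's missing AUX E folded back to AUX A. A compact sketch under that assumption (the enums here are reduced stand-ins for the driver's):

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };
enum aux_ch { AUX_CH_A, AUX_CH_B, AUX_CH_C, AUX_CH_D, AUX_CH_E };

static enum aux_ch default_aux_ch_for(enum port port, int display_ver)
{
	/* SKL (display version 9) has DDI E but no AUX E */
	if (display_ver == 9 && port == PORT_E)
		return AUX_CH_A;

	return (enum aux_ch)port;
}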
@@ -830,10 +840,10 @@ static struct intel_encoder *
get_encoder_by_aux_ch(struct intel_encoder *encoder,
enum aux_ch aux_ch)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_encoder *other;
- for_each_intel_encoder(&i915->drm, other) {
+ for_each_intel_encoder(display->drm, other) {
if (other == encoder)
continue;
@@ -849,7 +859,7 @@ get_encoder_by_aux_ch(struct intel_encoder *encoder,
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_encoder *other;
const char *source;
enum aux_ch aux_ch;
@@ -870,23 +880,23 @@ enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder)
other = get_encoder_by_aux_ch(encoder, aux_ch);
if (other) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] AUX CH %s already claimed by [ENCODER:%d:%s]\n",
encoder->base.base.id, encoder->base.name,
- aux_ch_name(i915, buf, sizeof(buf), aux_ch),
+ aux_ch_name(display, buf, sizeof(buf), aux_ch),
other->base.base.id, other->base.name);
return AUX_CH_NONE;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Using AUX CH %s (%s)\n",
encoder->base.base.id, encoder->base.name,
- aux_ch_name(i915, buf, sizeof(buf), aux_ch), source);
+ aux_ch_name(display, buf, sizeof(buf), aux_ch), source);
return aux_ch;
}
-void intel_dp_aux_irq_handler(struct drm_i915_private *i915)
+void intel_dp_aux_irq_handler(struct intel_display *display)
{
- wake_up_all(&i915->display.gmbus.wait_queue);
+ wake_up_all(&display->gmbus.wait_queue);
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h
index 76d1f2ed7c2f..90ee1c5fae28 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h
@@ -9,7 +9,7 @@
#include <linux/types.h>
enum aux_ch;
-struct drm_i915_private;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
@@ -18,8 +18,8 @@ void intel_dp_aux_init(struct intel_dp *intel_dp);
enum aux_ch intel_dp_aux_ch(struct intel_encoder *encoder);
-void intel_dp_aux_irq_handler(struct drm_i915_private *i915);
+void intel_dp_aux_irq_handler(struct intel_display *display);
u32 intel_dp_aux_pack(const u8 *src, int src_bytes);
-int intel_dp_aux_fw_sync_len(void);
+int intel_dp_aux_fw_sync_len(struct intel_dp *intel_dp);
#endif /* __INTEL_DP_AUX_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 9c8738295106..40bedc31d6bf 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -39,13 +39,13 @@
drm_dp_phy_name(_dp_phy)
#define lt_dbg(_intel_dp, _dp_phy, _format, ...) \
- drm_dbg_kms(&dp_to_i915(_intel_dp)->drm, \
+ drm_dbg_kms(to_intel_display(_intel_dp)->drm, \
LT_MSG_PREFIX _format, \
LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__)
#define lt_err(_intel_dp, _dp_phy, _format, ...) do { \
if (intel_digital_port_connected(&dp_to_dig_port(_intel_dp)->base)) \
- drm_err(&dp_to_i915(_intel_dp)->drm, \
+ drm_err(to_intel_display(_intel_dp)->drm, \
LT_MSG_PREFIX _format, \
LT_MSG_ARGS(_intel_dp, _dp_phy), ## __VA_ARGS__); \
else \
@@ -216,7 +216,8 @@ static int intel_dp_init_lttpr(struct intel_dp *intel_dp, const u8 dpcd[DP_RECEI
int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp_is_edp(intel_dp))
return 0;
@@ -225,7 +226,7 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
* Detecting LTTPRs must be avoided on platforms with an AUX timeout
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
- if (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))
+ if (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))
if (drm_dp_dpcd_probe(&intel_dp->aux,
DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV))
return -EIO;
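As a hedged aside, the repeated version guard could be read as a single predicate; can_probe_lttpr() below is a hypothetical helper for illustration, not part of the patch:

/*
 * Hypothetical helper: LTTPR detection is only safe where the AUX timeout
 * period is >= 3.2ms, i.e. display version 10+ excluding Geminilake.
 */
static int can_probe_lttpr(int display_ver, int is_geminilake)
{
	return display_ver >= 10 && !is_geminilake;
}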
@@ -256,7 +257,8 @@ int intel_dp_read_dprx_caps(struct intel_dp *intel_dp, u8 dpcd[DP_RECEIVER_CAP_S
*/
int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
int lttpr_count = 0;
/*
@@ -264,7 +266,7 @@ int intel_dp_init_lttpr_and_dprx_caps(struct intel_dp *intel_dp)
* period < 3.2ms. (see DP Standard v2.0, 2.11.2, 3.6.6.1).
*/
if (!intel_dp_is_edp(intel_dp) &&
- (DISPLAY_VER(i915) >= 10 && !IS_GEMINILAKE(i915))) {
+ (DISPLAY_VER(display) >= 10 && !IS_GEMINILAKE(i915))) {
u8 dpcd[DP_RECEIVER_CAP_SIZE];
int err = intel_dp_read_dprx_caps(intel_dp, dpcd);
@@ -327,10 +329,11 @@ static bool
intel_dp_phy_is_downstream_of_source(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int lttpr_count = drm_dp_lttpr_count(intel_dp->lttpr_common_caps);
- drm_WARN_ON_ONCE(&i915->drm, lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
+ drm_WARN_ON_ONCE(display->drm,
+ lttpr_count <= 0 && dp_phy != DP_PHY_DPRX);
return lttpr_count <= 0 || dp_phy == DP_PHY_LTTPR(lttpr_count - 1);
}
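A worked reading of the predicate: with N LTTPRs the PHYs are DPRX, LTTPR(0) .. LTTPR(N-1), and LTTPR(N-1) sits closest to the source; with no LTTPRs the DPRX itself does. A sketch with the drm_dp constants reduced to stand-ins:

#define PHY_DPRX	(-1)	/* stand-in for DP_PHY_DPRX */
#define PHY_LTTPR(i)	(i)	/* stand-in for DP_PHY_LTTPR(i) */

/* True if @dp_phy is the PHY immediately downstream of the source. */
static int phy_is_downstream_of_source(int lttpr_count, int dp_phy)
{
	return lttpr_count <= 0 || dp_phy == PHY_LTTPR(lttpr_count - 1);
}

/*
 * lttpr_count == 0: only the DPRX exists, so it qualifies.
 * lttpr_count == 2: LTTPR(1) qualifies; LTTPR(0) and the DPRX do not.
 */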
@@ -339,7 +342,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 voltage_max;
/*
@@ -351,7 +354,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
else
voltage_max = intel_dp_lttpr_voltage_max(intel_dp, dp_phy + 1);
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_2 &&
voltage_max != DP_TRAIN_VOLTAGE_SWING_LEVEL_3);
@@ -361,7 +364,7 @@ static u8 intel_dp_phy_voltage_max(struct intel_dp *intel_dp,
static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 preemph_max;
/*
@@ -373,7 +376,7 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
else
preemph_max = intel_dp_lttpr_preemph_max(intel_dp, dp_phy + 1);
- drm_WARN_ON_ONCE(&i915->drm,
+ drm_WARN_ON_ONCE(display->drm,
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_2 &&
preemph_max != DP_TRAIN_PRE_EMPH_LEVEL_3);
@@ -383,10 +386,11 @@ static u8 intel_dp_phy_preemph_max(struct intel_dp *intel_dp,
static bool has_per_lane_signal_levels(struct intel_dp *intel_dp,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
return !intel_dp_phy_is_downstream_of_source(intel_dp, dp_phy) ||
- DISPLAY_VER(i915) >= 10 || IS_BROXTON(i915);
+ DISPLAY_VER(display) >= 10 || IS_BROXTON(i915);
}
/* 128b/132b */
@@ -950,7 +954,8 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
enum drm_dp_phy dp_phy)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
bool source_tps3, sink_tps3, source_tps4, sink_tps4;
/* UHBR+ use separate 128b/132b TPS2 */
@@ -1586,7 +1591,7 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(state);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct intel_encoder *encoder = &dig_port->base;
bool passed;
@@ -1631,7 +1636,7 @@ void intel_dp_start_link_train(struct intel_atomic_state *state,
 * For test cases which rely on link training or the processing of HPDs,
 * the ignore_long_hpd flag can be unset from the testcase.
*/
- if (i915->display.hotplug.ignore_long_hpd) {
+ if (display->hotplug.ignore_long_hpd) {
lt_dbg(intel_dp, DP_PHY_DPRX, "Ignore the link failure\n");
return;
}
@@ -1683,14 +1688,14 @@ static struct intel_dp *intel_connector_to_intel_dp(struct intel_connector *conn
static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int current_rate = -1;
int force_rate;
int err;
int i;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
@@ -1698,7 +1703,7 @@ static int i915_dp_force_link_rate_show(struct seq_file *m, void *data)
current_rate = intel_dp->link_rate;
force_rate = intel_dp->link.force_rate;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
seq_printf(m, "%sauto%s",
force_rate == 0 ? "[" : "",
@@ -1754,7 +1759,7 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int rate;
int err;
@@ -1763,14 +1768,14 @@ static ssize_t i915_dp_force_link_rate_write(struct file *file,
if (rate < 0)
return rate;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp_reset_link_params(intel_dp);
intel_dp->link.force_rate = rate;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
*offp += len;
@@ -1781,14 +1786,14 @@ DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_link_rate);
static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int current_lane_count = -1;
int force_lane_count;
int err;
int i;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
@@ -1796,7 +1801,7 @@ static int i915_dp_force_lane_count_show(struct seq_file *m, void *data)
current_lane_count = intel_dp->lane_count;
force_lane_count = intel_dp->link.force_lane_count;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
seq_printf(m, "%sauto%s",
force_lane_count == 0 ? "[" : "",
@@ -1856,7 +1861,7 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file,
{
struct seq_file *m = file->private_data;
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int lane_count;
int err;
@@ -1865,14 +1870,14 @@ static ssize_t i915_dp_force_lane_count_write(struct file *file,
if (lane_count < 0)
return lane_count;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp_reset_link_params(intel_dp);
intel_dp->link.force_lane_count = lane_count;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
*offp += len;
@@ -1883,17 +1888,17 @@ DEFINE_SHOW_STORE_ATTRIBUTE(i915_dp_force_lane_count);
static int i915_dp_max_link_rate_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.max_rate;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
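All of these debugfs accessors share one shape: take connection_mutex interruptibly, touch a single field, drop the lock. A minimal sketch of that shape with a plain pthread mutex standing in for the DRM modeset lock (the drm_modeset_lock_* calls in the patch are the real interface):

#include <pthread.h>

static pthread_mutex_t connection_mutex = PTHREAD_MUTEX_INITIALIZER;
static int link_max_rate;

/* debugfs "show" shape: lock, snapshot one field, unlock. */
static int show_max_rate(long long *val)
{
	int err = pthread_mutex_lock(&connection_mutex);

	if (err)
		return -err;

	*val = link_max_rate;

	pthread_mutex_unlock(&connection_mutex);
	return 0;
}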
@@ -1902,17 +1907,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_link_rate_fops, i915_dp_max_link_rate_show,
static int i915_dp_max_lane_count_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.max_lane_count;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1921,17 +1926,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_max_lane_count_fops, i915_dp_max_lane_count_sho
static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.force_train_failure;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1939,20 +1944,20 @@ static int i915_dp_force_link_training_failure_show(void *data, u64 *val)
static int i915_dp_force_link_training_failure_write(void *data, u64 val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
if (val > 2)
return -EINVAL;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp->link.force_train_failure = val;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1963,17 +1968,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_training_failure_fops,
static int i915_dp_force_link_retrain_show(void *data, u64 *val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
*val = intel_dp->link.force_retrain;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
@@ -1981,17 +1986,17 @@ static int i915_dp_force_link_retrain_show(void *data, u64 *val)
static int i915_dp_force_link_retrain_write(void *data, u64 val)
{
struct intel_connector *connector = to_intel_connector(data);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
intel_dp->link.force_retrain = val;
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
intel_hpd_trigger_irq(dp_to_dig_port(intel_dp));
@@ -2004,17 +2009,17 @@ DEFINE_DEBUGFS_ATTRIBUTE(i915_dp_force_link_retrain_fops,
static int i915_dp_link_retrain_disabled_show(struct seq_file *m, void *data)
{
struct intel_connector *connector = to_intel_connector(m->private);
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_connector_to_intel_dp(connector);
int err;
- err = drm_modeset_lock_single_interruptible(&i915->drm.mode_config.connection_mutex);
+ err = drm_modeset_lock_single_interruptible(&display->drm->mode_config.connection_mutex);
if (err)
return err;
seq_printf(m, "%s\n", str_yes_no(intel_dp->link.retrain_disabled));
- drm_modeset_unlock(&i915->drm.mode_config.connection_mutex);
+ drm_modeset_unlock(&display->drm->mode_config.connection_mutex);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
index 6503abdc2b98..94198bc04939 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.c
@@ -69,7 +69,7 @@ static int get_current_link_bw(struct intel_dp *intel_dp,
static int update_tunnel_state(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
bool old_bw_below_dprx;
bool new_bw_below_dprx;
@@ -81,7 +81,7 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
ret = drm_dp_tunnel_update_state(intel_dp->tunnel);
if (ret < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] State update failed (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -103,7 +103,7 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
!new_bw_below_dprx)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Notify users about BW change: %d -> %d\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -121,20 +121,20 @@ static int update_tunnel_state(struct intel_dp *intel_dp)
*/
static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pipe_mask)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_crtc *crtc;
int tunnel_bw = 0;
int err;
- for_each_intel_crtc_in_pipe_mask(&i915->drm, crtc, pipe_mask) {
+ for_each_intel_crtc_in_pipe_mask(display->drm, crtc, pipe_mask) {
const struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
int stream_bw = intel_dp_config_required_rate(crtc_state);
tunnel_bw += stream_bw;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s][CRTC:%d:%s] Initial BW for stream %d: %d/%d Mb/s\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -145,7 +145,7 @@ static int allocate_initial_tunnel_bw_for_pipes(struct intel_dp *intel_dp, u8 pi
err = drm_dp_tunnel_alloc_bw(intel_dp->tunnel, tunnel_bw);
if (err) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Initial BW allocation failed (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -172,12 +172,12 @@ static int allocate_initial_tunnel_bw(struct intel_dp *intel_dp,
static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acquire_ctx *ctx)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct drm_dp_tunnel *tunnel;
int ret;
- tunnel = drm_dp_tunnel_detect(i915->display.dp_tunnel_mgr,
+ tunnel = drm_dp_tunnel_detect(display->dp_tunnel_mgr,
&intel_dp->aux);
if (IS_ERR(tunnel))
return PTR_ERR(tunnel);
@@ -189,7 +189,7 @@ static int detect_new_tunnel(struct intel_dp *intel_dp, struct drm_modeset_acqui
if (ret == -EOPNOTSUPP)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] Failed to enable BW allocation mode (ret %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
encoder->base.base.id, encoder->base.name,
@@ -266,14 +266,15 @@ bool intel_dp_tunnel_bw_alloc_is_enabled(struct intel_dp *intel_dp)
*/
void intel_dp_tunnel_suspend(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return;
- drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
+ drm_dbg_kms(display->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Suspend\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
@@ -295,7 +296,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool dpcd_updated)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
u8 dpcd[DP_RECEIVER_CAP_SIZE];
@@ -307,7 +308,8 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
intel_dp->tunnel_suspended = false;
- drm_dbg_kms(&i915->drm, "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
+ drm_dbg_kms(display->drm,
+ "[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Resume\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
encoder->base.base.id, encoder->base.name);
@@ -347,7 +349,7 @@ void intel_dp_tunnel_resume(struct intel_dp *intel_dp,
return;
out_err:
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s] Tunnel can't be resumed, will drop and reject it (err %pe)\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -369,12 +371,12 @@ add_inherited_tunnel(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct drm_dp_tunnel *old_tunnel;
old_tunnel = get_inherited_tunnel(state, crtc);
if (old_tunnel) {
- drm_WARN_ON(&i915->drm, old_tunnel != tunnel);
+ drm_WARN_ON(display->drm, old_tunnel != tunnel);
return 0;
}
@@ -394,7 +396,7 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state,
struct intel_dp *intel_dp,
const struct intel_digital_connector_state *old_conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector =
to_intel_connector(old_conn_state->base.connector);
@@ -422,7 +424,7 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state,
old_crtc_state->dp_tunnel_ref.tunnel == intel_dp->tunnel)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding state for inherited tunnel %p\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -441,12 +443,13 @@ static int check_inherited_tunnel_state(struct intel_atomic_state *state,
*/
void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *state)
{
+ struct intel_display *display = to_intel_display(state);
enum pipe pipe;
if (!state->inherited_dp_tunnels)
return;
- for_each_pipe(to_i915(state->base.dev), pipe)
+ for_each_pipe(display, pipe)
if (state->inherited_dp_tunnels->ref[pipe].tunnel)
drm_dp_tunnel_ref_put(&state->inherited_dp_tunnels->ref[pipe]);
@@ -457,7 +460,7 @@ void intel_dp_tunnel_atomic_cleanup_inherited_state(struct intel_atomic_state *s
static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
u32 pipe_mask;
int err;
@@ -466,7 +469,7 @@ static int intel_dp_tunnel_atomic_add_group_state(struct intel_atomic_state *sta
if (err)
return err;
- drm_WARN_ON(&i915->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
+ drm_WARN_ON(display->drm, pipe_mask & ~((1 << I915_MAX_PIPES) - 1));
return intel_modeset_pipes_in_mask_early(state, "DPTUN", pipe_mask);
}
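The WARN guards against stray bits above the valid pipe range. The mask arithmetic in isolation (I915_MAX_PIPES reduced to a local constant for illustration):

#define MAX_PIPES 4

/* True if @pipe_mask names only pipes 0..MAX_PIPES-1. */
static int pipe_mask_is_valid(unsigned int pipe_mask)
{
	return (pipe_mask & ~((1u << MAX_PIPES) - 1)) == 0;
}

/* e.g. 0x3 (pipes A and B) passes; any bit >= MAX_PIPES would trip the WARN. */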
@@ -504,7 +507,7 @@ static int check_group_state(struct intel_atomic_state *state,
struct intel_connector *connector,
struct intel_crtc *crtc)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -512,7 +515,7 @@ static int check_group_state(struct intel_atomic_state *state,
if (!crtc_state->dp_tunnel_ref.tunnel)
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Adding group state for tunnel %p\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -583,7 +586,7 @@ int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
const struct intel_connector *connector,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
int required_rate = intel_dp_config_required_rate(crtc_state);
@@ -592,7 +595,7 @@ int intel_dp_tunnel_atomic_compute_stream_bw(struct intel_atomic_state *state,
if (!intel_dp_tunnel_bw_alloc_is_enabled(intel_dp))
return 0;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][CONNECTOR:%d:%s][ENCODER:%d:%s][CRTC:%d:%s] Stream %d required BW %d Mb/s\n",
drm_dp_tunnel_name(intel_dp->tunnel),
connector->base.base.id, connector->base.name,
@@ -708,7 +711,7 @@ static void queue_retry_work(struct intel_atomic_state *state,
struct drm_dp_tunnel *tunnel,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
struct intel_encoder *encoder;
encoder = intel_get_crtc_new_encoder(state, crtc_state);
@@ -716,7 +719,7 @@ static void queue_retry_work(struct intel_atomic_state *state,
if (!intel_digital_port_connected(encoder))
return;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[DPTUN %s][ENCODER:%d:%s] BW allocation failed on a connected sink\n",
drm_dp_tunnel_name(tunnel),
encoder->base.base.id,
@@ -765,7 +768,7 @@ void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
/**
* intel_dp_tunnel_mgr_init - Initialize the DP tunnel manager
- * @i915: i915 device object
+ * @display: display device
*
* Initialize the DP tunnel manager. The tunnel manager will support the
* detection/management of DP tunnels on all DP connectors, so the function
@@ -773,14 +776,14 @@ void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
*
* Return 0 in case of success, a negative error code otherwise.
*/
-int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+int intel_dp_tunnel_mgr_init(struct intel_display *display)
{
struct drm_dp_tunnel_mgr *tunnel_mgr;
struct drm_connector_list_iter connector_list_iter;
struct intel_connector *connector;
int dp_connectors = 0;
- drm_connector_list_iter_begin(&i915->drm, &connector_list_iter);
+ drm_connector_list_iter_begin(display->drm, &connector_list_iter);
for_each_intel_connector_iter(connector, &connector_list_iter) {
if (connector->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort)
continue;
@@ -789,23 +792,23 @@ int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
}
drm_connector_list_iter_end(&connector_list_iter);
- tunnel_mgr = drm_dp_tunnel_mgr_create(&i915->drm, dp_connectors);
+ tunnel_mgr = drm_dp_tunnel_mgr_create(display->drm, dp_connectors);
if (IS_ERR(tunnel_mgr))
return PTR_ERR(tunnel_mgr);
- i915->display.dp_tunnel_mgr = tunnel_mgr;
+ display->dp_tunnel_mgr = tunnel_mgr;
return 0;
}
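Condensed, the init path above counts DisplayPort connectors and sizes the tunnel manager with that count. A sketch with the DRM connector iteration replaced by a plain array walk (the enum and helper are illustrative stand-ins):

enum connector_type { CONNECTOR_HDMI, CONNECTOR_DP };

static int count_dp_connectors(const enum connector_type *types, int n)
{
	int i, dp_connectors = 0;

	for (i = 0; i < n; i++)
		if (types[i] == CONNECTOR_DP)
			dp_connectors++;

	/* drm_dp_tunnel_mgr_create() is sized with this count in the patch. */
	return dp_connectors;
}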
/**
* intel_dp_tunnel_mgr_cleanup - Clean up the DP tunnel manager state
- * @i915: i915 device object
+ * @display: display device
*
* Clean up the DP tunnel manager state.
*/
-void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915)
+void intel_dp_tunnel_mgr_cleanup(struct intel_display *display)
{
- drm_dp_tunnel_mgr_destroy(i915->display.dp_tunnel_mgr);
- i915->display.dp_tunnel_mgr = NULL;
+ drm_dp_tunnel_mgr_destroy(display->dp_tunnel_mgr);
+ display->dp_tunnel_mgr = NULL;
}
diff --git a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
index 08b2cba84af2..a0c00b7d3303 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
+++ b/drivers/gpu/drm/i915/display/intel_dp_tunnel.h
@@ -9,14 +9,13 @@
#include <linux/errno.h>
#include <linux/types.h>
-struct drm_i915_private;
struct drm_connector_state;
struct drm_modeset_acquire_ctx;
-
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
struct intel_link_bw_limits;
@@ -53,8 +52,8 @@ int intel_dp_tunnel_atomic_check_state(struct intel_atomic_state *state,
void intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state);
-int intel_dp_tunnel_mgr_init(struct drm_i915_private *i915);
-void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915);
+int intel_dp_tunnel_mgr_init(struct intel_display *display);
+void intel_dp_tunnel_mgr_cleanup(struct intel_display *display);
#else
@@ -121,12 +120,12 @@ intel_dp_tunnel_atomic_alloc_bw(struct intel_atomic_state *state)
}
static inline int
-intel_dp_tunnel_mgr_init(struct drm_i915_private *i915)
+intel_dp_tunnel_mgr_init(struct intel_display *display)
{
return 0;
}
-static inline void intel_dp_tunnel_mgr_cleanup(struct drm_i915_private *i915) {}
+static inline void intel_dp_tunnel_mgr_cleanup(struct intel_display *display) {}
#endif /* CONFIG_DRM_I915_DP_TUNNEL */
diff --git a/drivers/gpu/drm/i915/display/intel_dpll.c b/drivers/gpu/drm/i915/display/intel_dpll.c
index d67d5e2fd570..340dfce480b8 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll.c
@@ -1823,6 +1823,7 @@ static bool i9xx_has_pps(struct drm_i915_private *dev_priv)
void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -1833,7 +1834,7 @@ void i9xx_enable_pll(const struct intel_crtc_state *crtc_state)
/* PLL is protected by panel, make sure we can write it */
if (i9xx_has_pps(dev_priv))
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
intel_de_write(dev_priv, FP0(pipe), hw_state->fp0);
intel_de_write(dev_priv, FP1(pipe), hw_state->fp1);
@@ -2004,6 +2005,7 @@ static void _vlv_enable_pll(const struct intel_crtc_state *crtc_state)
void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2012,7 +2014,7 @@ void vlv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
/* Enable Refclk */
intel_de_write(dev_priv, DPLL(dev_priv, pipe),
@@ -2150,6 +2152,7 @@ static void _chv_enable_pll(const struct intel_crtc_state *crtc_state)
void chv_enable_pll(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct i9xx_dpll_hw_state *hw_state = &crtc_state->dpll_hw_state.i9xx;
@@ -2158,7 +2161,7 @@ void chv_enable_pll(const struct intel_crtc_state *crtc_state)
assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);
/* PLL is protected by panel, make sure we can write it */
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
/* Enable Refclk and SSC */
intel_de_write(dev_priv, DPLL(dev_priv, pipe),
diff --git a/drivers/gpu/drm/i915/display/intel_frontbuffer.c b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
index 4923c340a0b6..af4576dee92a 100644
--- a/drivers/gpu/drm/i915/display/intel_frontbuffer.c
+++ b/drivers/gpu/drm/i915/display/intel_frontbuffer.c
@@ -83,6 +83,8 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
+ struct intel_display *display = &i915->display;
+
/* Delay flushing when rings are still busy. */
spin_lock(&i915->display.fb_tracking.lock);
frontbuffer_bits &= ~i915->display.fb_tracking.busy_bits;
@@ -96,7 +98,7 @@ static void frontbuffer_flush(struct drm_i915_private *i915,
might_sleep();
intel_td_flush(i915);
intel_drrs_flush(i915, frontbuffer_bits);
- intel_psr_flush(i915, frontbuffer_bits, origin);
+ intel_psr_flush(display, frontbuffer_bits, origin);
intel_fbc_flush(i915, frontbuffer_bits, origin);
}
@@ -172,6 +174,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
unsigned int frontbuffer_bits)
{
struct drm_i915_private *i915 = intel_bo_to_i915(front->obj);
+ struct intel_display *display = &i915->display;
if (origin == ORIGIN_CS) {
spin_lock(&i915->display.fb_tracking.lock);
@@ -183,7 +186,7 @@ void __intel_fb_invalidate(struct intel_frontbuffer *front,
trace_intel_frontbuffer_invalidate(i915, frontbuffer_bits, origin);
might_sleep();
- intel_psr_invalidate(i915, frontbuffer_bits, origin);
+ intel_psr_invalidate(display, frontbuffer_bits, origin);
intel_drrs_invalidate(i915, frontbuffer_bits);
intel_fbc_invalidate(i915, frontbuffer_bits, origin);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 94418f218448..6980b98792c2 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -2181,10 +2181,11 @@ static void intel_hdcp_check_work(struct work_struct *work)
DRM_HDCP_CHECK_PERIOD_MS);
}
-static int i915_hdcp_component_bind(struct device *i915_kdev,
+static int i915_hdcp_component_bind(struct device *drv_kdev,
struct device *mei_kdev, void *data)
{
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
drm_dbg(&i915->drm, "I915 HDCP comp bind\n");
mutex_lock(&i915->display.hdcp.hdcp_mutex);
@@ -2195,10 +2196,11 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
return 0;
}
-static void i915_hdcp_component_unbind(struct device *i915_kdev,
+static void i915_hdcp_component_unbind(struct device *drv_kdev,
struct device *mei_kdev, void *data)
{
- struct drm_i915_private *i915 = kdev_to_i915(i915_kdev);
+ struct intel_display *display = to_intel_display(drv_kdev);
+ struct drm_i915_private *i915 = to_i915(display->drm);
drm_dbg(&i915->drm, "I915 HDCP comp unbind\n");
mutex_lock(&i915->display.hdcp.hdcp_mutex);
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
index 6548e71b4c49..35bdb532bbb3 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp_gsc_message.c
@@ -7,6 +7,7 @@
#include <drm/intel/i915_hdcp_interface.h>
#include "i915_drv.h"
+#include "intel_display_types.h"
#include "intel_hdcp_gsc_message.h"
int
@@ -15,17 +16,19 @@ intel_hdcp_gsc_initiate_session(struct device *dev, struct hdcp_port_data *data,
{
struct wired_cmd_initiate_hdcp2_session_in session_init_in = {};
struct wired_cmd_initiate_hdcp2_session_out session_init_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ake_data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
session_init_in.header.api_version = HDCP_API_VERSION;
session_init_in.header.command_id = WIRED_INITIATE_HDCP2_SESSION;
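Every GSC helper in this file now opens with the same prologue: map the device back to the display, bail out if the DRM side is not initialized, then derive the i915 pointer. A condensed sketch of that shared shape (types reduced to opaque stand-ins):

#include <errno.h>

struct display;	/* stand-in for struct intel_display */

static struct display *dev_to_display(void *dev)
{
	return (struct display *)dev; /* illustrative lookup only */
}

/* Common prologue shared by the HDCP GSC message helpers. */
static int hdcp_check_dev(void *dev, struct display **display)
{
	if (!dev)
		return -EINVAL;

	*display = dev_to_display(dev);
	if (!*display)
		return -ENODEV; /* "DRM not initialized, aborting HDCP." */

	return 0;
}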
@@ -72,17 +75,19 @@ intel_hdcp_gsc_verify_receiver_cert_prepare_km(struct device *dev,
{
struct wired_cmd_verify_receiver_cert_in verify_rxcert_in = {};
struct wired_cmd_verify_receiver_cert_out verify_rxcert_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_cert || !km_stored || !ek_pub_km || !msg_sz)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
verify_rxcert_in.header.api_version = HDCP_API_VERSION;
verify_rxcert_in.header.command_id = WIRED_VERIFY_RECEIVER_CERT;
@@ -135,17 +140,19 @@ intel_hdcp_gsc_verify_hprime(struct device *dev, struct hdcp_port_data *data,
{
struct wired_cmd_ake_send_hprime_in send_hprime_in = {};
struct wired_cmd_ake_send_hprime_out send_hprime_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_hprime)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
send_hprime_in.header.api_version = HDCP_API_VERSION;
send_hprime_in.header.command_id = WIRED_AKE_SEND_HPRIME;
@@ -183,17 +190,19 @@ intel_hdcp_gsc_store_pairing_info(struct device *dev, struct hdcp_port_data *dat
{
struct wired_cmd_ake_send_pairing_info_in pairing_info_in = {};
struct wired_cmd_ake_send_pairing_info_out pairing_info_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !pairing_info)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
pairing_info_in.header.api_version = HDCP_API_VERSION;
pairing_info_in.header.command_id = WIRED_AKE_SEND_PAIRING_INFO;
@@ -234,17 +243,19 @@ intel_hdcp_gsc_initiate_locality_check(struct device *dev,
{
struct wired_cmd_init_locality_check_in lc_init_in = {};
struct wired_cmd_init_locality_check_out lc_init_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !lc_init_data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
lc_init_in.header.api_version = HDCP_API_VERSION;
lc_init_in.header.command_id = WIRED_INIT_LOCALITY_CHECK;
@@ -280,17 +291,19 @@ intel_hdcp_gsc_verify_lprime(struct device *dev, struct hdcp_port_data *data,
{
struct wired_cmd_validate_locality_in verify_lprime_in = {};
struct wired_cmd_validate_locality_out verify_lprime_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !rx_lprime)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
verify_lprime_in.header.api_version = HDCP_API_VERSION;
verify_lprime_in.header.command_id = WIRED_VALIDATE_LOCALITY;
@@ -330,17 +343,19 @@ int intel_hdcp_gsc_get_session_key(struct device *dev,
{
struct wired_cmd_get_session_key_in get_skey_in = {};
struct wired_cmd_get_session_key_out get_skey_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data || !ske_data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
get_skey_in.header.api_version = HDCP_API_VERSION;
get_skey_in.header.command_id = WIRED_GET_SESSION_KEY;
@@ -382,17 +397,19 @@ intel_hdcp_gsc_repeater_check_flow_prepare_ack(struct device *dev,
{
struct wired_cmd_verify_repeater_in verify_repeater_in = {};
struct wired_cmd_verify_repeater_out verify_repeater_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !rep_topology || !rep_send_ack || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
verify_repeater_in.header.api_version = HDCP_API_VERSION;
verify_repeater_in.header.command_id = WIRED_VERIFY_REPEATER;
@@ -442,6 +459,7 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
{
struct wired_cmd_repeater_auth_stream_req_in *verify_mprime_in;
struct wired_cmd_repeater_auth_stream_req_out verify_mprime_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
size_t cmd_size;
@@ -449,11 +467,12 @@ int intel_hdcp_gsc_verify_mprime(struct device *dev,
if (!dev || !stream_ready || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
cmd_size = struct_size(verify_mprime_in, streams, data->k);
if (cmd_size == SIZE_MAX)
@@ -504,17 +523,19 @@ int intel_hdcp_gsc_enable_authentication(struct device *dev,
{
struct wired_cmd_enable_auth_in enable_auth_in = {};
struct wired_cmd_enable_auth_out enable_auth_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
enable_auth_in.header.api_version = HDCP_API_VERSION;
enable_auth_in.header.command_id = WIRED_ENABLE_AUTH;
@@ -549,17 +570,19 @@ intel_hdcp_gsc_close_session(struct device *dev, struct hdcp_port_data *data)
{
struct wired_cmd_close_session_in session_close_in = {};
struct wired_cmd_close_session_out session_close_out = {};
+ struct intel_display *display;
struct drm_i915_private *i915;
ssize_t byte;
if (!dev || !data)
return -EINVAL;
- i915 = kdev_to_i915(dev);
- if (!i915) {
+ display = to_intel_display(dev);
+ if (!display) {
dev_err(dev, "DRM not initialized, aborting HDCP.\n");
return -ENODEV;
}
+ i915 = to_i915(display->drm);
session_close_in.header.api_version = HDCP_API_VERSION;
session_close_in.header.command_id = WIRED_CLOSE_SESSION;
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 19498ee455fa..cd9ee171e0df 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -60,30 +60,25 @@
#include "intel_panel.h"
#include "intel_snps_phy.h"
-inline struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi)
-{
- return to_i915(hdmi_to_dig_port(intel_hdmi)->base.base.dev);
-}
-
static void
assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
{
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(intel_hdmi);
+ struct intel_display *display = to_intel_display(intel_hdmi);
u32 enabled_bits;
- enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
+ enabled_bits = HAS_DDI(display) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
- drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
+ drm_WARN(display->drm,
+ intel_de_read(display, intel_hdmi->hdmi_reg) & enabled_bits,
"HDMI port enabled, expecting disabled\n");
}
static void
-assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
+assert_hdmi_transcoder_func_disabled(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dev_priv, cpu_transcoder)) &
+ drm_WARN(display->drm,
+ intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) &
TRANS_DDI_FUNC_ENABLE,
"HDMI transcoder function enabled, expecting disabled\n");
}
@@ -158,35 +153,35 @@ static u32 hsw_infoframe_enable(unsigned int type)
}
static i915_reg_t
-hsw_dip_data_reg(struct drm_i915_private *dev_priv,
+hsw_dip_data_reg(struct intel_display *display,
enum transcoder cpu_transcoder,
unsigned int type,
int i)
{
switch (type) {
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- return HSW_TVIDEO_DIP_GMP_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_GMP_DATA(display, cpu_transcoder, i);
case DP_SDP_VSC:
- return HSW_TVIDEO_DIP_VSC_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_VSC_DATA(display, cpu_transcoder, i);
case DP_SDP_ADAPTIVE_SYNC:
- return ADL_TVIDEO_DIP_AS_SDP_DATA(dev_priv, cpu_transcoder, i);
+ return ADL_TVIDEO_DIP_AS_SDP_DATA(display, cpu_transcoder, i);
case DP_SDP_PPS:
- return ICL_VIDEO_DIP_PPS_DATA(dev_priv, cpu_transcoder, i);
+ return ICL_VIDEO_DIP_PPS_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_AVI:
- return HSW_TVIDEO_DIP_AVI_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_AVI_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_SPD:
- return HSW_TVIDEO_DIP_SPD_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_SPD_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_VENDOR:
- return HSW_TVIDEO_DIP_VS_DATA(dev_priv, cpu_transcoder, i);
+ return HSW_TVIDEO_DIP_VS_DATA(display, cpu_transcoder, i);
case HDMI_INFOFRAME_TYPE_DRM:
- return GLK_TVIDEO_DIP_DRM_DATA(dev_priv, cpu_transcoder, i);
+ return GLK_TVIDEO_DIP_DRM_DATA(display, cpu_transcoder, i);
default:
MISSING_CASE(type);
return INVALID_MMIO_REG;
}
}
-static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
+static int hsw_dip_data_size(struct intel_display *display,
unsigned int type)
{
switch (type) {
@@ -197,7 +192,7 @@ static int hsw_dip_data_size(struct drm_i915_private *dev_priv,
case DP_SDP_PPS:
return VIDEO_DIP_PPS_DATA_SIZE;
case HDMI_PACKET_TYPE_GAMUT_METADATA:
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
return VIDEO_DIP_GMP_DATA_SIZE;
else
return VIDEO_DIP_DATA_SIZE;
@@ -211,12 +206,12 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
+ u32 val = intel_de_read(display, VIDEO_DIP_CTL);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -224,22 +219,22 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_write(display, VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, VIDEO_DIP_DATA, *data);
+ intel_de_write(display, VIDEO_DIP_DATA, *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, VIDEO_DIP_DATA, 0);
+ intel_de_write(display, VIDEO_DIP_DATA, 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
- intel_de_posting_read(dev_priv, VIDEO_DIP_CTL);
+ intel_de_write(display, VIDEO_DIP_CTL, val);
+ intel_de_posting_read(display, VIDEO_DIP_CTL);
}
static void g4x_read_infoframe(struct intel_encoder *encoder,
@@ -247,22 +242,22 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, VIDEO_DIP_CTL,
+ intel_de_rmw(display, VIDEO_DIP_CTL,
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
+ *data++ = intel_de_read(display, VIDEO_DIP_DATA);
}
static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val = intel_de_read(display, VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -279,14 +274,14 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -294,23 +289,23 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
}
static void ibx_read_infoframe(struct intel_encoder *encoder,
@@ -318,25 +313,25 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ intel_de_rmw(display, TVIDEO_DIP_CTL(crtc->pipe),
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(display, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -354,14 +349,14 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -372,23 +367,23 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
if (type != HDMI_INFOFRAME_TYPE_AVI)
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe),
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv, TVIDEO_DIP_DATA(crtc->pipe), 0);
+ intel_de_write(display, TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
}
static void cpt_read_infoframe(struct intel_encoder *encoder,
@@ -396,24 +391,24 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, TVIDEO_DIP_CTL(crtc->pipe),
+ intel_de_rmw(display, TVIDEO_DIP_CTL(crtc->pipe),
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(display, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(display, TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -428,14 +423,14 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
int i;
- drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ drm_WARN(display->drm, !(val & VIDEO_DIP_ENABLE),
"Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
@@ -443,24 +438,24 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
val &= ~g4x_infoframe_enable(type);
- intel_de_write(dev_priv, reg, val);
+ intel_de_write(display, reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv,
+ intel_de_write(display,
VLV_TVIDEO_DIP_DATA(crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- intel_de_write(dev_priv,
+ intel_de_write(display,
VLV_TVIDEO_DIP_DATA(crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
}
static void vlv_read_infoframe(struct intel_encoder *encoder,
@@ -468,25 +463,25 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
unsigned int type,
void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
u32 *data = frame;
int i;
- intel_de_rmw(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe),
+ intel_de_rmw(display, VLV_TVIDEO_DIP_CTL(crtc->pipe),
VIDEO_DIP_SELECT_MASK | 0xf, g4x_infoframe_index(type));
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv,
+ *data++ = intel_de_read(display,
VLV_TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(display, VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -504,75 +499,75 @@ void hsw_write_infoframe(struct intel_encoder *encoder,
unsigned int type,
const void *frame, ssize_t len)
{
+ struct intel_display *display = to_intel_display(encoder);
const u32 *data = frame;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(dev_priv, cpu_transcoder);
+ i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(display, cpu_transcoder);
int data_size;
int i;
- u32 val = intel_de_read(dev_priv, ctl_reg);
+ u32 val = intel_de_read(display, ctl_reg);
- data_size = hsw_dip_data_size(dev_priv, type);
+ data_size = hsw_dip_data_size(display, type);
- drm_WARN_ON(&dev_priv->drm, len > data_size);
+ drm_WARN_ON(display->drm, len > data_size);
val &= ~hsw_infoframe_enable(type);
- intel_de_write(dev_priv, ctl_reg, val);
+ intel_de_write(display, ctl_reg, val);
for (i = 0; i < len; i += 4) {
- intel_de_write(dev_priv,
- hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ intel_de_write(display,
+ hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2),
*data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < data_size; i += 4)
- intel_de_write(dev_priv,
- hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ intel_de_write(display,
+ hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2),
0);
/* Wa_14013475917 */
- if (!(IS_DISPLAY_VER(dev_priv, 13, 14) && crtc_state->has_psr &&
+ if (!(IS_DISPLAY_VER(display, 13, 14) && crtc_state->has_psr &&
!crtc_state->has_panel_replay && type == DP_SDP_VSC))
val |= hsw_infoframe_enable(type);
if (type == DP_SDP_VSC)
val |= VSC_DIP_HW_DATA_SW_HEA;
- intel_de_write(dev_priv, ctl_reg, val);
- intel_de_posting_read(dev_priv, ctl_reg);
+ intel_de_write(display, ctl_reg, val);
+ intel_de_posting_read(display, ctl_reg);
}
void hsw_read_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
unsigned int type, void *frame, ssize_t len)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 *data = frame;
int i;
for (i = 0; i < len; i += 4)
- *data++ = intel_de_read(dev_priv,
- hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2));
+ *data++ = intel_de_read(display,
+ hsw_dip_data_reg(display, cpu_transcoder, type, i >> 2));
}
static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = intel_de_read(dev_priv,
- HSW_TVIDEO_DIP_CTL(dev_priv, pipe_config->cpu_transcoder));
+ struct intel_display *display = to_intel_display(encoder);
+ u32 val = intel_de_read(display,
+ HSW_TVIDEO_DIP_CTL(display, pipe_config->cpu_transcoder));
u32 mask;
mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
VIDEO_DIP_ENABLE_GCP_HSW | VIDEO_DIP_ENABLE_VS_HSW |
VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
mask |= VIDEO_DIP_ENABLE_DRM_GLK;
- if (HAS_AS_SDP(dev_priv))
+ if (HAS_AS_SDP(display))
mask |= VIDEO_DIP_ENABLE_AS_ADL;
return val & mask;
@@ -604,7 +599,7 @@ u32 intel_hdmi_infoframe_enable(unsigned int type)
u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
u32 val, ret = 0;
int i;
@@ -615,7 +610,7 @@ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
unsigned int type = infoframe_type_to_idx[i];
- if (HAS_DDI(dev_priv)) {
+ if (HAS_DDI(display)) {
if (val & hsw_infoframe_enable(type))
ret |= BIT(i);
} else {
@@ -830,11 +825,11 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct hdmi_drm_infoframe *frame = &crtc_state->infoframes.drm.drm;
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int ret;
- if (DISPLAY_VER(dev_priv) < 10)
+ if (DISPLAY_VER(display) < 10)
return true;
if (!crtc_state->has_infoframe)
@@ -848,13 +843,13 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_infoframe_set_hdr_metadata(frame, conn_state);
if (ret < 0) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"couldn't set HDR metadata in infoframe\n");
return false;
}
ret = hdmi_drm_infoframe_check(frame);
- if (drm_WARN_ON(&dev_priv->drm, ret))
+ if (drm_WARN_ON(display->drm, ret))
return false;
return true;
@@ -865,11 +860,11 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -889,21 +884,21 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
if (!(val & VIDEO_DIP_ENABLE))
return;
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"video DIP still enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
if (val & VIDEO_DIP_ENABLE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"video DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
return;
@@ -916,8 +911,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -977,6 +972,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -985,8 +981,8 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
return false;
- if (HAS_DDI(dev_priv))
- reg = HSW_TVIDEO_DIP_GCP(dev_priv, crtc_state->cpu_transcoder);
+ if (HAS_DDI(display))
+ reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -994,7 +990,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
else
return false;
- intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp);
+ intel_de_write(display, reg, crtc_state->infoframes.gcp);
return true;
}
@@ -1002,6 +998,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg;
@@ -1010,8 +1007,8 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
return;
- if (HAS_DDI(dev_priv))
- reg = HSW_TVIDEO_DIP_GCP(dev_priv, crtc_state->cpu_transcoder);
+ if (HAS_DDI(display))
+ reg = HSW_TVIDEO_DIP_GCP(display, crtc_state->cpu_transcoder);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
else if (HAS_PCH_SPLIT(dev_priv))
@@ -1019,7 +1016,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
else
return;
- crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg);
+ crtc_state->infoframes.gcp = intel_de_read(display, reg);
}
static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
@@ -1049,12 +1046,12 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1068,13 +1065,13 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ drm_WARN(display->drm, val & VIDEO_DIP_ENABLE,
"DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
@@ -1089,8 +1086,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1108,11 +1105,11 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1125,8 +1122,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
@@ -1138,8 +1135,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1157,11 +1154,11 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(crtc->pipe);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1175,13 +1172,13 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ drm_WARN(display->drm, val & VIDEO_DIP_ENABLE,
"DIP already enabled on port %c\n",
(val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
@@ -1196,8 +1193,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1215,12 +1212,12 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- i915_reg_t reg = HSW_TVIDEO_DIP_CTL(dev_priv,
+ struct intel_display *display = to_intel_display(encoder);
+ i915_reg_t reg = HSW_TVIDEO_DIP_CTL(display,
crtc_state->cpu_transcoder);
- u32 val = intel_de_read(dev_priv, reg);
+ u32 val = intel_de_read(display, reg);
- assert_hdmi_transcoder_func_disabled(dev_priv,
+ assert_hdmi_transcoder_func_disabled(display,
crtc_state->cpu_transcoder);
val &= ~(VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -1229,16 +1226,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_DRM_GLK | VIDEO_DIP_ENABLE_AS_ADL);
if (!enable) {
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
return;
}
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP_HSW;
- intel_de_write(dev_priv, reg, val);
- intel_de_posting_read(dev_priv, reg);
+ intel_de_write(display, reg, val);
+ intel_de_posting_read(display, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1256,16 +1253,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
{
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct intel_display *display = to_intel_display(hdmi);
struct i2c_adapter *ddc = hdmi->attached_connector->base.ddc;
if (hdmi->dp_dual_mode.type < DRM_DP_DUAL_MODE_TYPE2_DVI)
return;
- drm_dbg_kms(&dev_priv->drm, "%s DP dual mode adaptor TMDS output\n",
+ drm_dbg_kms(display->drm, "%s DP dual mode adaptor TMDS output\n",
enable ? "Enabling" : "Disabling");
- drm_dp_dual_mode_set_tmds_output(&dev_priv->drm,
+ drm_dp_dual_mode_set_tmds_output(display->drm,
hdmi->dp_dual_mode.type, ddc, enable);
}
@@ -1331,7 +1328,7 @@ static
int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
u8 *an)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct i2c_adapter *ddc = hdmi->attached_connector->base.ddc;
int ret;
@@ -1339,14 +1336,14 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
ret = intel_hdmi_hdcp_write(dig_port, DRM_HDCP_DDC_AN, an,
DRM_HDCP_AN_LEN);
if (ret) {
- drm_dbg_kms(&i915->drm, "Write An over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Write An over DDC failed (%d)\n",
ret);
return ret;
}
ret = intel_gmbus_output_aksv(ddc);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "Failed to output aksv (%d)\n", ret);
+ drm_dbg_kms(display->drm, "Failed to output aksv (%d)\n", ret);
return ret;
}
return 0;
@@ -1355,13 +1352,13 @@ int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *dig_port,
static int intel_hdmi_hdcp_read_bksv(struct intel_digital_port *dig_port,
u8 *bksv)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BKSV, bksv,
DRM_HDCP_KSV_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read Bksv over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read Bksv over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1370,13 +1367,14 @@ static
int intel_hdmi_hdcp_read_bstatus(struct intel_digital_port *dig_port,
u8 *bstatus)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BSTATUS,
bstatus, DRM_HDCP_BSTATUS_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read bstatus over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm,
+ "Read bstatus over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1385,13 +1383,13 @@ static
int intel_hdmi_hdcp_repeater_present(struct intel_digital_port *dig_port,
bool *repeater_present)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read bcaps over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1403,13 +1401,13 @@ static
int intel_hdmi_hdcp_read_ri_prime(struct intel_digital_port *dig_port,
u8 *ri_prime)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_RI_PRIME,
ri_prime, DRM_HDCP_RI_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read Ri' over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read Ri' over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1418,13 +1416,13 @@ static
int intel_hdmi_hdcp_read_ksv_ready(struct intel_digital_port *dig_port,
bool *ksv_ready)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
u8 val;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_BCAPS, &val, 1);
if (ret) {
- drm_dbg_kms(&i915->drm, "Read bcaps over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm, "Read bcaps over DDC failed (%d)\n",
ret);
return ret;
}
@@ -1436,12 +1434,12 @@ static
int intel_hdmi_hdcp_read_ksv_fifo(struct intel_digital_port *dig_port,
int num_downstream, u8 *ksv_fifo)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_KSV_FIFO,
ksv_fifo, num_downstream * DRM_HDCP_KSV_LEN);
if (ret) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Read ksv fifo over DDC failed (%d)\n", ret);
return ret;
}
@@ -1452,7 +1450,7 @@ static
int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
int i, u32 *part)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int ret;
if (i >= DRM_HDCP_V_PRIME_NUM_PARTS)
@@ -1461,7 +1459,8 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
ret = intel_hdmi_hdcp_read(dig_port, DRM_HDCP_DDC_V_PRIME(i),
part, DRM_HDCP_V_PRIME_PART_LEN);
if (ret)
- drm_dbg_kms(&i915->drm, "Read V'[%d] over DDC failed (%d)\n",
+ drm_dbg_kms(display->drm,
+ "Read V'[%d] over DDC failed (%d)\n",
i, ret);
return ret;
}
@@ -1469,15 +1468,15 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *dig_port,
static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
enum transcoder cpu_transcoder)
{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
struct intel_crtc *crtc = to_intel_crtc(connector->base.state->crtc);
u32 scanline;
int ret;
for (;;) {
- scanline = intel_de_read(dev_priv,
- PIPEDSL(dev_priv, crtc->pipe));
+ scanline = intel_de_read(display,
+ PIPEDSL(display, crtc->pipe));
if (scanline > 100 && scanline < 200)
break;
usleep_range(25, 50);
@@ -1486,7 +1485,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
false, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Disable HDCP signalling failed (%d)\n", ret);
return ret;
}
@@ -1494,7 +1493,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector,
ret = intel_ddi_toggle_hdcp_bits(&dig_port->base, cpu_transcoder,
true, TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Enable HDCP signalling failed (%d)\n", ret);
return ret;
}
@@ -1507,6 +1506,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
enum transcoder cpu_transcoder,
bool enable)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_connector *connector = hdmi->attached_connector;
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1519,7 +1519,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *dig_port,
cpu_transcoder, enable,
TRANS_DDI_HDCP_SIGNALLING);
if (ret) {
- drm_err(&dev_priv->drm, "%s HDCP signalling failed (%d)\n",
+ drm_err(display->drm, "%s HDCP signalling failed (%d)\n",
enable ? "Enable" : "Disable", ret);
return ret;
}
@@ -1539,6 +1539,7 @@ static
bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
@@ -1558,9 +1559,9 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port,
if (wait_for((intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) ==
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
- drm_dbg_kms(&i915->drm, "Ri' mismatch detected (%x)\n",
- intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
- port)));
+ drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n",
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder,
+ port)));
return false;
}
return true;
@@ -1570,14 +1571,14 @@ static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *dig_port,
struct intel_connector *connector)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
int retry;
for (retry = 0; retry < 3; retry++)
if (intel_hdmi_hdcp_check_link_once(dig_port, connector))
return true;
- drm_err(&i915->drm, "Link check failed\n");
+ drm_err(display->drm, "Link check failed\n");
return false;
}
@@ -1628,13 +1629,13 @@ hdcp2_detect_msg_availability(struct intel_digital_port *dig_port,
u8 msg_id, bool *msg_ready,
ssize_t *msg_sz)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
int ret;
ret = intel_hdmi_hdcp2_read_rx_status(dig_port, rx_status);
if (ret < 0) {
- drm_dbg_kms(&i915->drm, "rx_status read failed. Err %d\n",
+ drm_dbg_kms(display->drm, "rx_status read failed. Err %d\n",
ret);
return ret;
}
@@ -1655,7 +1656,7 @@ static ssize_t
intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
u8 msg_id, bool paired)
{
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
+ struct intel_display *display = to_intel_display(dig_port);
bool msg_ready = false;
int timeout, ret;
ssize_t msg_sz = 0;
@@ -1670,7 +1671,8 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port,
!ret && msg_ready && msg_sz, timeout * 1000,
1000, 5 * 1000);
if (ret)
- drm_dbg_kms(&i915->drm, "msg_id: %d, ret: %d, timeout: %d\n",
+ drm_dbg_kms(display->drm,
+ "msg_id: %d, ret: %d, timeout: %d\n",
msg_id, ret, timeout);
return ret ? ret : msg_sz;
@@ -1691,8 +1693,8 @@ static
int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector,
u8 msg_id, void *buf, size_t size)
{
+ struct intel_display *display = to_intel_display(connector);
struct intel_digital_port *dig_port = intel_attached_dig_port(connector);
- struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev);
struct intel_hdmi *hdmi = &dig_port->hdmi;
struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
unsigned int offset;
@@ -1708,7 +1710,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector,
* available buffer.
*/
if (ret > size) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"msg_sz(%zd) is more than exp size(%zu)\n",
ret, size);
return -EINVAL;
@@ -1717,7 +1719,7 @@ int intel_hdmi_hdcp2_read_msg(struct intel_connector *connector,
offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
ret = intel_hdmi_hdcp_read(dig_port, offset, buf, ret);
if (ret)
- drm_dbg_kms(&i915->drm, "Failed to read msg_id: %d(%zd)\n",
+ drm_dbg_kms(display->drm, "Failed to read msg_id: %d(%zd)\n",
msg_id, ret);
return ret;
@@ -1783,16 +1785,17 @@ static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int max_tmds_clock, vbt_max_tmds_clock;
- if (DISPLAY_VER(dev_priv) >= 13 || IS_ALDERLAKE_S(dev_priv))
+ if (DISPLAY_VER(display) >= 13 || IS_ALDERLAKE_S(dev_priv))
max_tmds_clock = 600000;
- else if (DISPLAY_VER(dev_priv) >= 10)
+ else if (DISPLAY_VER(display) >= 10)
max_tmds_clock = 594000;
- else if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv))
+ else if (DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv))
max_tmds_clock = 300000;
- else if (DISPLAY_VER(dev_priv) >= 5)
+ else if (DISPLAY_VER(display) >= 5)
max_tmds_clock = 225000;
else
max_tmds_clock = 165000;
@@ -1848,7 +1851,8 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits,
bool has_hdmi_sink)
{
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct intel_display *display = to_intel_display(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
if (clock < 25000)
@@ -1885,7 +1889,7 @@ hdmi_port_clock_valid(struct intel_hdmi *hdmi,
* FIXME: We will hopefully get an algorithmic way of programming
* the MPLLB for HDMI in the future.
*/
- if (DISPLAY_VER(dev_priv) >= 14)
+ if (DISPLAY_VER(display) >= 14)
return intel_cx0_phy_check_hdmi_link_rate(hdmi, clock);
else if (IS_DG2(dev_priv))
return intel_snps_phy_check_hdmi_link_rate(clock);
@@ -1908,13 +1912,13 @@ int intel_hdmi_tmds_clock(int clock, int bpc,
return DIV_ROUND_CLOSEST(clock * bpc, 8);
}
-static bool intel_hdmi_source_bpc_possible(struct drm_i915_private *i915, int bpc)
+static bool intel_hdmi_source_bpc_possible(struct intel_display *display, int bpc)
{
switch (bpc) {
case 12:
- return !HAS_GMCH(i915);
+ return !HAS_GMCH(display);
case 10:
- return DISPLAY_VER(i915) >= 11;
+ return DISPLAY_VER(display) >= 11;
case 8:
return true;
default:
@@ -1960,7 +1964,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
bool has_hdmi_sink,
enum intel_output_format sink_format)
{
- struct drm_i915_private *i915 = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
enum drm_mode_status status = MODE_OK;
int bpc;
@@ -1973,7 +1977,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
for (bpc = 12; bpc >= 8; bpc -= 2) {
int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, sink_format);
- if (!intel_hdmi_source_bpc_possible(i915, bpc))
+ if (!intel_hdmi_source_bpc_possible(display, bpc))
continue;
if (!intel_hdmi_sink_bpc_possible(connector, bpc, has_hdmi_sink, sink_format))
@@ -1985,7 +1989,7 @@ intel_hdmi_mode_clock_valid(struct drm_connector *connector, int clock,
}
/* can never happen */
- drm_WARN_ON(&i915->drm, status == MODE_OK);
+ drm_WARN_ON(display->drm, status == MODE_OK);
return status;
}
@@ -1994,8 +1998,9 @@ static enum drm_mode_status
intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
- struct drm_i915_private *dev_priv = intel_hdmi_to_i915(hdmi);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum drm_mode_status status;
int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->display.cdclk.max_dotclk_freq;
@@ -2073,17 +2078,16 @@ bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state,
static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc)
{
- struct drm_i915_private *dev_priv =
- to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- if (!intel_hdmi_source_bpc_possible(dev_priv, bpc))
+ if (!intel_hdmi_source_bpc_possible(display, bpc))
return false;
/* Display Wa_1405510057:icl,ehl */
if (intel_hdmi_is_ycbcr420(crtc_state) &&
- bpc == 10 && DISPLAY_VER(dev_priv) == 11 &&
+ bpc == 10 && DISPLAY_VER(display) == 11 &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -2130,7 +2134,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
bool respect_downstream_limits)
{
- struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int bpc, clock = adjusted_mode->crtc_clock;
@@ -2153,7 +2157,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
*/
crtc_state->pipe_bpp = min(crtc_state->pipe_bpp, bpc * 3);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"picking %d bpc for HDMI output (pipe bpp: %d)\n",
bpc, crtc_state->pipe_bpp);
@@ -2230,10 +2234,10 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
const struct drm_connector_state *conn_state,
bool respect_downstream_limits)
{
+ struct intel_display *display = to_intel_display(encoder);
struct intel_connector *connector = to_intel_connector(conn_state->connector);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
const struct drm_display_info *info = &connector->base.display_info;
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
bool ycbcr_420_only = drm_mode_is_420_only(info, adjusted_mode);
int ret;
@@ -2241,7 +2245,7 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder,
intel_hdmi_sink_format(crtc_state, connector, ycbcr_420_only);
if (ycbcr_420_only && crtc_state->sink_format != INTEL_OUTPUT_FORMAT_YCBCR420) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"YCbCr 4:2:0 mode but YCbCr 4:2:0 output not possible. Falling back to RGB.\n");
crtc_state->sink_format = INTEL_OUTPUT_FORMAT_RGB;
}
@@ -2302,7 +2306,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
struct drm_connector *connector = conn_state->connector;
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
@@ -2335,7 +2339,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
if (ret)
ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, false);
if (ret) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"unsupported HDMI clock (%d kHz), rejecting mode\n",
pipe_config->hw.adjusted_mode.crtc_clock);
return ret;
@@ -2370,22 +2374,22 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
conn_state);
if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad AVI infoframe\n");
+ drm_dbg_kms(display->drm, "bad AVI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad SPD infoframe\n");
+ drm_dbg_kms(display->drm, "bad SPD infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad HDMI infoframe\n");
+ drm_dbg_kms(display->drm, "bad HDMI infoframe\n");
return -EINVAL;
}
if (!intel_hdmi_compute_drm_infoframe(encoder, pipe_config, conn_state)) {
- drm_dbg_kms(&dev_priv->drm, "bad DRM infoframe\n");
+ drm_dbg_kms(display->drm, "bad DRM infoframe\n");
return -EINVAL;
}
@@ -2418,13 +2422,14 @@ intel_hdmi_unset_edid(struct drm_connector *connector)
static void
intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
struct i2c_adapter *ddc = connector->ddc;
enum drm_dp_dual_mode_type type;
- type = drm_dp_dual_mode_detect(&dev_priv->drm, ddc);
+ type = drm_dp_dual_mode_detect(display->drm, ddc);
/*
* Type 1 DVI adaptors are not required to implement any
@@ -2438,7 +2443,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
if (type == DRM_DP_DUAL_MODE_UNKNOWN) {
if (!connector->force &&
intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Assuming DP dual mode adaptor presence based on VBT\n");
type = DRM_DP_DUAL_MODE_TYPE1_DVI;
} else {
@@ -2451,17 +2456,17 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
hdmi->dp_dual_mode.type = type;
hdmi->dp_dual_mode.max_tmds_clock =
- drm_dp_dual_mode_max_tmds_clock(&dev_priv->drm, type, ddc);
+ drm_dp_dual_mode_max_tmds_clock(display->drm, type, ddc);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n",
drm_dp_get_dual_mode_type_name(type),
hdmi->dp_dual_mode.max_tmds_clock);
/* Older VBTs are often buggy and can't be trusted :( Play it safe. */
- if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) &&
+ if ((DISPLAY_VER(display) >= 8 || IS_HASWELL(dev_priv)) &&
!intel_bios_encoder_supports_dp_dual_mode(encoder->devdata)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n");
hdmi->dp_dual_mode.max_tmds_clock = 0;
}
@@ -2470,6 +2475,7 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector)
static bool
intel_hdmi_set_edid(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct i2c_adapter *ddc = connector->ddc;
@@ -2482,7 +2488,7 @@ intel_hdmi_set_edid(struct drm_connector *connector)
drm_edid = drm_edid_read_ddc(connector, ddc);
if (!drm_edid && !intel_gmbus_is_forced_bit(ddc)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HDMI GMBUS EDID read failed, retry using GPIO bit-banging\n");
intel_gmbus_force_bit(ddc, true);
drm_edid = drm_edid_read_ddc(connector, ddc);
@@ -2511,13 +2517,14 @@ intel_hdmi_set_edid(struct drm_connector *connector)
static enum drm_connector_status
intel_hdmi_detect(struct drm_connector *connector, bool force)
{
+ struct intel_display *display = to_intel_display(connector->dev);
enum drm_connector_status status = connector_status_disconnected;
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct intel_hdmi *intel_hdmi = intel_attached_hdmi(to_intel_connector(connector));
struct intel_encoder *encoder = &hdmi_to_dig_port(intel_hdmi)->base;
intel_wakeref_t wakeref;
- drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_device_enabled(dev_priv))
@@ -2528,7 +2535,7 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
- if (DISPLAY_VER(dev_priv) >= 11 &&
+ if (DISPLAY_VER(display) >= 11 &&
!intel_digital_port_connected(encoder))
goto out;
@@ -2549,9 +2556,10 @@ out:
static void
intel_hdmi_force(struct drm_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector->dev);
struct drm_i915_private *i915 = to_i915(connector->dev);
- drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ drm_dbg_kms(display->drm, "[CONNECTOR:%d:%s]\n",
connector->base.id, connector->name);
if (!intel_display_driver_check_access(i915))
@@ -2608,9 +2616,9 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
static int intel_hdmi_connector_atomic_check(struct drm_connector *connector,
struct drm_atomic_state *state)
{
- struct drm_i915_private *i915 = to_i915(state->dev);
+ struct intel_display *display = to_intel_display(connector->dev);
- if (HAS_DDI(i915))
+ if (HAS_DDI(display))
return intel_digital_connector_atomic_check(connector, state);
else
return g4x_hdmi_connector_atomic_check(connector, state);
@@ -2625,7 +2633,7 @@ static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs
static void
intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_display *display = to_intel_display(intel_hdmi);
intel_attach_force_audio_property(connector);
intel_attach_broadcast_rgb_property(connector);
@@ -2634,10 +2642,10 @@ intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *c
intel_attach_hdmi_colorspace_property(connector);
drm_connector_attach_content_type_property(connector);
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
drm_connector_attach_hdr_output_metadata_property(connector);
- if (!HAS_GMCH(dev_priv))
+ if (!HAS_GMCH(display))
drm_connector_attach_max_bpc_property(connector, 8, 12);
}
@@ -2664,14 +2672,14 @@ bool intel_hdmi_handle_sink_scrambling(struct intel_encoder *encoder,
bool high_tmds_clock_ratio,
bool scrambling)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct drm_scrambling *sink_scrambling =
&connector->display_info.hdmi.scdc.scrambling;
if (!sink_scrambling->supported)
return true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[CONNECTOR:%d:%s] scrambling=%s, TMDS bit clock ratio=1/%d\n",
connector->base.id, connector->name,
str_yes_no(scrambling), high_tmds_clock_ratio ? 40 : 10);
@@ -2752,7 +2760,7 @@ static u8 cnp_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
enum port port = encoder->port;
if (intel_encoder_is_combo(encoder))
@@ -2760,7 +2768,7 @@ static u8 icl_encoder_to_ddc_pin(struct intel_encoder *encoder)
else if (intel_encoder_is_tc(encoder))
return GMBUS_PIN_9_TC1_ICP + intel_encoder_to_tc(encoder);
- drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
+ drm_WARN(display->drm, 1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
@@ -2808,10 +2816,11 @@ static u8 rkl_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 gen9bc_tgp_encoder_to_ddc_pin(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
enum phy phy = intel_encoder_to_phy(encoder);
- drm_WARN_ON(&i915->drm, encoder->port == PORT_A);
+ drm_WARN_ON(display->drm, encoder->port == PORT_A);
/*
* Pin mapping for GEN9 BC depends on which PCH is present. With TGP,
@@ -2871,6 +2880,7 @@ static u8 g4x_encoder_to_ddc_pin(struct intel_encoder *encoder)
static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u8 ddc_pin;
@@ -2880,7 +2890,7 @@ static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
ddc_pin = dg1_encoder_to_ddc_pin(encoder);
else if (IS_ROCKETLAKE(dev_priv))
ddc_pin = rkl_encoder_to_ddc_pin(encoder);
- else if (DISPLAY_VER(dev_priv) == 9 && HAS_PCH_TGP(dev_priv))
+ else if (DISPLAY_VER(display) == 9 && HAS_PCH_TGP(dev_priv))
ddc_pin = gen9bc_tgp_encoder_to_ddc_pin(encoder);
else if ((IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) &&
HAS_PCH_TGP(dev_priv))
@@ -2902,10 +2912,11 @@ static u8 intel_hdmi_default_ddc_pin(struct intel_encoder *encoder)
static struct intel_encoder *
get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_encoder *other;
- for_each_intel_encoder(&i915->drm, other) {
+ for_each_intel_encoder(display->drm, other) {
struct intel_connector *connector;
if (other == encoder)
@@ -2925,6 +2936,7 @@ get_encoder_by_ddc_pin(struct intel_encoder *encoder, u8 ddc_pin)
static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
{
+ struct intel_display *display = to_intel_display(encoder);
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_encoder *other;
const char *source;
@@ -2939,20 +2951,22 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
}
if (!intel_gmbus_is_valid_pin(i915, ddc_pin)) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] Invalid DDC pin %d\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] Invalid DDC pin %d\n",
encoder->base.base.id, encoder->base.name, ddc_pin);
return 0;
}
other = get_encoder_by_ddc_pin(encoder, ddc_pin);
if (other) {
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] DDC pin %d already claimed by [ENCODER:%d:%s]\n",
encoder->base.base.id, encoder->base.name, ddc_pin,
other->base.base.id, other->base.name);
return 0;
}
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] Using DDC pin 0x%x (%s)\n",
encoder->base.base.id, encoder->base.name,
ddc_pin, source);
@@ -2962,6 +2976,7 @@ static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
void intel_infoframe_init(struct intel_digital_port *dig_port)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_i915_private *dev_priv =
to_i915(dig_port->base.base.dev);
@@ -2975,7 +2990,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
dig_port->read_infoframe = g4x_read_infoframe;
dig_port->set_infoframes = g4x_set_infoframes;
dig_port->infoframes_enabled = g4x_infoframes_enabled;
- } else if (HAS_DDI(dev_priv)) {
+ } else if (HAS_DDI(display)) {
if (intel_bios_encoder_is_lspcon(dig_port->base.devdata)) {
dig_port->write_infoframe = lspcon_write_infoframe;
dig_port->read_infoframe = lspcon_read_infoframe;
@@ -3003,6 +3018,7 @@ void intel_infoframe_init(struct intel_digital_port *dig_port)
void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct intel_connector *intel_connector)
{
+ struct intel_display *display = to_intel_display(dig_port);
struct drm_connector *connector = &intel_connector->base;
struct intel_hdmi *intel_hdmi = &dig_port->hdmi;
struct intel_encoder *intel_encoder = &dig_port->base;
@@ -3012,11 +3028,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
struct cec_connector_info conn_info;
u8 ddc_pin;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
- if (DISPLAY_VER(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
+ if (DISPLAY_VER(display) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
if (drm_WARN(dev, dig_port->max_lanes < 4,
@@ -3036,18 +3052,18 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
drm_connector_helper_add(connector, &intel_hdmi_connector_helper_funcs);
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
connector->interlace_allowed = true;
connector->stereo_allowed = true;
- if (DISPLAY_VER(dev_priv) >= 10)
+ if (DISPLAY_VER(display) >= 10)
connector->ycbcr_420_allowed = true;
intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_connector->base.polled = intel_connector->polled;
- if (HAS_DDI(dev_priv))
+ if (HAS_DDI(display))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
else
intel_connector->get_hw_state = intel_connector_get_hw_state;
@@ -3061,7 +3077,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
int ret = intel_hdcp_init(intel_connector, dig_port,
&intel_hdmi_hdcp_shim);
if (ret)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"HDCP init failed, skipping.\n");
}
@@ -3071,7 +3087,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *dig_port,
cec_notifier_conn_register(dev->dev, port_identifier(port),
&conn_info);
if (!intel_hdmi->cec_notifier)
- drm_dbg_kms(&dev_priv->drm, "CEC notifier get failed\n");
+ drm_dbg_kms(display->drm, "CEC notifier get failed\n");
}
/*
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index 6b39df38d57a..9b97623665c5 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -58,6 +58,5 @@ int intel_hdmi_dsc_get_num_slices(const struct intel_crtc_state *crtc_state,
int src_max_slices, int src_max_slice_width,
int hdmi_max_slices, int hdmi_throughput);
int intel_hdmi_dsc_get_slice_height(int vactive);
-struct drm_i915_private *intel_hdmi_to_i915(struct intel_hdmi *intel_hdmi);
#endif /* __INTEL_HDMI_H__ */
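The intel_hdmi_to_i915() helper removed above becomes redundant because to_intel_display() accepts many display object types directly. The following standalone sketch models that kind of type dispatch with C11 _Generic; every type, field, and helper here is an illustrative assumption, not the kernel's actual definition.

#include <stdio.h>

struct intel_display { const char *name; };

/* Simplified stand-ins for two i915 display object types */
struct intel_encoder { struct intel_display *display; };
struct intel_digital_port { struct intel_encoder base; };

static struct intel_display *encoder_to_display(struct intel_encoder *encoder)
{
	return encoder->display;
}

static struct intel_display *dig_port_to_display(struct intel_digital_port *dig_port)
{
	return dig_port->base.display;
}

/* One polymorphic entry point instead of per-type helpers such as
 * intel_hdmi_to_i915(): _Generic selects the converter by pointer type. */
#define to_intel_display(p)						\
	_Generic((p),							\
		 struct intel_encoder * : encoder_to_display,		\
		 struct intel_digital_port * : dig_port_to_display)(p)

int main(void)
{
	struct intel_display display = { "display0" };
	struct intel_digital_port dig_port = { .base = { .display = &display } };

	/* Both object types resolve through the same macro */
	printf("%s\n", to_intel_display(&dig_port.base)->name);
	printf("%s\n", to_intel_display(&dig_port)->name);
	return 0;
}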
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
index a1f07ee69a86..2c4e946d5575 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c
@@ -456,6 +456,7 @@ u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
{
+ struct intel_display *display = &dev_priv->display;
u32 pin_mask = 0, long_mask = 0;
u32 hotplug_trigger;
@@ -477,7 +478,7 @@ void i9xx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_status)
if ((IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
hotplug_status & DP_AUX_CHANNEL_MASK_INT_STATUS_G4X)
- intel_dp_aux_irq_handler(dev_priv);
+ intel_dp_aux_irq_handler(display);
}
void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
@@ -513,6 +514,7 @@ void ibx_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 hotplug_trigger)
void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
{
+ struct intel_display *display = &i915->display;
enum hpd_pin pin;
u32 hotplug_trigger = iir & (XELPDP_DP_ALT_HOTPLUG_MASK | XELPDP_TBT_HOTPLUG_MASK);
u32 trigger_aux = iir & XELPDP_AUX_TC_MASK;
@@ -545,7 +547,7 @@ void xelpdp_pica_irq_handler(struct drm_i915_private *i915, u32 iir)
}
if (trigger_aux)
- intel_dp_aux_irq_handler(i915);
+ intel_dp_aux_irq_handler(display);
if (!pin_mask && !trigger_aux)
drm_err(&i915->drm,
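Two derivation styles appear in the hotplug hunks above: an IRQ handler that is handed a struct drm_i915_private takes the display view from the embedded member, while code holding a display-typed object goes through to_intel_display(). A minimal compilable model of the first style, with stubbed types that are assumptions rather than the real layout:

struct intel_display { int dummy; };
struct drm_i915_private { struct intel_display display; };

/* Stand-in for the real AUX IRQ handler, which now takes the display view */
static void intel_dp_aux_irq_handler(struct intel_display *display)
{
	(void)display;
}

static void i9xx_hpd_irq_handler_model(struct drm_i915_private *dev_priv)
{
	/* The IRQ path still receives the device private, so the display
	 * pointer comes from the embedded member rather than a conversion
	 * helper. */
	struct intel_display *display = &dev_priv->display;

	intel_dp_aux_irq_handler(display);
}

int main(void)
{
	struct drm_i915_private dev_priv = { { 0 } };

	i9xx_hpd_irq_handler_model(&dev_priv);
	return 0;
}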
diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c
index 0d48b9bec29c..f13ab680c2cf 100644
--- a/drivers/gpu/drm/i915/display/intel_pch_display.c
+++ b/drivers/gpu/drm/i915/display/intel_pch_display.c
@@ -358,6 +358,7 @@ void ilk_pch_pre_enable(struct intel_atomic_state *state,
void ilk_pch_enable(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -399,7 +400,7 @@ void ilk_pch_enable(struct intel_atomic_state *state,
intel_enable_shared_dpll(crtc_state);
/* set transcoder timing, panel must allow it */
- assert_pps_unlocked(dev_priv, pipe);
+ assert_pps_unlocked(display, pipe);
if (intel_crtc_has_dp_encoder(crtc_state)) {
intel_pch_transcoder_set_m1_n1(crtc, &crtc_state->dp_m_n);
intel_pch_transcoder_set_m2_n2(crtc, &crtc_state->dp_m2_n2);
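On the caller side, ilk_pch_enable() derives the display pointer from the atomic state and hands it to the retyped assert_pps_unlocked(). A reduced model of that call chain, with all types stubbed out as assumptions:

struct intel_display { int dummy; };
struct intel_atomic_state { struct intel_display *display; };
enum pipe { PIPE_A, PIPE_B };

static struct intel_display *to_intel_display(struct intel_atomic_state *state)
{
	return state->display;
}

/* assert_pps_unlocked() after the patch: display view instead of i915 */
static void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
	(void)display;
	(void)pipe;
}

static void ilk_pch_enable_model(struct intel_atomic_state *state)
{
	struct intel_display *display = to_intel_display(state);

	assert_pps_unlocked(display, PIPE_A);
}

int main(void)
{
	struct intel_display display = { 0 };
	struct intel_atomic_state state = { &display };

	ilk_pch_enable_model(&state);
	return 0;
}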
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 68141af4da54..feddc30e3375 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -18,15 +18,18 @@
#include "intel_pps_regs.h"
#include "intel_quirks.h"
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+static void vlv_steal_power_sequencer(struct intel_display *display,
enum pipe pipe);
static void pps_init_delays(struct intel_dp *intel_dp);
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd);
-static const char *pps_name(struct drm_i915_private *i915,
- struct intel_pps *pps)
+static const char *pps_name(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
+ struct intel_pps *pps = &intel_dp->pps;
+
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
switch (pps->pps_pipe) {
case INVALID_PIPE:
@@ -60,14 +63,15 @@ static const char *pps_name(struct drm_i915_private *i915,
intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_wakeref_t wakeref;
/*
* See intel_pps_reset_all() why we need a power domain reference here.
*/
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_DISPLAY_CORE);
- mutex_lock(&dev_priv->display.pps.mutex);
+ mutex_lock(&display->pps.mutex);
return wakeref;
}
@@ -75,9 +79,10 @@ intel_wakeref_t intel_pps_lock(struct intel_dp *intel_dp)
intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
intel_wakeref_t wakeref)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- mutex_unlock(&dev_priv->display.pps.mutex);
+ mutex_unlock(&display->pps.mutex);
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
return 0;
@@ -86,7 +91,8 @@ intel_wakeref_t intel_pps_unlock(struct intel_dp *intel_dp,
static void
vlv_power_sequencer_kick(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = intel_dp->pps.pps_pipe;
bool pll_enabled, release_cl_override = false;
@@ -94,22 +100,22 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
- if (drm_WARN(&dev_priv->drm,
- intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ if (drm_WARN(display->drm,
+ intel_de_read(display, intel_dp->output_reg) & DP_PORT_EN,
"skipping %s kick due to [ENCODER:%d:%s] being active\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name))
return;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"kicking %s for [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
+ DP = intel_de_read(display, intel_dp->output_reg) & DP_DETECTED;
DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
@@ -119,7 +125,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
else
DP |= DP_PIPE_SEL(pipe);
- pll_enabled = intel_de_read(dev_priv, DPLL(dev_priv, pipe)) & DPLL_VCO_ENABLE;
+ pll_enabled = intel_de_read(display, DPLL(display, pipe)) & DPLL_VCO_ENABLE;
/*
* The DPLL for the pipe must be enabled for this to work.
@@ -130,7 +136,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
!chv_phy_powergate_ch(dev_priv, phy, ch, true);
if (vlv_force_pll_on(dev_priv, pipe, vlv_get_dpll(dev_priv))) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Failed to force on PLL for pipe %c!\n",
pipe_name(pipe));
return;
@@ -143,14 +149,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
- intel_de_write(dev_priv, intel_dp->output_reg, DP);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, DP);
+ intel_de_posting_read(display, intel_dp->output_reg);
- intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(display, intel_dp->output_reg);
- intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
- intel_de_posting_read(dev_priv, intel_dp->output_reg);
+ intel_de_write(display, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(display, intel_dp->output_reg);
if (!pll_enabled) {
vlv_force_pll_off(dev_priv, pipe);
@@ -160,7 +166,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
}
}
-static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
+static enum pipe vlv_find_free_pps(struct intel_display *display)
{
struct intel_encoder *encoder;
unsigned int pipes = (1 << PIPE_A) | (1 << PIPE_B);
@@ -169,11 +175,11 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
* We don't have power sequencer currently.
* Pick one that's not used by other ports.
*/
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->pps.active_pipe != INVALID_PIPE &&
intel_dp->pps.active_pipe !=
intel_dp->pps.pps_pipe);
@@ -181,7 +187,7 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
if (intel_dp->pps.pps_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->pps.pps_pipe);
} else {
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->pps.pps_pipe != INVALID_PIPE);
if (intel_dp->pps.active_pipe != INVALID_PIPE)
@@ -198,36 +204,36 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
static enum pipe
vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
+ drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE &&
intel_dp->pps.active_pipe != intel_dp->pps.pps_pipe);
if (intel_dp->pps.pps_pipe != INVALID_PIPE)
return intel_dp->pps.pps_pipe;
- pipe = vlv_find_free_pps(dev_priv);
+ pipe = vlv_find_free_pps(display);
/*
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
- if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
+ if (drm_WARN_ON(display->drm, pipe == INVALID_PIPE))
pipe = PIPE_A;
- vlv_steal_power_sequencer(dev_priv, pipe);
+ vlv_steal_power_sequencer(display, pipe);
intel_dp->pps.pps_pipe = pipe;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"picked %s for [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name);
/* init power sequencer on this pipe and port */
@@ -246,13 +252,13 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
static int
bxt_power_sequencer_idx(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int pps_idx = intel_dp->pps.pps_idx;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* We should never land here with regular DP ports */
- drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(display->drm, !intel_dp_is_edp(intel_dp));
if (!intel_dp->pps.pps_reset)
return pps_idx;
@@ -268,37 +274,38 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
return pps_idx;
}
-typedef bool (*pps_check)(struct drm_i915_private *dev_priv, int pps_idx);
+typedef bool (*pps_check)(struct intel_display *display, int pps_idx);
-static bool pps_has_pp_on(struct drm_i915_private *dev_priv, int pps_idx)
+static bool pps_has_pp_on(struct intel_display *display, int pps_idx)
{
- return intel_de_read(dev_priv, PP_STATUS(dev_priv, pps_idx)) & PP_ON;
+ return intel_de_read(display, PP_STATUS(display, pps_idx)) & PP_ON;
}
-static bool pps_has_vdd_on(struct drm_i915_private *dev_priv, int pps_idx)
+static bool pps_has_vdd_on(struct intel_display *display, int pps_idx)
{
- return intel_de_read(dev_priv, PP_CONTROL(dev_priv, pps_idx)) & EDP_FORCE_VDD;
+ return intel_de_read(display, PP_CONTROL(display, pps_idx)) & EDP_FORCE_VDD;
}
-static bool pps_any(struct drm_i915_private *dev_priv, int pps_idx)
+static bool pps_any(struct intel_display *display, int pps_idx)
{
return true;
}
static enum pipe
-vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
+vlv_initial_pps_pipe(struct intel_display *display,
enum port port, pps_check check)
{
enum pipe pipe;
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, pipe)) &
+ u32 port_sel = intel_de_read(display,
+ PP_ON_DELAYS(display, pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel != PANEL_PORT_SELECT_VLV(port))
continue;
- if (!check(dev_priv, pipe))
+ if (!check(display, pipe))
continue;
return pipe;
@@ -310,41 +317,43 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
static void
vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum port port = dig_port->base.port;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* try to find a pipe with this port selected */
/* first pick one where the panel is on */
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
pps_has_pp_on);
/* didn't find one? pick one where vdd is on */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
pps_has_vdd_on);
/* didn't find one? pick one with just the correct port */
if (intel_dp->pps.pps_pipe == INVALID_PIPE)
- intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(dev_priv, port,
+ intel_dp->pps.pps_pipe = vlv_initial_pps_pipe(display, port,
pps_any);
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps.pps_pipe == INVALID_PIPE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] no initial power sequencer\n",
dig_port->base.base.base.id, dig_port->base.base.name);
return;
}
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] initial power sequencer: %s\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
}
-static int intel_num_pps(struct drm_i915_private *i915)
+static int intel_num_pps(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
return 2;
@@ -365,23 +374,24 @@ static int intel_num_pps(struct drm_i915_private *i915)
static bool intel_pps_is_valid(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (intel_dp->pps.pps_idx == 1 &&
INTEL_PCH_TYPE(i915) >= PCH_ICP &&
INTEL_PCH_TYPE(i915) <= PCH_ADP)
- return intel_de_read(i915, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
+ return intel_de_read(display, SOUTH_CHICKEN1) & ICP_SECOND_PPS_IO_SELECT;
return true;
}
static int
-bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
+bxt_initial_pps_idx(struct intel_display *display, pps_check check)
{
- int pps_idx, pps_num = intel_num_pps(i915);
+ int pps_idx, pps_num = intel_num_pps(display);
for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- if (check(i915, pps_idx))
+ if (check(display, pps_idx))
return pps_idx;
}
@@ -391,11 +401,12 @@ bxt_initial_pps_idx(struct drm_i915_private *i915, pps_check check)
static bool
pps_initial_setup(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector = intel_dp->attached_connector;
struct drm_i915_private *i915 = to_i915(encoder->base.dev);
- lockdep_assert_held(&i915->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) {
vlv_initial_power_sequencer_setup(intel_dp);
@@ -403,46 +414,47 @@ pps_initial_setup(struct intel_dp *intel_dp)
}
/* first ask the VBT */
- if (intel_num_pps(i915) > 1)
+ if (intel_num_pps(display) > 1)
intel_dp->pps.pps_idx = connector->panel.vbt.backlight.controller;
else
intel_dp->pps.pps_idx = 0;
- if (drm_WARN_ON(&i915->drm, intel_dp->pps.pps_idx >= intel_num_pps(i915)))
+ if (drm_WARN_ON(display->drm, intel_dp->pps.pps_idx >= intel_num_pps(display)))
intel_dp->pps.pps_idx = -1;
/* VBT wasn't parsed yet? pick one where the panel is on */
if (intel_dp->pps.pps_idx < 0)
- intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_pp_on);
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_pp_on);
/* didn't find one? pick one where vdd is on */
if (intel_dp->pps.pps_idx < 0)
- intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_has_vdd_on);
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_has_vdd_on);
/* didn't find one? pick any */
if (intel_dp->pps.pps_idx < 0) {
- intel_dp->pps.pps_idx = bxt_initial_pps_idx(i915, pps_any);
+ intel_dp->pps.pps_idx = bxt_initial_pps_idx(display, pps_any);
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] no initial power sequencer, assuming %s\n",
encoder->base.base.id, encoder->base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] initial power sequencer: %s\n",
encoder->base.base.id, encoder->base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
}
return intel_pps_is_valid(intel_dp);
}
-void intel_pps_reset_all(struct drm_i915_private *dev_priv)
+void intel_pps_reset_all(struct intel_display *display)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
- if (drm_WARN_ON(&dev_priv->drm, !IS_LP(dev_priv)))
+ if (drm_WARN_ON(display->drm, !IS_LP(dev_priv)))
return;
- if (!HAS_DISPLAY(dev_priv))
+ if (!HAS_DISPLAY(display))
return;
/*
@@ -455,16 +467,16 @@ void intel_pps_reset_all(struct drm_i915_private *dev_priv)
* should use them always.
*/
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
intel_dp->pps.active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
- if (DISPLAY_VER(dev_priv) >= 9)
+ if (DISPLAY_VER(display) >= 9)
intel_dp->pps.pps_reset = true;
else
intel_dp->pps.pps_pipe = INVALID_PIPE;
@@ -482,7 +494,8 @@ struct pps_registers {
static void intel_pps_get_registers(struct intel_dp *intel_dp,
struct pps_registers *regs)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int pps_idx;
memset(regs, 0, sizeof(*regs));
@@ -494,17 +507,17 @@ static void intel_pps_get_registers(struct intel_dp *intel_dp,
else
pps_idx = intel_dp->pps.pps_idx;
- regs->pp_ctrl = PP_CONTROL(dev_priv, pps_idx);
- regs->pp_stat = PP_STATUS(dev_priv, pps_idx);
- regs->pp_on = PP_ON_DELAYS(dev_priv, pps_idx);
- regs->pp_off = PP_OFF_DELAYS(dev_priv, pps_idx);
+ regs->pp_ctrl = PP_CONTROL(display, pps_idx);
+ regs->pp_stat = PP_STATUS(display, pps_idx);
+ regs->pp_on = PP_ON_DELAYS(display, pps_idx);
+ regs->pp_off = PP_OFF_DELAYS(display, pps_idx);
/* Cycle delay moved from PP_DIVISOR to PP_CONTROL */
if (IS_GEMINILAKE(dev_priv) || IS_BROXTON(dev_priv) ||
INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
regs->pp_div = INVALID_MMIO_REG;
else
- regs->pp_div = PP_DIVISOR(dev_priv, pps_idx);
+ regs->pp_div = PP_DIVISOR(display, pps_idx);
}
static i915_reg_t
@@ -529,49 +542,51 @@ _pp_stat_reg(struct intel_dp *intel_dp)
static bool edp_have_panel_power(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
return false;
- return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
+ return (intel_de_read(display, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
intel_dp->pps.pps_pipe == INVALID_PIPE)
return false;
- return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+ return intel_de_read(display, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
void intel_pps_check_power_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
if (!intel_dp_is_edp(intel_dp))
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- drm_WARN(&dev_priv->drm, 1,
+ drm_WARN(display->drm, 1,
"[ENCODER:%d:%s] %s powered off while attempting AUX CH communication.\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
- drm_dbg_kms(&dev_priv->drm,
+ pps_name(intel_dp));
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
- intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
+ pps_name(intel_dp),
+ intel_de_read(display, _pp_stat_reg(intel_dp)),
+ intel_de_read(display, _pp_ctrl_reg(intel_dp)));
}
}
@@ -589,68 +604,71 @@ static void intel_pps_verify_state(struct intel_dp *intel_dp);
static void wait_panel_status(struct intel_dp *intel_dp,
u32 mask, u32 value)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
intel_pps_verify_state(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s mask: 0x%08x value: 0x%08x PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
mask, value,
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
- if (intel_de_wait(dev_priv, pp_stat_reg, mask, value, 5000))
- drm_err(&dev_priv->drm,
+ if (intel_de_wait(display, pp_stat_reg, mask, value, 5000))
+ drm_err(display->drm,
"[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ pps_name(intel_dp),
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
- drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
+ drm_dbg_kms(display->drm, "Wait complete\n");
}
static void wait_panel_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power on\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power on\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}
static void wait_panel_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power off time\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power off time\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}
static void wait_panel_power_cycle(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
ktime_t panel_power_on_time;
s64 panel_power_off_duration;
- drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] %s wait for panel power cycle\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s wait for panel power cycle\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
/* take the difference of current time and panel power off time
* and then make panel wait for t11_t12 if needed. */
@@ -695,13 +713,13 @@ static void edp_wait_backlight_off(struct intel_dp *intel_dp)
static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 control;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
- if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ control = intel_de_read(display, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(display->drm, !HAS_DDI(display) &&
(control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
@@ -716,13 +734,14 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
*/
bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
bool need_to_disable = !intel_dp->pps.want_panel_vdd;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return false;
@@ -733,16 +752,16 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
if (edp_have_panel_vdd(intel_dp))
return need_to_disable;
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD on\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -750,21 +769,22 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_FORCE_VDD;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ pps_name(intel_dp),
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
/*
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s panel power wasn't enabled\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
msleep(intel_dp->pps.panel_power_up_delay);
}
@@ -779,7 +799,8 @@ bool intel_pps_vdd_on_unlocked(struct intel_dp *intel_dp)
*/
void intel_pps_vdd_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
bool vdd;
@@ -792,27 +813,27 @@ void intel_pps_vdd_on(struct intel_dp *intel_dp)
I915_STATE_WARN(i915, !vdd, "[ENCODER:%d:%s] %s VDD already requested on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(i915, &intel_dp->pps));
+ pps_name(intel_dp));
}
static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct intel_digital_port *dig_port =
- dp_to_dig_port(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_stat_reg, pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.want_panel_vdd);
+ drm_WARN_ON(display->drm, intel_dp->pps.want_panel_vdd);
if (!edp_have_panel_vdd(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turning VDD off\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -820,15 +841,16 @@ static void intel_pps_vdd_off_sync_unlocked(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
/* Make sure sequencer is idle before allowing subsequent activity */
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ drm_dbg_kms(display->drm,
+ "[ENCODER:%d:%s] %s PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps),
- intel_de_read(dev_priv, pp_stat_reg),
- intel_de_read(dev_priv, pp_ctrl_reg));
+ pps_name(intel_dp),
+ intel_de_read(display, pp_stat_reg),
+ intel_de_read(display, pp_ctrl_reg));
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->pps.panel_power_off_time = ktime_get_boottime();
@@ -869,7 +891,8 @@ static void edp_panel_vdd_work(struct work_struct *__work)
static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
unsigned long delay;
/*
@@ -896,9 +919,10 @@ static void edp_panel_vdd_schedule_off(struct intel_dp *intel_dp)
*/
void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
@@ -907,7 +931,7 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
"[ENCODER:%d:%s] %s VDD not forced on",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
intel_dp->pps.want_panel_vdd = false;
@@ -919,25 +943,26 @@ void intel_pps_vdd_off_unlocked(struct intel_dp *intel_dp, bool sync)
void intel_pps_on_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
- if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ if (drm_WARN(display->drm, edp_have_panel_power(intel_dp),
"[ENCODER:%d:%s] %s panel power already on\n",
dp_to_dig_port(intel_dp)->base.base.base.id,
dp_to_dig_port(intel_dp)->base.base.name,
- pps_name(dev_priv, &intel_dp->pps)))
+ pps_name(intel_dp)))
return;
wait_panel_power_cycle(intel_dp);
@@ -947,36 +972,36 @@ void intel_pps_on_unlocked(struct intel_dp *intel_dp)
if (IS_IRONLAKE(dev_priv)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
/*
* WA: 22019252566
* Disable DPLS gating around power sequence.
*/
- if (IS_DISPLAY_VER(dev_priv, 13, 14))
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ if (IS_DISPLAY_VER(display, 13, 14))
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
0, PCH_DPLSUNIT_CLOCK_GATE_DISABLE);
pp |= PANEL_POWER_ON;
if (!IS_IRONLAKE(dev_priv))
pp |= PANEL_POWER_RESET;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
wait_panel_on(intel_dp);
intel_dp->pps.last_power_on = jiffies;
- if (IS_DISPLAY_VER(dev_priv, 13, 14))
- intel_de_rmw(dev_priv, SOUTH_DSPCLK_GATE_D,
+ if (IS_DISPLAY_VER(display, 13, 14))
+ intel_de_rmw(display, SOUTH_DSPCLK_GATE_D,
PCH_DPLSUNIT_CLOCK_GATE_DISABLE, 0);
if (IS_IRONLAKE(dev_priv)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
}
@@ -993,24 +1018,25 @@ void intel_pps_on(struct intel_dp *intel_dp)
void intel_pps_off_unlocked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
u32 pp;
i915_reg_t pp_ctrl_reg;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!intel_dp_is_edp(intel_dp))
return;
- drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
+ drm_dbg_kms(display->drm, "[ENCODER:%d:%s] %s turn panel power off\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
- drm_WARN(&dev_priv->drm, !intel_dp->pps.want_panel_vdd,
+ drm_WARN(display->drm, !intel_dp->pps.want_panel_vdd,
"[ENCODER:%d:%s] %s need VDD to turn off panel\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
+ pps_name(intel_dp));
pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -1022,8 +1048,8 @@ void intel_pps_off_unlocked(struct intel_dp *intel_dp)
intel_dp->pps.want_panel_vdd = false;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
wait_panel_off(intel_dp);
intel_dp->pps.panel_power_off_time = ktime_get_boottime();
@@ -1048,7 +1074,7 @@ void intel_pps_off(struct intel_dp *intel_dp)
/* Enable backlight in the panel power control. */
void intel_pps_backlight_on(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
/*
@@ -1066,15 +1092,15 @@ void intel_pps_backlight_on(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
}
/* Disable backlight in the panel power control. */
void intel_pps_backlight_off(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -1087,8 +1113,8 @@ void intel_pps_backlight_off(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- intel_de_write(dev_priv, pp_ctrl_reg, pp);
- intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ intel_de_write(display, pp_ctrl_reg, pp);
+ intel_de_posting_read(display, pp_ctrl_reg);
}
intel_dp->pps.last_backlight_off = jiffies;
@@ -1101,7 +1127,7 @@ void intel_pps_backlight_off(struct intel_dp *intel_dp)
*/
void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
{
- struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_display *display = to_intel_display(connector);
struct intel_dp *intel_dp = intel_attached_dp(connector);
intel_wakeref_t wakeref;
bool is_enabled;
@@ -1112,7 +1138,7 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
if (is_enabled == enable)
return;
- drm_dbg_kms(&i915->drm, "panel power control backlight %s\n",
+ drm_dbg_kms(display->drm, "panel power control backlight %s\n",
enable ? "enable" : "disable");
if (enable)
@@ -1123,14 +1149,14 @@ void intel_pps_backlight_power(struct intel_connector *connector, bool enable)
static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum pipe pipe = intel_dp->pps.pps_pipe;
- i915_reg_t pp_on_reg = PP_ON_DELAYS(dev_priv, pipe);
+ i915_reg_t pp_on_reg = PP_ON_DELAYS(display, pipe);
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+ drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
- if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(display->drm, pipe != PIPE_A && pipe != PIPE_B))
return;
intel_pps_vdd_off_sync_unlocked(intel_dp);
@@ -1144,27 +1170,27 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"detaching %s from [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
dig_port->base.base.base.id, dig_port->base.base.name);
- intel_de_write(dev_priv, pp_on_reg, 0);
- intel_de_posting_read(dev_priv, pp_on_reg);
+ intel_de_write(display, pp_on_reg, 0);
+ intel_de_posting_read(display, pp_on_reg);
intel_dp->pps.pps_pipe = INVALID_PIPE;
}
-static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
+static void vlv_steal_power_sequencer(struct intel_display *display,
enum pipe pipe)
{
struct intel_encoder *encoder;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- for_each_intel_dp(&dev_priv->drm, encoder) {
+ for_each_intel_dp(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_WARN(&dev_priv->drm, intel_dp->pps.active_pipe == pipe,
+ drm_WARN(display->drm, intel_dp->pps.active_pipe == pipe,
"stealing PPS %c from active [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1172,7 +1198,7 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
if (intel_dp->pps.pps_pipe != pipe)
continue;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"stealing PPS %c from [ENCODER:%d:%s]\n",
pipe_name(pipe), encoder->base.base.id,
encoder->base.name);
@@ -1185,13 +1211,13 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
void vlv_pps_init(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
+ drm_WARN_ON(display->drm, intel_dp->pps.active_pipe != INVALID_PIPE);
if (intel_dp->pps.pps_pipe != INVALID_PIPE &&
intel_dp->pps.pps_pipe != crtc->pipe) {
@@ -1207,7 +1233,7 @@ void vlv_pps_init(struct intel_encoder *encoder,
* We may be stealing the power
* sequencer from another port.
*/
- vlv_steal_power_sequencer(dev_priv, crtc->pipe);
+ vlv_steal_power_sequencer(display, crtc->pipe);
intel_dp->pps.active_pipe = crtc->pipe;
@@ -1217,9 +1243,9 @@ void vlv_pps_init(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps.pps_pipe = crtc->pipe;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"initializing %s for [ENCODER:%d:%s]\n",
- pps_name(dev_priv, &intel_dp->pps),
+ pps_name(intel_dp),
encoder->base.base.id, encoder->base.name);
/* init power sequencer on this pipe and port */
@@ -1229,10 +1255,11 @@ void vlv_pps_init(struct intel_encoder *encoder,
static void pps_vdd_init(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!edp_have_panel_vdd(intel_dp))
return;
@@ -1243,11 +1270,11 @@ static void pps_vdd_init(struct intel_dp *intel_dp)
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[ENCODER:%d:%s] %s VDD left on by BIOS, adjusting state tracking\n",
dig_port->base.base.base.id, dig_port->base.base.name,
- pps_name(dev_priv, &intel_dp->pps));
- drm_WARN_ON(&dev_priv->drm, intel_dp->pps.vdd_wakeref);
+ pps_name(intel_dp));
+ drm_WARN_ON(display->drm, intel_dp->pps.vdd_wakeref);
intel_dp->pps.vdd_wakeref = intel_display_power_get(dev_priv,
intel_aux_power_domain(dig_port));
}
@@ -1281,7 +1308,7 @@ static void pps_init_timestamps(struct intel_dp *intel_dp)
static void
intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u32 pp_on, pp_off, pp_ctl;
struct pps_registers regs;
@@ -1290,11 +1317,11 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
pp_ctl = ilk_get_pp_control(intel_dp);
/* Ensure PPS is unlocked */
- if (!HAS_DDI(dev_priv))
- intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
+ if (!HAS_DDI(display))
+ intel_de_write(display, regs.pp_ctrl, pp_ctl);
- pp_on = intel_de_read(dev_priv, regs.pp_on);
- pp_off = intel_de_read(dev_priv, regs.pp_off);
+ pp_on = intel_de_read(display, regs.pp_on);
+ pp_off = intel_de_read(display, regs.pp_off);
/* Pull timing values out of registers */
seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
@@ -1305,7 +1332,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
- pp_div = intel_de_read(dev_priv, regs.pp_div);
+ pp_div = intel_de_read(display, regs.pp_div);
seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
@@ -1317,9 +1344,10 @@ static void
intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
const struct edp_power_seq *seq)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
+ drm_dbg_kms(display->drm,
+ "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n",
state_name,
seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12);
}
@@ -1327,7 +1355,7 @@ intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name,
static void
intel_pps_verify_state(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct edp_power_seq hw;
struct edp_power_seq *sw = &intel_dp->pps.pps_delays;
@@ -1335,7 +1363,7 @@ intel_pps_verify_state(struct intel_dp *intel_dp)
if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 ||
hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) {
- drm_err(&i915->drm, "PPS state mismatch\n");
+ drm_err(display->drm, "PPS state mismatch\n");
intel_pps_dump_state(intel_dp, "sw", sw);
intel_pps_dump_state(intel_dp, "hw", &hw);
}
@@ -1350,9 +1378,9 @@ static bool pps_delays_valid(struct edp_power_seq *delays)
static void pps_init_delays_bios(struct intel_dp *intel_dp,
struct edp_power_seq *bios)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
if (!pps_delays_valid(&intel_dp->pps.bios_pps_delays))
intel_pps_readout_hw_state(intel_dp, &intel_dp->pps.bios_pps_delays);
@@ -1397,9 +1425,9 @@ static void pps_init_delays_vbt(struct intel_dp *intel_dp,
static void pps_init_delays_spec(struct intel_dp *intel_dp,
struct edp_power_seq *spec)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* Upper limits from eDP 1.3 spec. Note that we use the clunky units of
* our hw here, which are all in 100usec. */
@@ -1418,11 +1446,11 @@ static void pps_init_delays_spec(struct intel_dp *intel_dp,
static void pps_init_delays(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct edp_power_seq cur, vbt, spec,
*final = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
/* already initialized? */
if (pps_delays_valid(final))
@@ -1452,13 +1480,13 @@ static void pps_init_delays(struct intel_dp *intel_dp)
intel_dp->pps.panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"panel power up delay %d, power down delay %d, power cycle delay %d\n",
intel_dp->pps.panel_power_up_delay,
intel_dp->pps.panel_power_down_delay,
intel_dp->pps.panel_power_cycle_delay);
- drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ drm_dbg_kms(display->drm, "backlight on delay %d, off delay %d\n",
intel_dp->pps.backlight_on_delay,
intel_dp->pps.backlight_off_delay);
@@ -1481,14 +1509,15 @@ static void pps_init_delays(struct intel_dp *intel_dp)
static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 pp_on, pp_off, port_sel = 0;
- int div = DISPLAY_RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
+ int div = DISPLAY_RUNTIME_INFO(display)->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps.pps_delays;
- lockdep_assert_held(&dev_priv->display.pps.mutex);
+ lockdep_assert_held(&display->pps.mutex);
intel_pps_get_registers(intel_dp, &regs);
@@ -1507,16 +1536,16 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
if (force_disable_vdd) {
u32 pp = ilk_get_pp_control(intel_dp);
- drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ drm_WARN(display->drm, pp & PANEL_POWER_ON,
"Panel power already on\n");
if (pp & EDP_FORCE_VDD)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"VDD already on, disabling first\n");
pp &= ~EDP_FORCE_VDD;
- intel_de_write(dev_priv, regs.pp_ctrl, pp);
+ intel_de_write(display, regs.pp_ctrl, pp);
}
pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
@@ -1547,32 +1576,33 @@ static void pps_init_registers(struct intel_dp *intel_dp, bool force_disable_vdd
pp_on |= port_sel;
- intel_de_write(dev_priv, regs.pp_on, pp_on);
- intel_de_write(dev_priv, regs.pp_off, pp_off);
+ intel_de_write(display, regs.pp_on, pp_on);
+ intel_de_write(display, regs.pp_off, pp_off);
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
if (i915_mmio_reg_valid(regs.pp_div))
- intel_de_write(dev_priv, regs.pp_div,
+ intel_de_write(display, regs.pp_div,
REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
else
- intel_de_rmw(dev_priv, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
+ intel_de_rmw(display, regs.pp_ctrl, BXT_POWER_CYCLE_DELAY_MASK,
REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK,
DIV_ROUND_UP(seq->t11_t12, 1000)));
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- intel_de_read(dev_priv, regs.pp_on),
- intel_de_read(dev_priv, regs.pp_off),
+ intel_de_read(display, regs.pp_on),
+ intel_de_read(display, regs.pp_off),
i915_mmio_reg_valid(regs.pp_div) ?
- intel_de_read(dev_priv, regs.pp_div) :
- (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+ intel_de_read(display, regs.pp_div) :
+ (intel_de_read(display, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
void intel_pps_encoder_reset(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
intel_wakeref_t wakeref;
if (!intel_dp_is_edp(intel_dp))
@@ -1618,17 +1648,19 @@ bool intel_pps_init(struct intel_dp *intel_dp)
static void pps_init_late(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base;
struct intel_connector *connector = intel_dp->attached_connector;
if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
return;
- if (intel_num_pps(i915) < 2)
+ if (intel_num_pps(display) < 2)
return;
- drm_WARN(&i915->drm, connector->panel.vbt.backlight.controller >= 0 &&
+ drm_WARN(display->drm,
+ connector->panel.vbt.backlight.controller >= 0 &&
intel_dp->pps.pps_idx != connector->panel.vbt.backlight.controller,
"[ENCODER:%d:%s] power sequencer mismatch: %d (initial) vs. %d (VBT)\n",
encoder->base.base.id, encoder->base.name,
@@ -1657,32 +1689,34 @@ void intel_pps_init_late(struct intel_dp *intel_dp)
}
}
-void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
+void intel_pps_unlock_regs_wa(struct intel_display *display)
{
int pps_num;
int pps_idx;
- if (!HAS_DISPLAY(dev_priv) || HAS_DDI(dev_priv))
+ if (!HAS_DISPLAY(display) || HAS_DDI(display))
return;
/*
* This w/a is needed at least on CPT/PPT, but to be sure apply it
* everywhere where registers can be write protected.
*/
- pps_num = intel_num_pps(dev_priv);
+ pps_num = intel_num_pps(display);
for (pps_idx = 0; pps_idx < pps_num; pps_idx++)
- intel_de_rmw(dev_priv, PP_CONTROL(dev_priv, pps_idx),
+ intel_de_rmw(display, PP_CONTROL(display, pps_idx),
PANEL_UNLOCK_MASK, PANEL_UNLOCK_REGS);
}
-void intel_pps_setup(struct drm_i915_private *i915)
+void intel_pps_setup(struct intel_display *display)
{
+ struct drm_i915_private *i915 = to_i915(display->drm);
+
if (HAS_PCH_SPLIT(i915) || IS_GEMINILAKE(i915) || IS_BROXTON(i915))
- i915->display.pps.mmio_base = PCH_PPS_BASE;
+ display->pps.mmio_base = PCH_PPS_BASE;
else if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915))
- i915->display.pps.mmio_base = VLV_PPS_BASE;
+ display->pps.mmio_base = VLV_PPS_BASE;
else
- i915->display.pps.mmio_base = PPS_BASE;
+ display->pps.mmio_base = PPS_BASE;
}
static int intel_pps_show(struct seq_file *m, void *data)
@@ -1716,21 +1750,23 @@ void intel_pps_connector_debugfs_add(struct intel_connector *connector)
connector, &intel_pps_fops);
}
-void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
+void assert_pps_unlocked(struct intel_display *display, enum pipe pipe)
{
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
i915_reg_t pp_reg;
u32 val;
enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
- if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
+ if (drm_WARN_ON(display->drm, HAS_DDI(display)))
return;
if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
- pp_reg = PP_CONTROL(dev_priv, 0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0)) & PANEL_PORT_SELECT_MASK;
+ pp_reg = PP_CONTROL(display, 0);
+ port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
+ PANEL_PORT_SELECT_MASK;
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
@@ -1751,20 +1787,21 @@ void assert_pps_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
}
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
/* presumably write lock depends on pipe, not port select */
- pp_reg = PP_CONTROL(dev_priv, pipe);
+ pp_reg = PP_CONTROL(display, pipe);
panel_pipe = pipe;
} else {
u32 port_sel;
- pp_reg = PP_CONTROL(dev_priv, 0);
- port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(dev_priv, 0)) & PANEL_PORT_SELECT_MASK;
+ pp_reg = PP_CONTROL(display, 0);
+ port_sel = intel_de_read(display, PP_ON_DELAYS(display, 0)) &
+ PANEL_PORT_SELECT_MASK;
- drm_WARN_ON(&dev_priv->drm,
+ drm_WARN_ON(display->drm,
port_sel != PANEL_PORT_SELECT_LVDS);
intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
- val = intel_de_read(dev_priv, pp_reg);
+ val = intel_de_read(display, pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
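The intel_pps.c hunks all follow one idiom: derive the struct intel_display view from the function's own argument, route locking, register access, and logging through it, and fetch the drm_i915_private pointer only where VLV/CHV-style platform checks still need it; pps_name() now takes the intel_dp itself for the same reason. A standalone model of that shape follows, with every type, layout, and check stubbed as an assumption:

#include <stdbool.h>
#include <stdio.h>

struct drm_device { int dummy; };
struct intel_display { struct drm_device *drm; /* pps.mutex etc. live here */ };
struct drm_i915_private {
	struct drm_device drm;
	struct intel_display display;
	bool is_vlv;
};
struct intel_dp { struct intel_display *display; };

static struct intel_display *to_intel_display(struct intel_dp *intel_dp)
{
	return intel_dp->display;
}

static struct drm_i915_private *to_i915(struct drm_device *drm)
{
	/* drm is the first member of the private, so this cast is valid here */
	return (struct drm_i915_private *)drm;
}

static bool IS_VALLEYVIEW(struct drm_i915_private *i915)
{
	return i915->is_vlv;
}

/* pps_name() after the patch: one argument, both views derived inside */
static const char *pps_name(struct intel_dp *intel_dp)
{
	struct intel_display *display = to_intel_display(intel_dp);
	struct drm_i915_private *i915 = to_i915(display->drm);

	/* display-level naming, platform check via the derived i915 view */
	return IS_VALLEYVIEW(i915) ? "PPS A" : "PPS 0";
}

int main(void)
{
	struct drm_i915_private i915 = { .is_vlv = true };
	struct intel_dp intel_dp = { .display = &i915.display };

	i915.display.drm = &i915.drm;
	printf("%s\n", pps_name(&intel_dp));
	return 0;
}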
diff --git a/drivers/gpu/drm/i915/display/intel_pps.h b/drivers/gpu/drm/i915/display/intel_pps.h
index 07ef96ca8da2..0c5da83a559e 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.h
+++ b/drivers/gpu/drm/i915/display/intel_pps.h
@@ -11,9 +11,9 @@
#include "intel_wakeref.h"
enum pipe;
-struct drm_i915_private;
struct intel_connector;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
@@ -43,16 +43,16 @@ void intel_pps_wait_power_cycle(struct intel_dp *intel_dp);
bool intel_pps_init(struct intel_dp *intel_dp);
void intel_pps_init_late(struct intel_dp *intel_dp);
void intel_pps_encoder_reset(struct intel_dp *intel_dp);
-void intel_pps_reset_all(struct drm_i915_private *i915);
+void intel_pps_reset_all(struct intel_display *display);
void vlv_pps_init(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
-void intel_pps_unlock_regs_wa(struct drm_i915_private *i915);
-void intel_pps_setup(struct drm_i915_private *i915);
+void intel_pps_unlock_regs_wa(struct intel_display *display);
+void intel_pps_setup(struct intel_display *display);
void intel_pps_connector_debugfs_add(struct intel_connector *connector);
-void assert_pps_unlocked(struct drm_i915_private *i915, enum pipe pipe);
+void assert_pps_unlocked(struct intel_display *display, enum pipe pipe);
#endif /* __INTEL_PPS_H__ */
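The intel_psr.c conversion that follows repeats one idiom: derive the new struct intel_display pointer first via to_intel_display(), then fetch the legacy drm_i915_private with to_i915(display->drm) only where a helper still needs it. A sketch of that transitional layout, using hypothetical stand-in types (display_ctx, legacy_priv, drm_dev) rather than the driver's real ones:

struct drm_dev { void *dev_private; };

struct display_ctx { struct drm_dev *drm; };

struct legacy_priv {
	struct drm_dev drm;
	struct display_ctx display;
};

/* New-style lookup: any display object can reach its context. */
static struct display_ctx *ctx_from_drm(struct drm_dev *drm)
{
	return &((struct legacy_priv *)drm->dev_private)->display;
}

/* Legacy lookup, kept only for helpers not yet converted. */
static struct legacy_priv *priv_from_drm(struct drm_dev *drm)
{
	return drm->dev_private;
}

static void converted_helper(struct display_ctx *display)
{
	/* Derive the legacy pointer on demand instead of passing it in. */
	struct legacy_priv *priv = priv_from_drm(display->drm);

	(void)priv; /* ...call not-yet-converted code with priv... */
}

int main(void)
{
	static struct legacy_priv priv;

	priv.drm.dev_private = &priv;
	priv.display.drm = &priv.drm;

	converted_helper(ctx_from_drm(&priv.drm));
	return 0;
}

Keeping the legacy pointer as a local derived from display (rather than a second parameter) means each function's signature only changes once, and the local disappears when the last dev_priv user inside it is converted.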
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 257526362b39..1f83b3b67ea6 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -205,14 +205,14 @@ bool intel_encoder_can_psr(struct intel_encoder *encoder)
static bool psr_global_enabled(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
- if (i915->display.params.enable_psr == -1)
+ if (display->params.enable_psr == -1)
return connector->panel.vbt.psr.enable;
- return i915->display.params.enable_psr;
+ return display->params.enable_psr;
case I915_PSR_DEBUG_DISABLE:
return false;
default:
@@ -222,14 +222,14 @@ static bool psr_global_enabled(struct intel_dp *intel_dp)
static bool psr2_global_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
case I915_PSR_DEBUG_FORCE_PSR1:
return false;
default:
- if (i915->display.params.enable_psr == 1)
+ if (display->params.enable_psr == 1)
return false;
return true;
}
@@ -237,9 +237,9 @@ static bool psr2_global_enabled(struct intel_dp *intel_dp)
static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (i915->display.params.enable_psr != -1)
+ if (display->params.enable_psr != -1)
return false;
return true;
@@ -247,9 +247,9 @@ static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp)
static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if ((i915->display.params.enable_psr != -1) ||
+ if ((display->params.enable_psr != -1) ||
(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE))
return false;
return true;
@@ -257,111 +257,111 @@ static bool panel_replay_global_enabled(struct intel_dp *intel_dp)
static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_ERROR :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_ERROR :
EDP_PSR_ERROR(intel_dp->psr.transcoder);
}
static u32 psr_irq_post_exit_bit_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_POST_EXIT :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_POST_EXIT :
EDP_PSR_POST_EXIT(intel_dp->psr.transcoder);
}
static u32 psr_irq_pre_entry_bit_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_PRE_ENTRY :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_PRE_ENTRY :
EDP_PSR_PRE_ENTRY(intel_dp->psr.transcoder);
}
static u32 psr_irq_mask_get(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- return DISPLAY_VER(dev_priv) >= 12 ? TGL_PSR_MASK :
+ return DISPLAY_VER(display) >= 12 ? TGL_PSR_MASK :
EDP_PSR_MASK(intel_dp->psr.transcoder);
}
-static i915_reg_t psr_ctl_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_ctl_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_CTL(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_CTL(display, cpu_transcoder);
else
return HSW_SRD_CTL;
}
-static i915_reg_t psr_debug_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_debug_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_DEBUG(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_DEBUG(display, cpu_transcoder);
else
return HSW_SRD_DEBUG;
}
-static i915_reg_t psr_perf_cnt_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_perf_cnt_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_PERF_CNT(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_PERF_CNT(display, cpu_transcoder);
else
return HSW_SRD_PERF_CNT;
}
-static i915_reg_t psr_status_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_status_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_STATUS(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_STATUS(display, cpu_transcoder);
else
return HSW_SRD_STATUS;
}
-static i915_reg_t psr_imr_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_imr_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 12)
- return TRANS_PSR_IMR(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ return TRANS_PSR_IMR(display, cpu_transcoder);
else
return EDP_PSR_IMR;
}
-static i915_reg_t psr_iir_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_iir_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 12)
- return TRANS_PSR_IIR(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 12)
+ return TRANS_PSR_IIR(display, cpu_transcoder);
else
return EDP_PSR_IIR;
}
-static i915_reg_t psr_aux_ctl_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_aux_ctl_reg(struct intel_display *display,
enum transcoder cpu_transcoder)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_AUX_CTL(dev_priv, cpu_transcoder);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_AUX_CTL(display, cpu_transcoder);
else
return HSW_SRD_AUX_CTL;
}
-static i915_reg_t psr_aux_data_reg(struct drm_i915_private *dev_priv,
+static i915_reg_t psr_aux_data_reg(struct intel_display *display,
enum transcoder cpu_transcoder, int i)
{
- if (DISPLAY_VER(dev_priv) >= 8)
- return EDP_PSR_AUX_DATA(dev_priv, cpu_transcoder, i);
+ if (DISPLAY_VER(display) >= 8)
+ return EDP_PSR_AUX_DATA(display, cpu_transcoder, i);
else
return HSW_SRD_AUX_DATA(i);
}
static void psr_irq_control(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask;
@@ -373,80 +373,81 @@ static void psr_irq_control(struct intel_dp *intel_dp)
mask |= psr_irq_post_exit_bit_get(intel_dp) |
psr_irq_pre_entry_bit_get(intel_dp);
- intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
psr_irq_mask_get(intel_dp), ~mask);
}
-static void psr_event_print(struct drm_i915_private *i915,
+static void psr_event_print(struct intel_display *display,
u32 val, bool sel_update_enabled)
{
- drm_dbg_kms(&i915->drm, "PSR exit events: 0x%x\n", val);
+ drm_dbg_kms(display->drm, "PSR exit events: 0x%x\n", val);
if (val & PSR_EVENT_PSR2_WD_TIMER_EXPIRE)
- drm_dbg_kms(&i915->drm, "\tPSR2 watchdog timer expired\n");
+ drm_dbg_kms(display->drm, "\tPSR2 watchdog timer expired\n");
if ((val & PSR_EVENT_PSR2_DISABLED) && sel_update_enabled)
- drm_dbg_kms(&i915->drm, "\tPSR2 disabled\n");
+ drm_dbg_kms(display->drm, "\tPSR2 disabled\n");
if (val & PSR_EVENT_SU_DIRTY_FIFO_UNDERRUN)
- drm_dbg_kms(&i915->drm, "\tSU dirty FIFO underrun\n");
+ drm_dbg_kms(display->drm, "\tSU dirty FIFO underrun\n");
if (val & PSR_EVENT_SU_CRC_FIFO_UNDERRUN)
- drm_dbg_kms(&i915->drm, "\tSU CRC FIFO underrun\n");
+ drm_dbg_kms(display->drm, "\tSU CRC FIFO underrun\n");
if (val & PSR_EVENT_GRAPHICS_RESET)
- drm_dbg_kms(&i915->drm, "\tGraphics reset\n");
+ drm_dbg_kms(display->drm, "\tGraphics reset\n");
if (val & PSR_EVENT_PCH_INTERRUPT)
- drm_dbg_kms(&i915->drm, "\tPCH interrupt\n");
+ drm_dbg_kms(display->drm, "\tPCH interrupt\n");
if (val & PSR_EVENT_MEMORY_UP)
- drm_dbg_kms(&i915->drm, "\tMemory up\n");
+ drm_dbg_kms(display->drm, "\tMemory up\n");
if (val & PSR_EVENT_FRONT_BUFFER_MODIFY)
- drm_dbg_kms(&i915->drm, "\tFront buffer modification\n");
+ drm_dbg_kms(display->drm, "\tFront buffer modification\n");
if (val & PSR_EVENT_WD_TIMER_EXPIRE)
- drm_dbg_kms(&i915->drm, "\tPSR watchdog timer expired\n");
+ drm_dbg_kms(display->drm, "\tPSR watchdog timer expired\n");
if (val & PSR_EVENT_PIPE_REGISTERS_UPDATE)
- drm_dbg_kms(&i915->drm, "\tPIPE registers updated\n");
+ drm_dbg_kms(display->drm, "\tPIPE registers updated\n");
if (val & PSR_EVENT_REGISTER_UPDATE)
- drm_dbg_kms(&i915->drm, "\tRegister updated\n");
+ drm_dbg_kms(display->drm, "\tRegister updated\n");
if (val & PSR_EVENT_HDCP_ENABLE)
- drm_dbg_kms(&i915->drm, "\tHDCP enabled\n");
+ drm_dbg_kms(display->drm, "\tHDCP enabled\n");
if (val & PSR_EVENT_KVMR_SESSION_ENABLE)
- drm_dbg_kms(&i915->drm, "\tKVMR session enabled\n");
+ drm_dbg_kms(display->drm, "\tKVMR session enabled\n");
if (val & PSR_EVENT_VBI_ENABLE)
- drm_dbg_kms(&i915->drm, "\tVBI enabled\n");
+ drm_dbg_kms(display->drm, "\tVBI enabled\n");
if (val & PSR_EVENT_LPSP_MODE_EXIT)
- drm_dbg_kms(&i915->drm, "\tLPSP mode exited\n");
+ drm_dbg_kms(display->drm, "\tLPSP mode exited\n");
if ((val & PSR_EVENT_PSR_DISABLE) && !sel_update_enabled)
- drm_dbg_kms(&i915->drm, "\tPSR disabled\n");
+ drm_dbg_kms(display->drm, "\tPSR disabled\n");
}
void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
ktime_t time_ns = ktime_get();
if (psr_iir & psr_irq_pre_entry_bit_get(intel_dp)) {
intel_dp->psr.last_entry_attempt = time_ns;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[transcoder %s] PSR entry attempt in 2 vblanks\n",
transcoder_name(cpu_transcoder));
}
if (psr_iir & psr_irq_post_exit_bit_get(intel_dp)) {
intel_dp->psr.last_exit = time_ns;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"[transcoder %s] PSR exit completed\n",
transcoder_name(cpu_transcoder));
- if (DISPLAY_VER(dev_priv) >= 9) {
+ if (DISPLAY_VER(display) >= 9) {
u32 val;
val = intel_de_rmw(dev_priv,
PSR_EVENT(dev_priv, cpu_transcoder),
0, 0);
- psr_event_print(dev_priv, val, intel_dp->psr.sel_update_enabled);
+ psr_event_print(display, val, intel_dp->psr.sel_update_enabled);
}
}
if (psr_iir & psr_irq_psr_error_bit_get(intel_dp)) {
- drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
+ drm_warn(display->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
intel_dp->psr.irq_aux_error = true;
@@ -459,7 +460,7 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
* again, so we don't care about unmasking the interrupt
* or clearing irq_aux_error.
*/
- intel_de_rmw(dev_priv, psr_imr_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, psr_imr_reg(display, cpu_transcoder),
0, psr_irq_psr_error_bit_get(intel_dp));
queue_work(dev_priv->unordered_wq, &intel_dp->psr.work);
@@ -468,14 +469,14 @@ void intel_psr_irq_handler(struct intel_dp *intel_dp, u32 psr_iir)
static u8 intel_dp_get_sink_sync_latency(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 val = 8; /* assume the worst if we can't read the value */
if (drm_dp_dpcd_readb(&intel_dp->aux,
DP_SYNCHRONIZATION_LATENCY_IN_SINK, &val) == 1)
val &= DP_MAX_RESYNC_FRAME_COUNT_MASK;
else
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unable to get sink synchronization latency, assuming 8 frames\n");
return val;
}
@@ -516,7 +517,7 @@ intel_dp_get_su_y_granularity_offset(struct intel_dp *intel_dp)
*/
static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
ssize_t r;
u16 w;
u8 y;
@@ -542,7 +543,7 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
intel_dp_get_su_x_granularity_offset(intel_dp),
&w, 2);
if (r != 2)
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unable to read selective update x granularity\n");
/*
* Spec says that if the value read is 0 the default granularity should
@@ -555,7 +556,7 @@ static void intel_dp_get_su_granularity(struct intel_dp *intel_dp)
intel_dp_get_su_y_granularity_offset(intel_dp),
&y, 1);
if (r != 1) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Unable to read selective update y granularity\n");
y = 4;
}
@@ -569,17 +570,17 @@ exit:
static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (intel_dp_is_edp(intel_dp)) {
if (!intel_alpm_aux_less_wake_supported(intel_dp)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel doesn't support AUX-less ALPM, eDP Panel Replay not possible\n");
return;
}
if (!(intel_dp->pr_dpcd & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel doesn't support early transport, eDP Panel Replay not possible\n");
return;
}
@@ -590,7 +591,7 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
if (intel_dp->pr_dpcd & DP_PANEL_REPLAY_SU_SUPPORT)
intel_dp->psr.sink_panel_replay_su_support = true;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel replay %sis supported by panel\n",
intel_dp->psr.sink_panel_replay_su_support ?
"selective_update " : "");
@@ -598,20 +599,19 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp)
static void _psr_init_dpcd(struct intel_dp *intel_dp)
{
- struct drm_i915_private *i915 =
- to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
+ struct intel_display *display = to_intel_display(intel_dp);
- drm_dbg_kms(&i915->drm, "eDP panel supports PSR version %x\n",
+ drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n",
intel_dp->psr_dpcd[0]);
if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel lacks power state control, PSR cannot be enabled\n");
return;
}
@@ -620,7 +620,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
intel_dp->psr.sink_sync_latency =
intel_dp_get_sink_sync_latency(intel_dp);
- if (DISPLAY_VER(i915) >= 9 &&
+ if (DISPLAY_VER(display) >= 9 &&
intel_dp->psr_dpcd[0] >= DP_PSR2_WITH_Y_COORD_IS_SUPPORTED) {
bool y_req = intel_dp->psr_dpcd[1] &
DP_PSR2_SU_Y_COORDINATE_REQUIRED;
@@ -638,7 +638,7 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp)
*/
intel_dp->psr.sink_psr2_support = y_req &&
intel_alpm_aux_wake_supported(intel_dp);
- drm_dbg_kms(&i915->drm, "PSR2 %ssupported\n",
+ drm_dbg_kms(display->drm, "PSR2 %ssupported\n",
intel_dp->psr.sink_psr2_support ? "" : "not ");
}
}
@@ -663,7 +663,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 aux_clock_divider, aux_ctl;
/* write DP_SET_POWER=D0 */
@@ -679,7 +680,7 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
intel_de_write(dev_priv,
- psr_aux_data_reg(dev_priv, cpu_transcoder, i >> 2),
+ psr_aux_data_reg(display, cpu_transcoder, i >> 2),
intel_dp_aux_pack(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -694,15 +695,15 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
EDP_PSR_AUX_CTL_PRECHARGE_2US_MASK |
EDP_PSR_AUX_CTL_BIT_CLOCK_2X_MASK;
- intel_de_write(dev_priv, psr_aux_ctl_reg(dev_priv, cpu_transcoder),
+ intel_de_write(display, psr_aux_ctl_reg(display, cpu_transcoder),
aux_ctl);
}
static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (DISPLAY_VER(i915) < 20 || !intel_dp_is_edp(intel_dp) ||
+ if (DISPLAY_VER(display) < 20 || !intel_dp_is_edp(intel_dp) ||
intel_dp->psr.debug & I915_PSR_DEBUG_SU_REGION_ET_DISABLE)
return false;
@@ -741,7 +742,7 @@ static void _panel_replay_enable_sink(struct intel_dp *intel_dp,
static void _psr_enable_sink(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
u8 val = DP_PSR_ENABLE;
if (crtc_state->has_sel_update) {
@@ -750,7 +751,7 @@ static void _psr_enable_sink(struct intel_dp *intel_dp,
if (intel_dp->psr.link_standby)
val |= DP_PSR_MAIN_LINK_ACTIVE;
- if (DISPLAY_VER(i915) >= 8)
+ if (DISPLAY_VER(display) >= 8)
val |= DP_PSR_CRC_VERIFICATION;
}
@@ -802,14 +803,15 @@ void intel_psr_enable_sink(struct intel_dp *intel_dp,
static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
u32 val = 0;
- if (DISPLAY_VER(dev_priv) >= 11)
+ if (DISPLAY_VER(display) >= 11)
val |= EDP_PSR_TP4_TIME_0us;
- if (dev_priv->display.params.psr_safest_params) {
+ if (display->params.psr_safest_params) {
val |= EDP_PSR_TP1_TIME_2500us;
val |= EDP_PSR_TP2_TP3_TIME_2500us;
goto check_tp3_sel;
@@ -854,8 +856,8 @@ check_tp3_sel:
static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
int idle_frames;
/* Let's use 6 as the minimum to cover all known cases including the
@@ -864,7 +866,7 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
idle_frames = max(6, connector->panel.vbt.psr.idle_frames);
idle_frames = max(idle_frames, intel_dp->psr.sink_sync_latency + 1);
- if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
+ if (drm_WARN_ON(display->drm, idle_frames > 0xf))
idle_frames = 0xf;
return idle_frames;
@@ -872,14 +874,15 @@ static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
static void hsw_activate_psr1(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 max_sleep_time = 0x1f;
u32 val = EDP_PSR_ENABLE;
val |= EDP_PSR_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(dev_priv) < 20)
+ if (DISPLAY_VER(display) < 20)
val |= EDP_PSR_MAX_SLEEP_TIME(max_sleep_time);
if (IS_HASWELL(dev_priv))
@@ -890,23 +893,23 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
val |= intel_psr1_get_tp_time(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 8)
+ if (DISPLAY_VER(display) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
- intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, psr_ctl_reg(display, cpu_transcoder),
~EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK, val);
}
static u32 intel_psr2_get_tp_time(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector = intel_dp->attached_connector;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val = 0;
- if (dev_priv->display.params.psr_safest_params)
+ if (display->params.psr_safest_params)
return EDP_PSR2_TP2_TIME_2500us;
if (connector->panel.vbt.psr.psr2_tp2_tp3_wakeup_time_us >= 0 &&
@@ -950,7 +953,7 @@ static u8 frames_before_su_entry(struct intel_dp *intel_dp)
static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
@@ -961,38 +964,39 @@ static void dg2_activate_panel_replay(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder),
val);
}
- intel_de_rmw(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, intel_dp->psr.transcoder),
+ intel_de_rmw(display,
+ PSR2_MAN_TRK_CTL(display, intel_dp->psr.transcoder),
0, ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME);
- intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
+ intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder), 0,
TRANS_DP2_PANEL_REPLAY_ENABLE);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val = EDP_PSR2_ENABLE;
u32 psr_val = 0;
val |= EDP_PSR2_IDLE_FRAMES(psr_compute_idle_frames(intel_dp));
- if (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))
+ if (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))
val |= EDP_SU_TRACK_ENABLE;
- if (DISPLAY_VER(dev_priv) >= 10 && DISPLAY_VER(dev_priv) < 13)
+ if (DISPLAY_VER(display) >= 10 && DISPLAY_VER(display) < 13)
val |= EDP_Y_COORDINATE_ENABLE;
val |= EDP_PSR2_FRAME_BEFORE_SU(frames_before_su_entry(intel_dp));
val |= intel_psr2_get_tp_time(intel_dp);
- if (DISPLAY_VER(dev_priv) >= 12 && DISPLAY_VER(dev_priv) < 20) {
+ if (DISPLAY_VER(display) >= 12 && DISPLAY_VER(display) < 20) {
if (psr2_block_count(intel_dp) > 2)
val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3;
else
@@ -1000,7 +1004,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
}
/* Wa_22012278275:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) {
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_E0)) {
static const u8 map[] = {
2, /* 5 lines */
1, /* 6 lines */
@@ -1023,12 +1027,12 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
tmp = map[intel_dp->alpm_parameters.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES];
val |= TGL_EDP_PSR2_FAST_WAKE(tmp + TGL_EDP_PSR2_FAST_WAKE_MIN_LINES);
- } else if (DISPLAY_VER(dev_priv) >= 20) {
+ } else if (DISPLAY_VER(display) >= 20) {
val |= LNL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
- } else if (DISPLAY_VER(dev_priv) >= 12) {
+ } else if (DISPLAY_VER(display) >= 12) {
val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
- } else if (DISPLAY_VER(dev_priv) >= 9) {
+ } else if (DISPLAY_VER(display) >= 9) {
val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->alpm_parameters.io_wake_lines);
val |= EDP_PSR2_FAST_WAKE(intel_dp->alpm_parameters.fast_wake_lines);
}
@@ -1036,18 +1040,18 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
if (intel_dp->psr.req_psr2_sdp_prior_scanline)
val |= EDP_PSR2_SU_SDP_SCANLINE;
- if (DISPLAY_VER(dev_priv) >= 20)
+ if (DISPLAY_VER(display) >= 20)
psr_val |= LNL_EDP_PSR_ENTRY_SETUP_FRAMES(intel_dp->psr.entry_setup_frames);
if (intel_dp->psr.psr2_sel_fetch_enabled) {
u32 tmp;
- tmp = intel_de_read(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
- drm_WARN_ON(&dev_priv->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
- } else if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder), 0);
+ tmp = intel_de_read(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder));
+ drm_WARN_ON(display->drm, !(tmp & PSR2_MAN_TRK_CTL_ENABLE));
+ } else if (HAS_PSR2_SEL_FETCH(display)) {
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder), 0);
}
if (intel_dp->psr.su_region_et_enabled)
@@ -1057,19 +1061,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
* recommends keeping this bit unset while PSR2 is enabled.
*/
- intel_de_write(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder), psr_val);
+ intel_de_write(display, psr_ctl_reg(display, cpu_transcoder), psr_val);
- intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder), val);
+ intel_de_write(display, EDP_PSR2_CTL(display, cpu_transcoder), val);
}
static bool
-transcoder_has_psr2(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
+transcoder_has_psr2(struct intel_display *display, enum transcoder cpu_transcoder)
{
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
return cpu_transcoder == TRANSCODER_A || cpu_transcoder == TRANSCODER_B;
- else if (DISPLAY_VER(dev_priv) >= 12)
+ else if (DISPLAY_VER(display) >= 12)
return cpu_transcoder == TRANSCODER_A;
- else if (DISPLAY_VER(dev_priv) >= 9)
+ else if (DISPLAY_VER(display) >= 9)
return cpu_transcoder == TRANSCODER_EDP;
else
return false;
@@ -1087,17 +1093,18 @@ static u32 intel_get_frame_time_us(const struct intel_crtc_state *crtc_state)
static void psr2_program_idle_frames(struct intel_dp *intel_dp,
u32 idle_frames)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- intel_de_rmw(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, EDP_PSR2_CTL(display, cpu_transcoder),
EDP_PSR2_IDLE_FRAMES_MASK,
EDP_PSR2_IDLE_FRAMES(idle_frames));
}
static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
psr2_program_idle_frames(intel_dp, 0);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_DC3CO);
@@ -1105,7 +1112,8 @@ static void tgl_psr2_enable_dc3co(struct intel_dp *intel_dp)
static void tgl_psr2_disable_dc3co(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
psr2_program_idle_frames(intel_dp, psr_compute_idle_frames(intel_dp));
@@ -1140,12 +1148,13 @@ static bool
dc3co_is_pipe_port_compatible(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum port port = dig_port->base.port;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
return pipe <= PIPE_B && port <= PORT_B;
else
return pipe == PIPE_A && port == PORT_A;
@@ -1155,9 +1164,10 @@ static void
tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- struct i915_power_domains *power_domains = &dev_priv->display.power.domains;
+ struct i915_power_domains *power_domains = &display->power.domains;
u32 exit_scanlines;
/*
@@ -1181,7 +1191,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
return;
/* Wa_16011303918:adl-p */
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0))
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0))
return;
/*
@@ -1191,7 +1201,7 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
exit_scanlines =
intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
- if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
+ if (drm_WARN_ON(display->drm, exit_scanlines > crtc_vdisplay))
return;
crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
@@ -1200,17 +1210,17 @@ tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (!dev_priv->display.params.enable_psr2_sel_fetch &&
+ if (!display->params.enable_psr2_sel_fetch &&
intel_dp->psr.debug != I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 sel fetch not enabled, disabled by parameter\n");
return false;
}
if (crtc_state->uapi.async_flip) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 sel fetch not enabled, async flip enabled\n");
return false;
}
@@ -1221,7 +1231,8 @@ static bool intel_psr2_sel_fetch_config_valid(struct intel_dp *intel_dp,
static bool psr2_granularity_check(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
const int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
const int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
@@ -1243,7 +1254,7 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
* For other platforms with SW tracking we can adjust the y coordinates
* to match sink requirement if multiple of 4.
*/
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14)
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14)
y_granularity = intel_dp->psr.su_y_granularity;
else if (intel_dp->psr.su_y_granularity <= 2)
y_granularity = 4;
@@ -1264,8 +1275,8 @@ static bool psr2_granularity_check(struct intel_dp *intel_dp,
static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->uapi.adjusted_mode;
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 hblank_total, hblank_ns, req_ns;
hblank_total = adjusted_mode->crtc_hblank_end - adjusted_mode->crtc_hblank_start;
@@ -1278,7 +1289,7 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
return true;
/* Not supported <13 / Wa_22012279113:adl-p */
- if (DISPLAY_VER(dev_priv) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
+ if (DISPLAY_VER(display) < 14 || intel_dp->edp_dpcd[0] < DP_EDP_14b)
return false;
crtc_state->req_psr2_sdp_prior_scanline = true;
@@ -1288,12 +1299,12 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d
static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
const struct drm_display_mode *adjusted_mode)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
int entry_setup_frames = 0;
if (psr_setup_time < 0) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: Invalid PSR setup time (0x%02x)\n",
intel_dp->psr_dpcd[1]);
return -ETIME;
@@ -1301,14 +1312,14 @@ static int intel_psr_entry_setup_frames(struct intel_dp *intel_dp,
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
- if (DISPLAY_VER(i915) >= 20) {
+ if (DISPLAY_VER(display) >= 20) {
/* setup entry frames can be up to 3 frames */
entry_setup_frames = 1;
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR setup entry frames %d\n",
entry_setup_frames);
} else {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup time (%d us) too long\n",
psr_setup_time);
return -ETIME;
@@ -1322,7 +1333,7 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool aux_less)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
int vblank = crtc_state->hw.adjusted_mode.crtc_vblank_end -
crtc_state->hw.adjusted_mode.crtc_vblank_start;
int wake_lines;
@@ -1330,7 +1341,7 @@ static bool wake_lines_fit_into_vblank(struct intel_dp *intel_dp,
if (aux_less)
wake_lines = intel_dp->alpm_parameters.aux_less_wake_lines;
else
- wake_lines = DISPLAY_VER(i915) < 20 ?
+ wake_lines = DISPLAY_VER(display) < 20 ?
psr2_block_count_lines(intel_dp) :
intel_dp->alpm_parameters.io_wake_lines;
@@ -1348,16 +1359,16 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
bool aux_less)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!intel_alpm_compute_params(intel_dp, crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR2/Panel Replay not enabled, Unable to use long enough wake times\n");
return false;
}
if (!wake_lines_fit_into_vblank(intel_dp, crtc_state, aux_less)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"PSR2/Panel Replay not enabled, too short vblank time\n");
return false;
}
@@ -1368,7 +1379,8 @@ static bool alpm_config_valid(struct intel_dp *intel_dp,
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
int crtc_hdisplay = crtc_state->hw.adjusted_mode.crtc_hdisplay;
int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay;
int psr_max_h = 0, psr_max_v = 0, max_bpp = 0;
@@ -1378,24 +1390,26 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* JSL and EHL only support eDP 1.3 */
if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 not supported by phy\n");
+ drm_dbg_kms(display->drm, "PSR2 not supported by phy\n");
return false;
}
/* Wa_16011181250 */
if (IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) ||
IS_DG2(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 is defeatured for this platform\n");
+ drm_dbg_kms(display->drm,
+ "PSR2 is defeatured for this platform\n");
return false;
}
- if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- drm_dbg_kms(&dev_priv->drm, "PSR2 not completely functional in this stepping\n");
+ if (IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(display->drm,
+ "PSR2 not completely functional in this stepping\n");
return false;
}
- if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
- drm_dbg_kms(&dev_priv->drm,
+ if (!transcoder_has_psr2(display, crtc_state->cpu_transcoder)) {
+ drm_dbg_kms(display->drm,
"PSR2 not supported in transcoder %s\n",
transcoder_name(crtc_state->cpu_transcoder));
return false;
@@ -1407,28 +1421,28 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable &&
- (DISPLAY_VER(dev_priv) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
- drm_dbg_kms(&dev_priv->drm,
+ (DISPLAY_VER(display) < 14 && !IS_ALDERLAKE_P(dev_priv))) {
+ drm_dbg_kms(display->drm,
"PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
- if (DISPLAY_VER(dev_priv) >= 12) {
+ if (DISPLAY_VER(display) >= 12) {
psr_max_h = 5120;
psr_max_v = 3200;
max_bpp = 30;
- } else if (DISPLAY_VER(dev_priv) >= 10) {
+ } else if (DISPLAY_VER(display) >= 10) {
psr_max_h = 4096;
psr_max_v = 2304;
max_bpp = 24;
- } else if (DISPLAY_VER(dev_priv) == 9) {
+ } else if (DISPLAY_VER(display) == 9) {
psr_max_h = 3640;
psr_max_v = 2304;
max_bpp = 24;
}
if (crtc_state->pipe_bpp > max_bpp) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 not enabled, pipe bpp %d > max supported %d\n",
crtc_state->pipe_bpp, max_bpp);
return false;
@@ -1436,8 +1450,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
/* Wa_16011303918:adl-p */
if (crtc_state->vrr.enable &&
- IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(dev_priv, STEP_A0, STEP_B0)) {
- drm_dbg_kms(&dev_priv->drm,
+ IS_ALDERLAKE_P(dev_priv) && IS_DISPLAY_STEP(display, STEP_A0, STEP_B0)) {
+ drm_dbg_kms(display->drm,
"PSR2 not enabled, not compatible with HW stepping + VRR\n");
return false;
}
@@ -1447,7 +1461,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
if (!crtc_state->enable_psr2_sel_fetch &&
(crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
crtc_hdisplay, crtc_vdisplay,
psr_max_h, psr_max_v);
@@ -1462,18 +1476,19 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
- if (HAS_PSR2_SEL_FETCH(dev_priv) &&
+ if (HAS_PSR2_SEL_FETCH(display) &&
!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) &&
- !HAS_PSR_HW_TRACKING(dev_priv)) {
- drm_dbg_kms(&dev_priv->drm,
+ !HAS_PSR_HW_TRACKING(display)) {
+ drm_dbg_kms(display->drm,
"Selective update not enabled, selective fetch not valid and no HW tracking available\n");
goto unsupported;
}
if (!psr2_global_enabled(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm, "Selective update disabled by flag\n");
+ drm_dbg_kms(display->drm,
+ "Selective update disabled by flag\n");
goto unsupported;
}
@@ -1481,23 +1496,23 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp,
goto unsupported;
if (!_compute_psr2_sdp_prior_scanline_indication(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Selective update not enabled, SDP indication do not fit in hblank\n");
goto unsupported;
}
- if (crtc_state->has_panel_replay && (DISPLAY_VER(dev_priv) < 14 ||
+ if (crtc_state->has_panel_replay && (DISPLAY_VER(display) < 14 ||
!intel_dp->psr.sink_panel_replay_su_support))
goto unsupported;
if (crtc_state->crc_enabled) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Selective update not enabled because it would inhibit pipe CRC calculation\n");
goto unsupported;
}
if (!psr2_granularity_check(intel_dp, crtc_state)) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Selective update not enabled, SU granularity not compatible\n");
goto unsupported;
}
@@ -1515,7 +1530,7 @@ unsupported:
static bool _psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
int entry_setup_frames;
@@ -1534,7 +1549,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp,
if (entry_setup_frames >= 0) {
intel_dp->psr.entry_setup_frames = entry_setup_frames;
} else {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: PSR setup timing not met\n");
return false;
}
@@ -1547,7 +1562,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_connector *connector =
to_intel_connector(conn_state->connector);
struct intel_hdcp *hdcp = &connector->hdcp;
@@ -1556,7 +1571,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
if (!panel_replay_global_enabled(intel_dp)) {
- drm_dbg_kms(&i915->drm, "Panel Replay disabled by flag\n");
+ drm_dbg_kms(display->drm, "Panel Replay disabled by flag\n");
return false;
}
@@ -1567,7 +1582,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
/* 128b/132b Panel Replay is not supported on eDP */
if (intel_dp_is_uhbr(crtc_state)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel Replay is not supported with 128b/132b\n");
return false;
}
@@ -1578,7 +1593,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
(conn_state->content_protection ==
DRM_MODE_CONTENT_PROTECTION_ENABLED && hdcp->value ==
DRM_MODE_CONTENT_PROTECTION_UNDESIRED)) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel Replay is not supported with HDCP\n");
return false;
}
@@ -1587,7 +1602,7 @@ _panel_replay_compute_config(struct intel_dp *intel_dp,
return false;
if (crtc_state->crc_enabled) {
- drm_dbg_kms(&i915->drm,
+ drm_dbg_kms(display->drm,
"Panel Replay not enabled because it would inhibit pipe CRC calculation\n");
return false;
}
@@ -1599,22 +1614,22 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
if (!psr_global_enabled(intel_dp)) {
- drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
+ drm_dbg_kms(display->drm, "PSR disabled by flag\n");
return;
}
if (intel_dp->psr.sink_not_reliable) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR sink implementation is not reliable\n");
return;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR condition failed: Interlaced mode enabled\n");
return;
}
@@ -1625,7 +1640,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* PSR is a transcoder level feature.
*/
if (crtc_state->joiner_pipes) {
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR disabled due to joiner\n");
return;
}
@@ -1646,7 +1661,7 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
void intel_psr_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct intel_display *display = to_intel_display(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
struct intel_dp *intel_dp;
@@ -1679,18 +1694,18 @@ void intel_psr_get_config(struct intel_encoder *encoder,
if (!intel_dp->psr.sel_update_enabled)
goto unlock;
- if (HAS_PSR2_SEL_FETCH(dev_priv)) {
- val = intel_de_read(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder));
+ if (HAS_PSR2_SEL_FETCH(display)) {
+ val = intel_de_read(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder));
if (val & PSR2_MAN_TRK_CTL_ENABLE)
pipe_config->enable_psr2_sel_fetch = true;
}
pipe_config->enable_psr2_su_region_et = intel_dp->psr.su_region_et_enabled;
- if (DISPLAY_VER(dev_priv) >= 12) {
- val = intel_de_read(dev_priv,
- TRANS_EXITLINE(dev_priv, cpu_transcoder));
+ if (DISPLAY_VER(display) >= 12) {
+ val = intel_de_read(display,
+ TRANS_EXITLINE(display, cpu_transcoder));
pipe_config->dc3co_exitline = REG_FIELD_GET(EXITLINE_MASK, val);
}
unlock:
@@ -1699,17 +1714,17 @@ unlock:
static void intel_psr_activate(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
- drm_WARN_ON(&dev_priv->drm,
- transcoder_has_psr2(dev_priv, cpu_transcoder) &&
- intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv, cpu_transcoder)) & EDP_PSR2_ENABLE);
+ drm_WARN_ON(display->drm,
+ transcoder_has_psr2(display, cpu_transcoder) &&
+ intel_de_read(display, EDP_PSR2_CTL(display, cpu_transcoder)) & EDP_PSR2_ENABLE);
- drm_WARN_ON(&dev_priv->drm,
- intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(display->drm,
+ intel_de_read(display, psr_ctl_reg(display, cpu_transcoder)) & EDP_PSR_ENABLE);
- drm_WARN_ON(&dev_priv->drm, intel_dp->psr.active);
+ drm_WARN_ON(display->drm, intel_dp->psr.active);
lockdep_assert_held(&intel_dp->psr.lock);
@@ -1748,30 +1763,31 @@ static u32 wa_16013835468_bit_get(struct intel_dp *intel_dp)
static void wm_optimization_wa(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
bool set_wa_bit = false;
/* Wa_14015648006 */
- if (IS_DISPLAY_VER(dev_priv, 11, 14))
+ if (IS_DISPLAY_VER(display, 11, 14))
set_wa_bit |= crtc_state->wm_level_disabled;
/* Wa_16013835468 */
- if (DISPLAY_VER(dev_priv) == 12)
+ if (DISPLAY_VER(display) == 12)
set_wa_bit |= crtc_state->hw.adjusted_mode.crtc_vblank_start !=
crtc_state->hw.adjusted_mode.crtc_vdisplay;
if (set_wa_bit)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
0, wa_16013835468_bit_get(intel_dp));
else
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
wa_16013835468_bit_get(intel_dp), 0);
}
static void intel_psr_enable_source(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 mask = 0;
@@ -1779,7 +1795,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* Only HSW and BDW have PSR AUX registers that need to be set up.
* SKL+ use hardcoded values for PSR AUX transactions
*/
- if (DISPLAY_VER(dev_priv) < 9)
+ if (DISPLAY_VER(display) < 9)
hsw_psr_setup_aux(intel_dp);
/*
@@ -1796,7 +1812,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* Panel Replay on DP: No bits are applicable
* Panel Replay on eDP: All bits are applicable
*/
- if (DISPLAY_VER(dev_priv) < 20 || intel_dp_is_edp(intel_dp))
+ if (DISPLAY_VER(display) < 20 || intel_dp_is_edp(intel_dp))
mask = EDP_PSR_DEBUG_MASK_HPD;
if (intel_dp_is_edp(intel_dp)) {
@@ -1810,17 +1826,17 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* As a workaround leave LPSP unmasked to prevent PSR entry
* when external displays are active.
*/
- if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+ if (DISPLAY_VER(display) >= 8 || IS_HASWELL_ULT(dev_priv))
mask |= EDP_PSR_DEBUG_MASK_LPSP;
- if (DISPLAY_VER(dev_priv) < 20)
+ if (DISPLAY_VER(display) < 20)
mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
/*
* No separate pipe reg write mask on hsw/bdw, so have to unmask all
* registers in order to keep the CURSURFLIVE tricks working :(
*/
- if (IS_DISPLAY_VER(dev_priv, 9, 10))
+ if (IS_DISPLAY_VER(display, 9, 10))
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
/* allow PSR with sprite enabled */
@@ -1828,7 +1844,7 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
mask |= EDP_PSR_DEBUG_MASK_SPRITE_ENABLE;
}
- intel_de_write(dev_priv, psr_debug_reg(dev_priv, cpu_transcoder), mask);
+ intel_de_write(display, psr_debug_reg(display, cpu_transcoder), mask);
psr_irq_control(intel_dp);
@@ -1837,13 +1853,13 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* transcoder, EXITLINE will need to be unset when disabling PSR
*/
if (intel_dp->psr.dc3co_exitline)
- intel_de_rmw(dev_priv,
- TRANS_EXITLINE(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ TRANS_EXITLINE(display, cpu_transcoder),
EXITLINE_MASK,
intel_dp->psr.dc3co_exitline << EXITLINE_SHIFT | EXITLINE_ENABLE);
- if (HAS_PSR_HW_TRACKING(dev_priv) && HAS_PSR2_SEL_FETCH(dev_priv))
- intel_de_rmw(dev_priv, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
+ if (HAS_PSR_HW_TRACKING(display) && HAS_PSR2_SEL_FETCH(display))
+ intel_de_rmw(display, CHICKEN_PAR1_1, IGNORE_PSR2_HW_TRACKING,
intel_dp->psr.psr2_sel_fetch_enabled ?
IGNORE_PSR2_HW_TRACKING : 0);
@@ -1857,8 +1873,8 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
wm_optimization_wa(intel_dp, crtc_state);
if (intel_dp->psr.sel_update_enabled) {
- if (DISPLAY_VER(dev_priv) == 9)
- intel_de_rmw(dev_priv, CHICKEN_TRANS(cpu_transcoder), 0,
+ if (DISPLAY_VER(display) == 9)
+ intel_de_rmw(display, CHICKEN_TRANS(cpu_transcoder), 0,
PSR2_VSC_ENABLE_PROG_HEADER |
PSR2_ADD_VERTICAL_LINE_COUNT);
@@ -1868,27 +1884,27 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
* cause issues if non-supported panels are used.
*/
if (!intel_dp->psr.panel_replay_enabled &&
- (IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ (IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv)))
- intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
0, ADLP_1_BASED_X_GRANULARITY);
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (!intel_dp->psr.panel_replay_enabled &&
- IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
+ IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0))
+ intel_de_rmw(display,
+ MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
0,
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS);
else if (IS_ALDERLAKE_P(dev_priv))
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC, 0,
+ intel_de_rmw(display, CLKGATE_DIS_MISC, 0,
CLKGATE_DIS_MISC_DMASC_GATING_DIS);
}
}
static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
@@ -1903,11 +1919,11 @@ static bool psr_interrupt_error_check(struct intel_dp *intel_dp)
* first time that PSR HW tries to activate, so let's keep PSR disabled
* to avoid any rendering problems.
*/
- val = intel_de_read(dev_priv, psr_iir_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, psr_iir_reg(display, cpu_transcoder));
val &= psr_irq_psr_error_bit_get(intel_dp);
if (val) {
intel_dp->psr.sink_not_reliable = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR interruption error set, not enabling PSR\n");
return false;
}
@@ -1919,11 +1935,11 @@ no_err:
static void intel_psr_enable_locked(struct intel_dp *intel_dp,
const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
- drm_WARN_ON(&dev_priv->drm, intel_dp->psr.enabled);
+ drm_WARN_ON(display->drm, intel_dp->psr.enabled);
intel_dp->psr.sel_update_enabled = crtc_state->has_sel_update;
intel_dp->psr.panel_replay_enabled = crtc_state->has_panel_replay;
@@ -1944,9 +1960,9 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
return;
if (intel_dp->psr.panel_replay_enabled) {
- drm_dbg_kms(&dev_priv->drm, "Enabling Panel Replay\n");
+ drm_dbg_kms(display->drm, "Enabling Panel Replay\n");
} else {
- drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ drm_dbg_kms(display->drm, "Enabling PSR%s\n",
intel_dp->psr.sel_update_enabled ? "2" : "1");
/*
@@ -1968,68 +1984,71 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp,
static void intel_psr_exit(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
u32 val;
if (!intel_dp->psr.active) {
- if (transcoder_has_psr2(dev_priv, cpu_transcoder)) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv, cpu_transcoder));
- drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
+ if (transcoder_has_psr2(display, cpu_transcoder)) {
+ val = intel_de_read(display,
+ EDP_PSR2_CTL(display, cpu_transcoder));
+ drm_WARN_ON(display->drm, val & EDP_PSR2_ENABLE);
}
- val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
- drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
+ val = intel_de_read(display,
+ psr_ctl_reg(display, cpu_transcoder));
+ drm_WARN_ON(display->drm, val & EDP_PSR_ENABLE);
return;
}
if (intel_dp->psr.panel_replay_enabled) {
- intel_de_rmw(dev_priv, TRANS_DP2_CTL(intel_dp->psr.transcoder),
+ intel_de_rmw(display, TRANS_DP2_CTL(intel_dp->psr.transcoder),
TRANS_DP2_PANEL_REPLAY_ENABLE, 0);
} else if (intel_dp->psr.sel_update_enabled) {
tgl_disallow_dc3co_on_psr2_exit(intel_dp);
- val = intel_de_rmw(dev_priv,
- EDP_PSR2_CTL(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ EDP_PSR2_CTL(display, cpu_transcoder),
EDP_PSR2_ENABLE, 0);
- drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
+ drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE));
} else {
- val = intel_de_rmw(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder),
+ val = intel_de_rmw(display,
+ psr_ctl_reg(display, cpu_transcoder),
EDP_PSR_ENABLE, 0);
- drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
+ drm_WARN_ON(display->drm, !(val & EDP_PSR_ENABLE));
}
intel_dp->psr.active = false;
}
static void intel_psr_wait_exit_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
i915_reg_t psr_status;
u32 psr_status_mask;
if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
intel_dp->psr.panel_replay_enabled)) {
- psr_status = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
+ psr_status = EDP_PSR2_STATUS(display, cpu_transcoder);
psr_status_mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- psr_status = psr_status_reg(dev_priv, cpu_transcoder);
+ psr_status = psr_status_reg(display, cpu_transcoder);
psr_status_mask = EDP_PSR_STATUS_STATE_MASK;
}
/* Wait till PSR is idle */
- if (intel_de_wait_for_clear(dev_priv, psr_status,
+ if (intel_de_wait_for_clear(display, psr_status,
psr_status_mask, 2000))
- drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
+ drm_err(display->drm, "Timed out waiting PSR idle state\n");
}
static void intel_psr_disable_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
lockdep_assert_held(&intel_dp->psr.lock);
@@ -2038,9 +2057,9 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
return;
if (intel_dp->psr.panel_replay_enabled)
- drm_dbg_kms(&dev_priv->drm, "Disabling Panel Replay\n");
+ drm_dbg_kms(display->drm, "Disabling Panel Replay\n");
else
- drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ drm_dbg_kms(display->drm, "Disabling PSR%s\n",
intel_dp->psr.sel_update_enabled ? "2" : "1");
intel_psr_exit(intel_dp);
@@ -2050,19 +2069,19 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
* Wa_16013835468
* Wa_14015648006
*/
- if (DISPLAY_VER(dev_priv) >= 11)
- intel_de_rmw(dev_priv, GEN8_CHICKEN_DCPR_1,
+ if (DISPLAY_VER(display) >= 11)
+ intel_de_rmw(display, GEN8_CHICKEN_DCPR_1,
wa_16013835468_bit_get(intel_dp), 0);
if (intel_dp->psr.sel_update_enabled) {
/* Wa_16012604467:adlp,mtl[a0,b0] */
if (!intel_dp->psr.panel_replay_enabled &&
- IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0))
- intel_de_rmw(dev_priv,
- MTL_CLKGATE_DIS_TRANS(dev_priv, cpu_transcoder),
+ IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0))
+ intel_de_rmw(display,
+ MTL_CLKGATE_DIS_TRANS(display, cpu_transcoder),
MTL_CLKGATE_DIS_TRANS_DMASC_GATING_DIS, 0);
else if (IS_ALDERLAKE_P(dev_priv))
- intel_de_rmw(dev_priv, CLKGATE_DIS_MISC,
+ intel_de_rmw(display, CLKGATE_DIS_MISC,
CLKGATE_DIS_MISC_DMASC_GATING_DIS, 0);
}
@@ -2071,12 +2090,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Panel Replay on eDP is always using ALPM aux less. */
if (intel_dp->psr.panel_replay_enabled && intel_dp_is_edp(intel_dp)) {
- intel_de_rmw(dev_priv, ALPM_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display, ALPM_CTL(display, cpu_transcoder),
ALPM_CTL_ALPM_ENABLE |
ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
- intel_de_rmw(dev_priv,
- PORT_ALPM_CTL(dev_priv, cpu_transcoder),
+ intel_de_rmw(display,
+ PORT_ALPM_CTL(display, cpu_transcoder),
PORT_ALPM_CTL_ALPM_AUX_LESS_ENABLE, 0);
}
@@ -2107,12 +2126,12 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
if (!old_crtc_state->has_psr)
return;
- if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(intel_dp)))
+ if (drm_WARN_ON(display->drm, !CAN_PSR(intel_dp)))
return;
mutex_lock(&intel_dp->psr.lock);
@@ -2132,7 +2151,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
*/
void intel_psr_pause(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
if (!CAN_PSR(intel_dp) && !CAN_PANEL_REPLAY(intel_dp))
@@ -2146,7 +2165,7 @@ void intel_psr_pause(struct intel_dp *intel_dp)
}
/* If we ever hit this, we will need to add a refcount to pause/resume */
- drm_WARN_ON(&dev_priv->drm, psr->paused);
+ drm_WARN_ON(display->drm, psr->paused);
intel_psr_exit(intel_dp);
intel_psr_wait_exit_locked(intel_dp);
@@ -2183,45 +2202,53 @@ unlock:
mutex_unlock(&psr->lock);
}
-static u32 man_trk_ctl_enable_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_enable_bit_get(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ? 0 :
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ? 0 :
PSR2_MAN_TRK_CTL_ENABLE;
}
-static u32 man_trk_ctl_single_full_frame_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_single_full_frame_bit_get(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_SINGLE_FULL_FRAME;
}
-static u32 man_trk_ctl_partial_frame_bit_get(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_partial_frame_bit_get(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE :
PSR2_MAN_TRK_CTL_SF_PARTIAL_FRAME_UPDATE;
}
-static u32 man_trk_ctl_continuos_full_frame(struct drm_i915_private *dev_priv)
+static u32 man_trk_ctl_continuos_full_frame(struct intel_display *display)
{
- return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14 ?
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
+
+ return IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14 ?
ADLP_PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME :
PSR2_MAN_TRK_CTL_SF_CONTINUOS_FULL_FRAME;
}
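
The hunks above show the conversion pattern used throughout this file: helpers that took struct drm_i915_private now take struct intel_display and re-derive the i915 pointer locally, only for platform checks such as IS_ALDERLAKE_P() that have no display-based form yet. A minimal sketch of the pattern (the name example_bit_get is illustrative; everything else is taken from the hunks above):

    static u32 example_bit_get(struct intel_display *display)
    {
    	/* derive the full device pointer only where a platform check needs it */
    	struct drm_i915_private *i915 = to_i915(display->drm);

    	if (IS_ALDERLAKE_P(i915) || DISPLAY_VER(display) >= 14)
    		return 0;

    	return PSR2_MAN_TRK_CTL_ENABLE;
    }

Once the remaining platform checks grow display-based variants, the local to_i915() derivation can be dropped entirely.
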
static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled)
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
- man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv) |
- man_trk_ctl_continuos_full_frame(dev_priv));
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
+ man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_single_full_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display));
/*
* Display WA #0884: skl+
@@ -2236,20 +2263,20 @@ static void psr_force_hw_tracking_exit(struct intel_dp *intel_dp)
* but testing proved that it works up to display version 13; newer
* versions will need testing.
*/
- intel_de_write(dev_priv, CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
+ intel_de_write(display, CURSURFLIVE(display, intel_dp->psr.pipe), 0);
}
void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
struct intel_encoder *encoder;
if (!crtc_state->enable_psr2_sel_fetch)
return;
- for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -2259,36 +2286,37 @@ void intel_psr2_program_trans_man_trk_ctl(const struct intel_crtc_state *crtc_st
break;
}
- intel_de_write(dev_priv, PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display, PSR2_MAN_TRK_CTL(display, cpu_transcoder),
crtc_state->psr2_man_track_ctl);
if (!crtc_state->enable_psr2_su_region_et)
return;
- intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
+ intel_de_write(display, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
crtc_state->pipe_srcsz_early_tpt);
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
bool full_update)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 val = man_trk_ctl_enable_bit_get(dev_priv);
+ u32 val = man_trk_ctl_enable_bit_get(display);
/* SF partial frame enable has to be set even on full update */
- val |= man_trk_ctl_partial_frame_bit_get(dev_priv);
+ val |= man_trk_ctl_partial_frame_bit_get(display);
if (full_update) {
- val |= man_trk_ctl_single_full_frame_bit_get(dev_priv);
- val |= man_trk_ctl_continuos_full_frame(dev_priv);
+ val |= man_trk_ctl_single_full_frame_bit_get(display);
+ val |= man_trk_ctl_continuos_full_frame(display);
goto exit;
}
if (crtc_state->psr2_su_area.y1 == -1)
goto exit;
- if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14) {
+ if (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14) {
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_START_ADDR(crtc_state->psr2_su_area.y1);
val |= ADLP_PSR2_MAN_TRK_CTL_SU_REGION_END_ADDR(crtc_state->psr2_su_area.y2 - 1);
} else {
@@ -2341,13 +2369,14 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
+ struct intel_display *display = to_intel_display(crtc_state);
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
- (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(dev_priv) >= 14))
+ (IS_ALDERLAKE_P(dev_priv) || DISPLAY_VER(display) >= 14))
y_alignment = vdsc_cfg->slice_height;
else
y_alignment = crtc_state->su_y_granularity;
@@ -2435,6 +2464,7 @@ static bool psr2_sel_fetch_pipe_state_supported(const struct intel_crtc_state *c
int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state, *old_plane_state;
@@ -2531,7 +2561,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
* calculation for those.
*/
if (crtc_state->psr2_su_area.y1 == -1) {
- drm_info_once(&dev_priv->drm,
+ drm_info_once(display->drm,
"Selective fetch area calculation failed in pipe %c\n",
pipe_name(crtc->pipe));
full_update = true;
@@ -2542,7 +2572,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
/* Wa_14014971492 */
if (!crtc_state->has_panel_replay &&
- ((IS_DISPLAY_VER_STEP(dev_priv, IP_VER(14, 0), STEP_A0, STEP_B0) ||
+ ((IS_DISPLAY_VER_STEP(display, IP_VER(14, 0), STEP_A0, STEP_B0) ||
IS_ALDERLAKE_P(dev_priv) || IS_TIGERLAKE(dev_priv))) &&
crtc_state->splitter.enable)
crtc_state->psr2_su_area.y1 = 0;
@@ -2628,6 +2658,7 @@ skip_sel_fetch_set_loop:
void intel_psr_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
+ struct intel_display *display = to_intel_display(state);
struct drm_i915_private *i915 = to_i915(state->base.dev);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
@@ -2635,7 +2666,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
- if (!HAS_PSR(i915))
+ if (!HAS_PSR(display))
return;
for_each_intel_encoder_mask_with_psr(state->base.dev, encoder,
@@ -2676,7 +2707,7 @@ void intel_psr_pre_plane_update(struct intel_atomic_state *state,
void intel_psr_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_display *display = to_intel_display(state);
const struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_encoder *encoder;
@@ -2692,13 +2723,14 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
mutex_lock(&psr->lock);
- drm_WARN_ON(&dev_priv->drm, psr->enabled && !crtc_state->active_planes);
+ drm_WARN_ON(display->drm,
+ psr->enabled && !crtc_state->active_planes);
keep_disabled |= psr->sink_not_reliable;
keep_disabled |= !crtc_state->active_planes;
/* Display WA #1136: skl, bxt */
- keep_disabled |= DISPLAY_VER(dev_priv) < 11 &&
+ keep_disabled |= DISPLAY_VER(display) < 11 &&
crtc_state->wm_level_disabled;
if (!psr->enabled && !keep_disabled)
@@ -2723,7 +2755,7 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
/*
@@ -2731,14 +2763,14 @@ static int _psr2_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* As all higher states have bit 4 of the PSR2 state set, we can just wait for
* EDP_PSR2_STATUS_STATE_DEEP_SLEEP to be cleared.
*/
- return intel_de_wait_for_clear(dev_priv,
- EDP_PSR2_STATUS(dev_priv, cpu_transcoder),
+ return intel_de_wait_for_clear(display,
+ EDP_PSR2_STATUS(display, cpu_transcoder),
EDP_PSR2_STATUS_STATE_DEEP_SLEEP, 50);
}
static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
/*
@@ -2747,8 +2779,8 @@ static int _psr1_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
* exit training time + 1.5 ms of aux channel handshake. 50 ms is
* defensive enough to cover everything.
*/
- return intel_de_wait_for_clear(dev_priv,
- psr_status_reg(dev_priv, cpu_transcoder),
+ return intel_de_wait_for_clear(display,
+ psr_status_reg(display, cpu_transcoder),
EDP_PSR_STATUS_STATE_MASK, 50);
}
@@ -2768,13 +2800,13 @@ static int _panel_replay_ready_for_pipe_update_locked(struct intel_dp *intel_dp)
*/
void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_state)
{
- struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(new_crtc_state);
struct intel_encoder *encoder;
if (!new_crtc_state->has_psr)
return;
- for_each_intel_encoder_mask_with_psr(&dev_priv->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
new_crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
int ret;
@@ -2792,13 +2824,14 @@ void intel_psr_wait_for_idle_locked(const struct intel_crtc_state *new_crtc_stat
ret = _psr1_ready_for_pipe_update_locked(intel_dp);
if (ret)
- drm_err(&dev_priv->drm, "PSR wait timed out, atomic update may fail\n");
+ drm_err(display->drm,
+ "PSR wait timed out, atomic update may fail\n");
}
}
static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
i915_reg_t reg;
u32 mask;
@@ -2809,18 +2842,18 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
if (intel_dp_is_edp(intel_dp) && (intel_dp->psr.sel_update_enabled ||
intel_dp->psr.panel_replay_enabled)) {
- reg = EDP_PSR2_STATUS(dev_priv, cpu_transcoder);
+ reg = EDP_PSR2_STATUS(display, cpu_transcoder);
mask = EDP_PSR2_STATUS_STATE_MASK;
} else {
- reg = psr_status_reg(dev_priv, cpu_transcoder);
+ reg = psr_status_reg(display, cpu_transcoder);
mask = EDP_PSR_STATUS_STATE_MASK;
}
mutex_unlock(&intel_dp->psr.lock);
- err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
+ err = intel_de_wait_for_clear(display, reg, mask, 50);
if (err)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
@@ -2828,7 +2861,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp)
return err == 0 && intel_dp->psr.enabled;
}
-static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
+static int intel_psr_fastset_force(struct intel_display *display)
{
struct drm_connector_list_iter conn_iter;
struct drm_modeset_acquire_ctx ctx;
@@ -2836,7 +2869,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
struct drm_connector *conn;
int err = 0;
- state = drm_atomic_state_alloc(&dev_priv->drm);
+ state = drm_atomic_state_alloc(display->drm);
if (!state)
return -ENOMEM;
@@ -2846,7 +2879,7 @@ static int intel_psr_fastset_force(struct drm_i915_private *dev_priv)
to_intel_atomic_state(state)->internal = true;
retry:
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_connector_list_iter_begin(display->drm, &conn_iter);
drm_for_each_connector_iter(conn, &conn_iter) {
struct drm_connector_state *conn_state;
struct drm_crtc_state *crtc_state;
@@ -2893,7 +2926,7 @@ retry:
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
const u32 mode = val & I915_PSR_DEBUG_MODE_MASK;
const u32 disable_bits = val & (I915_PSR_DEBUG_SU_REGION_ET_DISABLE |
I915_PSR_DEBUG_PANEL_REPLAY_DISABLE);
@@ -2904,7 +2937,7 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
I915_PSR_DEBUG_PANEL_REPLAY_DISABLE |
I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_ENABLE_SEL_FETCH) {
- drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
+ drm_dbg_kms(display->drm, "Invalid debug mask %llx\n", val);
return -EINVAL;
}
@@ -2929,7 +2962,7 @@ int intel_psr_debug_set(struct intel_dp *intel_dp, u64 val)
mutex_unlock(&intel_dp->psr.lock);
if (old_mode != mode || old_disable_bits != disable_bits)
- ret = intel_psr_fastset_force(dev_priv);
+ ret = intel_psr_fastset_force(display);
return ret;
}
@@ -2981,7 +3014,7 @@ unlock:
static void _psr_invalidate_handle(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
@@ -2989,20 +3022,20 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* Send one update, otherwise lag is observed on screen */
- intel_de_write(dev_priv,
- CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
+ intel_de_write(display,
+ CURSURFLIVE(display, intel_dp->psr.pipe),
0);
return;
}
- val = man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_continuos_full_frame(dev_priv);
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ val = man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display);
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
val);
- intel_de_write(dev_priv,
- CURSURFLIVE(dev_priv, intel_dp->psr.pipe), 0);
+ intel_de_write(display,
+ CURSURFLIVE(display, intel_dp->psr.pipe), 0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = true;
} else {
intel_psr_exit(intel_dp);
@@ -3011,7 +3044,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
/**
* intel_psr_invalidate - Invalidate PSR
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the invalidate
*
@@ -3022,7 +3055,7 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp)
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+void intel_psr_invalidate(struct intel_display *display,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct intel_encoder *encoder;
@@ -3030,7 +3063,7 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
if (origin == ORIGIN_FLIP)
return;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3060,7 +3093,8 @@ static void
tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
enum fb_op_origin origin)
{
- struct drm_i915_private *i915 = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *i915 = to_i915(display->drm);
if (!intel_dp->psr.dc3co_exitline || !intel_dp->psr.sel_update_enabled ||
!intel_dp->psr.active)
@@ -3081,17 +3115,18 @@ tgl_dc3co_flush_locked(struct intel_dp *intel_dp, unsigned int frontbuffer_bits,
static void _psr_flush_handle(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
if (intel_dp->psr.psr2_sel_fetch_enabled) {
if (intel_dp->psr.psr2_sel_fetch_cff_enabled) {
/* can we turn CFF off? */
if (intel_dp->psr.busy_frontbuffer_bits == 0) {
- u32 val = man_trk_ctl_enable_bit_get(dev_priv) |
- man_trk_ctl_partial_frame_bit_get(dev_priv) |
- man_trk_ctl_single_full_frame_bit_get(dev_priv) |
- man_trk_ctl_continuos_full_frame(dev_priv);
+ u32 val = man_trk_ctl_enable_bit_get(display) |
+ man_trk_ctl_partial_frame_bit_get(display) |
+ man_trk_ctl_single_full_frame_bit_get(display) |
+ man_trk_ctl_continuos_full_frame(display);
/*
* Set psr2_sel_fetch_cff_enabled to false to allow selective
@@ -3099,11 +3134,11 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
* SU configuration in case an update is sent for any reason after
* the SFF bit gets cleared by the HW on the next vblank.
*/
- intel_de_write(dev_priv,
- PSR2_MAN_TRK_CTL(dev_priv, cpu_transcoder),
+ intel_de_write(display,
+ PSR2_MAN_TRK_CTL(display, cpu_transcoder),
val);
- intel_de_write(dev_priv,
- CURSURFLIVE(dev_priv, intel_dp->psr.pipe),
+ intel_de_write(display,
+ CURSURFLIVE(display, intel_dp->psr.pipe),
0);
intel_dp->psr.psr2_sel_fetch_cff_enabled = false;
}
@@ -3124,7 +3159,7 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
/**
* intel_psr_flush - Flush PSR
- * @dev_priv: i915 device
+ * @display: display device
* @frontbuffer_bits: frontbuffer plane tracking bits
* @origin: which operation caused the flush
*
@@ -3135,12 +3170,12 @@ static void _psr_flush_handle(struct intel_dp *intel_dp)
*
* Dirty frontbuffers relevant to PSR are tracked in busy_frontbuffer_bits.
*/
-void intel_psr_flush(struct drm_i915_private *dev_priv,
+void intel_psr_flush(struct intel_display *display,
unsigned frontbuffer_bits, enum fb_op_origin origin)
{
struct intel_encoder *encoder;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
unsigned int pipe_frontbuffer_bits = frontbuffer_bits;
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3189,11 +3224,12 @@ unlock:
*/
void intel_psr_init(struct intel_dp *intel_dp)
{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_connector *connector = intel_dp->attached_connector;
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!(HAS_PSR(dev_priv) || HAS_DP20(dev_priv)))
+ if (!(HAS_PSR(display) || HAS_DP20(dev_priv)))
return;
/*
@@ -3205,21 +3241,21 @@ void intel_psr_init(struct intel_dp *intel_dp)
* So let's keep it hardcoded to PORT_A for BDW, GEN9 and GEN11.
* But GEN12 supports an instance of PSR registers per transcoder.
*/
- if (DISPLAY_VER(dev_priv) < 12 && dig_port->base.port != PORT_A) {
- drm_dbg_kms(&dev_priv->drm,
+ if (DISPLAY_VER(display) < 12 && dig_port->base.port != PORT_A) {
+ drm_dbg_kms(display->drm,
"PSR condition failed: Port not supported\n");
return;
}
if ((HAS_DP20(dev_priv) && !intel_dp_is_edp(intel_dp)) ||
- DISPLAY_VER(dev_priv) >= 20)
+ DISPLAY_VER(display) >= 20)
intel_dp->psr.source_panel_replay_support = true;
- if (HAS_PSR(dev_priv) && intel_dp_is_edp(intel_dp))
+ if (HAS_PSR(display) && intel_dp_is_edp(intel_dp))
intel_dp->psr.source_support = true;
/* Set link_standby x link_off defaults */
- if (DISPLAY_VER(dev_priv) < 12)
+ if (DISPLAY_VER(display) < 12)
/* For platforms up to TGL, let's respect the VBT again */
intel_dp->psr.link_standby = connector->panel.vbt.psr.full_link;
@@ -3256,7 +3292,7 @@ static int psr_get_status_and_error_status(struct intel_dp *intel_dp,
static void psr_alpm_check(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct drm_dp_aux *aux = &intel_dp->aux;
struct intel_psr *psr = &intel_dp->psr;
u8 val;
@@ -3267,14 +3303,14 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
if (r != 1) {
- drm_err(&dev_priv->drm, "Error reading ALPM status\n");
+ drm_err(display->drm, "Error reading ALPM status\n");
return;
}
if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"ALPM lock timeout error, disabling PSR\n");
/* Clearing error */
@@ -3284,21 +3320,21 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
static void psr_capability_changed_check(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
u8 val;
int r;
r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
if (r != 1) {
- drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
+ drm_err(display->drm, "Error reading DP_PSR_ESI\n");
return;
}
if (val & DP_PSR_CAPS_CHANGE) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"Sink PSR capability changed, disabling PSR\n");
/* Clearing it */
@@ -3315,7 +3351,7 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
*/
void intel_psr_short_pulse(struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
struct intel_psr *psr = &intel_dp->psr;
u8 status, error_status;
const u8 errors = DP_PSR_RFB_STORAGE_ERROR |
@@ -3331,7 +3367,7 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
goto exit;
if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"Error reading PSR status or error status\n");
goto exit;
}
@@ -3344,20 +3380,20 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
if (!psr->panel_replay_enabled && status == DP_PSR_SINK_INTERNAL_ERROR &&
!error_status)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR sink internal error, disabling PSR\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR RFB storage error, disabling PSR\n");
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR VSC SDP uncorrectable error, disabling PSR\n");
if (error_status & DP_PSR_LINK_CRC_ERROR)
- drm_dbg_kms(&dev_priv->drm,
+ drm_dbg_kms(display->drm,
"PSR Link CRC error, disabling PSR\n");
if (error_status & ~errors)
- drm_err(&dev_priv->drm,
+ drm_err(display->drm,
"PSR_ERROR_STATUS unhandled errors %x\n",
error_status & ~errors);
/* clear status register */
@@ -3396,13 +3432,13 @@ bool intel_psr_enabled(struct intel_dp *intel_dp)
*/
void intel_psr_lock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
if (!crtc_state->has_psr)
return;
- for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3419,13 +3455,13 @@ void intel_psr_lock(const struct intel_crtc_state *crtc_state)
*/
void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
{
- struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+ struct intel_display *display = to_intel_display(crtc_state);
struct intel_encoder *encoder;
if (!crtc_state->has_psr)
return;
- for_each_intel_encoder_mask_with_psr(&i915->drm, encoder,
+ for_each_intel_encoder_mask_with_psr(display->drm, encoder,
crtc_state->uapi.encoder_mask) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -3437,7 +3473,7 @@ void intel_psr_unlock(const struct intel_crtc_state *crtc_state)
static void
psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
const char *status = "unknown";
u32 val, status_val;
@@ -3457,8 +3493,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"BUF_ON",
"TG_ON"
};
- val = intel_de_read(dev_priv,
- EDP_PSR2_STATUS(dev_priv, cpu_transcoder));
+ val = intel_de_read(display,
+ EDP_PSR2_STATUS(display, cpu_transcoder));
status_val = REG_FIELD_GET(EDP_PSR2_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
@@ -3473,7 +3509,8 @@ psr_source_status(struct intel_dp *intel_dp, struct seq_file *m)
"SRDOFFACK",
"SRDENT_ON",
};
- val = intel_de_read(dev_priv, psr_status_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display,
+ psr_status_reg(display, cpu_transcoder));
status_val = REG_FIELD_GET(EDP_PSR_STATUS_STATE_MASK, val);
if (status_val < ARRAY_SIZE(live_status))
status = live_status[status_val];
@@ -3534,7 +3571,8 @@ static void intel_psr_print_mode(struct intel_dp *intel_dp,
static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
{
- struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
enum transcoder cpu_transcoder = intel_dp->psr.transcoder;
struct intel_psr *psr = &intel_dp->psr;
intel_wakeref_t wakeref;
@@ -3559,20 +3597,20 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
}
if (psr->panel_replay_enabled) {
- val = intel_de_read(dev_priv, TRANS_DP2_CTL(cpu_transcoder));
+ val = intel_de_read(display, TRANS_DP2_CTL(cpu_transcoder));
if (intel_dp_is_edp(intel_dp))
- psr2_ctl = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv,
+ psr2_ctl = intel_de_read(display,
+ EDP_PSR2_CTL(display,
cpu_transcoder));
enabled = val & TRANS_DP2_PANEL_REPLAY_ENABLE;
} else if (psr->sel_update_enabled) {
- val = intel_de_read(dev_priv,
- EDP_PSR2_CTL(dev_priv, cpu_transcoder));
+ val = intel_de_read(display,
+ EDP_PSR2_CTL(display, cpu_transcoder));
enabled = val & EDP_PSR2_ENABLE;
} else {
- val = intel_de_read(dev_priv, psr_ctl_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, psr_ctl_reg(display, cpu_transcoder));
enabled = val & EDP_PSR_ENABLE;
}
seq_printf(m, "Source PSR/PanelReplay ctl: %s [0x%08x]\n",
@@ -3587,7 +3625,7 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
/*
* SKL+ Perf counter is reset to 0 every time a DC state is entered
*/
- val = intel_de_read(dev_priv, psr_perf_cnt_reg(dev_priv, cpu_transcoder));
+ val = intel_de_read(display, psr_perf_cnt_reg(display, cpu_transcoder));
seq_printf(m, "Performance counter: %u\n",
REG_FIELD_GET(EDP_PSR_PERF_CNT_MASK, val));
@@ -3606,8 +3644,8 @@ static int intel_psr_status(struct seq_file *m, struct intel_dp *intel_dp)
* frame boundary between register reads
*/
for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = intel_de_read(dev_priv,
- PSR2_SU_STATUS(dev_priv, cpu_transcoder, frame));
+ val = intel_de_read(display,
+ PSR2_SU_STATUS(display, cpu_transcoder, frame));
su_frames_val[frame / 3] = val;
}
@@ -3635,15 +3673,15 @@ unlock:
static int i915_edp_psr_status_show(struct seq_file *m, void *data)
{
- struct drm_i915_private *dev_priv = m->private;
+ struct intel_display *display = m->private;
struct intel_dp *intel_dp = NULL;
struct intel_encoder *encoder;
- if (!HAS_PSR(dev_priv))
+ if (!HAS_PSR(display))
return -ENODEV;
/* Find the first EDP which supports PSR */
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
intel_dp = enc_to_intel_dp(encoder);
break;
}
@@ -3658,18 +3696,19 @@ DEFINE_SHOW_ATTRIBUTE(i915_edp_psr_status);
static int
i915_edp_psr_debug_set(void *data, u64 val)
{
- struct drm_i915_private *dev_priv = data;
+ struct intel_display *display = data;
+ struct drm_i915_private *dev_priv = to_i915(display->drm);
struct intel_encoder *encoder;
intel_wakeref_t wakeref;
int ret = -ENODEV;
- if (!HAS_PSR(dev_priv))
+ if (!HAS_PSR(display))
return ret;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+ drm_dbg_kms(display->drm, "Setting PSR debug to %llx\n", val);
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
@@ -3685,13 +3724,13 @@ i915_edp_psr_debug_set(void *data, u64 val)
static int
i915_edp_psr_debug_get(void *data, u64 *val)
{
- struct drm_i915_private *dev_priv = data;
+ struct intel_display *display = data;
struct intel_encoder *encoder;
- if (!HAS_PSR(dev_priv))
+ if (!HAS_PSR(display))
return -ENODEV;
- for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) {
+ for_each_intel_encoder_with_psr(display->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
// TODO: split to each transcoder's PSR debug state
@@ -3706,15 +3745,15 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
i915_edp_psr_debug_get, i915_edp_psr_debug_set,
"%llu\n");
-void intel_psr_debugfs_register(struct drm_i915_private *i915)
+void intel_psr_debugfs_register(struct intel_display *display)
{
- struct drm_minor *minor = i915->drm.primary;
+ struct drm_minor *minor = display->drm->primary;
debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root,
- i915, &i915_edp_psr_debug_fops);
+ display, &i915_edp_psr_debug_fops);
debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root,
- i915, &i915_edp_psr_status_fops);
+ display, &i915_edp_psr_status_fops);
}
static const char *psr_mode_str(struct intel_dp *intel_dp)
@@ -3795,6 +3834,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_psr_status);
void intel_psr_connector_debugfs_add(struct intel_connector *connector)
{
+ struct intel_display *display = to_intel_display(connector);
struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct dentry *root = connector->base.debugfs_entry;
@@ -3807,7 +3847,7 @@ void intel_psr_connector_debugfs_add(struct intel_connector *connector)
debugfs_create_file("i915_psr_sink_status", 0444, root,
connector, &i915_psr_sink_status_fops);
- if (HAS_PSR(i915) || HAS_DP20(i915))
+ if (HAS_PSR(display) || HAS_DP20(i915))
debugfs_create_file("i915_psr_status", 0444, root,
connector, &i915_psr_status_fops);
}
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index d483c85870e1..4e09c10908e4 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -11,11 +11,11 @@
enum fb_op_origin;
struct drm_connector;
struct drm_connector_state;
-struct drm_i915_private;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_display;
struct intel_dp;
struct intel_encoder;
struct intel_plane;
@@ -35,10 +35,10 @@ void intel_psr_post_plane_update(struct intel_atomic_state *state,
void intel_psr_disable(struct intel_dp *intel_dp,
const struct intel_crtc_state *old_crtc_state);
int intel_psr_debug_set(struct intel_dp *intel_dp, u64 value);
-void intel_psr_invalidate(struct drm_i915_private *dev_priv,
+void intel_psr_invalidate(struct intel_display *display,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
-void intel_psr_flush(struct drm_i915_private *dev_priv,
+void intel_psr_flush(struct intel_display *display,
unsigned frontbuffer_bits,
enum fb_op_origin origin);
void intel_psr_init(struct intel_dp *intel_dp);
@@ -60,6 +60,6 @@ void intel_psr_resume(struct intel_dp *intel_dp);
void intel_psr_lock(const struct intel_crtc_state *crtc_state);
void intel_psr_unlock(const struct intel_crtc_state *crtc_state);
void intel_psr_connector_debugfs_add(struct intel_connector *connector);
-void intel_psr_debugfs_register(struct drm_i915_private *i915);
+void intel_psr_debugfs_register(struct intel_display *display);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 14d5fefc9c5b..29b56d53a340 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -14,6 +14,11 @@ static void intel_set_quirk(struct intel_display *display, enum intel_quirk_id q
display->quirks.mask |= BIT(quirk);
}
+static void intel_set_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk)
+{
+ intel_dp->quirks.mask |= BIT(quirk);
+}
+
/*
* Some machines (Lenovo U160) do not work with SSC on LVDS for some reason
*/
@@ -65,6 +70,14 @@ static void quirk_no_pps_backlight_power_hook(struct intel_display *display)
drm_info(display->drm, "Applying no pps backlight power quirk\n");
}
+static void quirk_fw_sync_len(struct intel_dp *intel_dp)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+
+ intel_set_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN);
+ drm_info(display->drm, "Applying Fast Wake sync pulse count quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -72,6 +85,21 @@ struct intel_quirk {
void (*hook)(struct intel_display *display);
};
+struct intel_dpcd_quirk {
+ int device;
+ int subsystem_vendor;
+ int subsystem_device;
+ u8 sink_oui[3];
+ u8 sink_device_id[6];
+ void (*hook)(struct intel_dp *intel_dp);
+};
+
+#define SINK_OUI(first, second, third) { (first), (second), (third) }
+#define SINK_DEVICE_ID(first, second, third, fourth, fifth, sixth) \
+ { (first), (second), (third), (fourth), (fifth), (sixth) }
+
+#define SINK_DEVICE_ID_ANY SINK_DEVICE_ID(0, 0, 0, 0, 0, 0)
+
/* For systems that don't have a meaningful PCI subdevice/subvendor ID */
struct intel_dmi_quirk {
void (*hook)(struct intel_display *display);
@@ -203,6 +231,18 @@ static struct intel_quirk intel_quirks[] = {
{ 0x0f31, 0x103c, 0x220f, quirk_invert_brightness },
};
+static struct intel_dpcd_quirk intel_dpcd_quirks[] = {
+ /* Dell Precision 5490 */
+ {
+ .device = 0x7d55,
+ .subsystem_vendor = 0x1028,
+ .subsystem_device = 0x0cc7,
+ .sink_oui = SINK_OUI(0x38, 0xec, 0x11),
+ .hook = quirk_fw_sync_len,
+ },
+};
+
void intel_init_quirks(struct intel_display *display)
{
struct pci_dev *d = to_pci_dev(display->drm->dev);
@@ -224,7 +264,35 @@ void intel_init_quirks(struct intel_display *display)
}
}
+void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
+ const struct drm_dp_dpcd_ident *ident)
+{
+ struct intel_display *display = to_intel_display(intel_dp);
+ struct pci_dev *d = to_pci_dev(display->drm->dev);
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_dpcd_quirks); i++) {
+ struct intel_dpcd_quirk *q = &intel_dpcd_quirks[i];
+
+ if (d->device == q->device &&
+ (d->subsystem_vendor == q->subsystem_vendor ||
+ q->subsystem_vendor == PCI_ANY_ID) &&
+ (d->subsystem_device == q->subsystem_device ||
+ q->subsystem_device == PCI_ANY_ID) &&
+ !memcmp(q->sink_oui, ident->oui, sizeof(ident->oui)) &&
+ (!memcmp(q->sink_device_id, ident->device_id,
+ sizeof(ident->device_id)) ||
+ mem_is_zero(q->sink_device_id, sizeof(q->sink_device_id))))
+ q->hook(intel_dp);
+ }
+}
+
bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk)
{
return display->quirks.mask & BIT(quirk);
}
+
+bool intel_has_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk)
+{
+ return intel_dp->quirks.mask & BIT(quirk);
+}
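
For context, a hedged sketch of how these hooks are meant to be wired up: the sink identity comes from the DPCD descriptor, so a plausible call site sits right after the descriptor read. drm_dp_read_desc() and drm_dp_is_branch() are existing DRM helpers; the exact call site is an assumption, not part of this hunk:

    struct drm_dp_desc desc;

    /* read the sink OUI/device id, then latch any matching quirks */
    if (!drm_dp_read_desc(&intel_dp->aux, &desc,
    			  drm_dp_is_branch(intel_dp->dpcd)))
    	intel_init_dpcd_quirks(intel_dp, &desc.ident);

    /* consumers later test the per-sink mask */
    if (intel_has_dpcd_quirk(intel_dp, QUIRK_FW_SYNC_LEN))
    	drm_dbg_kms(display->drm, "using fast wake sync length quirk\n");

Unlike intel_quirks, the mask lives on the intel_dp itself, since two sinks attached to the same device can need different quirks.
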
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h
index 151c8f4ae576..cafdebda7535 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.h
+++ b/drivers/gpu/drm/i915/display/intel_quirks.h
@@ -9,6 +9,8 @@
#include <linux/types.h>
struct intel_display;
+struct intel_dp;
+struct drm_dp_dpcd_ident;
enum intel_quirk_id {
QUIRK_BACKLIGHT_PRESENT,
@@ -17,9 +19,13 @@ enum intel_quirk_id {
QUIRK_INVERT_BRIGHTNESS,
QUIRK_LVDS_SSC_DISABLE,
QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK,
+ QUIRK_FW_SYNC_LEN,
};
void intel_init_quirks(struct intel_display *display);
+void intel_init_dpcd_quirks(struct intel_dp *intel_dp,
+ const struct drm_dp_dpcd_ident *ident);
bool intel_has_quirk(struct intel_display *display, enum intel_quirk_id quirk);
+bool intel_has_dpcd_quirk(struct intel_dp *intel_dp, enum intel_quirk_id quirk);
#endif /* __INTEL_QUIRKS_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index e613288937e4..42022756bbd5 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -1080,6 +1080,8 @@ struct bdb_edp {
u16 edp_fast_link_training_rate[16]; /* 224+ */
u16 edp_max_port_link_rate[16]; /* 244+ */
u16 edp_dsc_disable; /* 251+ */
+ u16 t6_delay_support; /* 260+ */
+ u16 link_idle_time[16]; /* 260+ */
} __packed;
/*
@@ -1321,7 +1323,7 @@ struct als_data_entry {
} __packed;
struct aggressiveness_profile_entry {
- u8 dpst_aggressiveness : 4;
+ u8 dpst_aggressiveness : 4; /* (228/252)-256 */
u8 lace_aggressiveness : 4;
} __packed;
@@ -1330,12 +1332,27 @@ struct aggressiveness_profile2_entry {
u8 elp_aggressiveness : 4;
} __packed;
+struct aggressiveness_profile3_entry {
+ u8 apd_aggressiveness:4;
+ u8 pixoptix_aggressiveness:4;
+} __packed;
+
+struct aggressiveness_profile4_entry {
+ u8 xpst_aggressiveness:4;
+ u8 tcon_aggressiveness:4;
+} __packed;
+
+struct panel_identification {
+ u8 panel_technology:4;
+ u8 reserved:4;
+} __packed;
+
struct bdb_lfp_power {
struct lfp_power_features features; /* ???-227 */
struct als_data_entry als[5];
u8 lace_aggressiveness_profile:3; /* 210-227 */
u8 reserved1:5;
- u16 dpst; /* 228+ */
+ u16 dpst; /* 228-256 */
u16 psr; /* 228+ */
u16 drrs; /* 228+ */
u16 lace_support; /* 228+ */
@@ -1343,12 +1360,20 @@ struct bdb_lfp_power {
u16 dmrrs; /* 228+ */
u16 adb; /* 228+ */
u16 lace_enabled_status; /* 228+ */
- struct aggressiveness_profile_entry aggressiveness[16]; /* 228+ */
+ struct aggressiveness_profile_entry aggressiveness[16];
u16 hobl; /* 232+ */
u16 vrr_feature_enabled; /* 233+ */
- u16 elp; /* 247+ */
- u16 opst; /* 247+ */
- struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247+ */
+ u16 elp; /* 247-256 */
+ u16 opst; /* 247-256 */
+ struct aggressiveness_profile2_entry aggressiveness2[16]; /* 247-256 */
+ u16 apd; /* 253-256 */
+ u16 pixoptix; /* 253-256 */
+ struct aggressiveness_profile3_entry aggressiveness3[16]; /* 253-256 */
+ struct panel_identification panel_identification[16]; /* 257+ */
+ u16 xpst_support; /* 257+ */
+ u16 tcon_based_backlight_optimization; /* 257+ */
+ struct aggressiveness_profile4_entry aggressiveness4[16]; /* 257+ */
+ u16 tcon_backlight_xpst_coexistence; /* 257+ */
} __packed;
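
The version ranges in the comments are load-bearing: each field is only defined for the BDB versions noted, so a parser must gate on the VBT version before reading it. A hedged sketch of a consuming helper (hypothetical function, not from this patch; assumes the usual VBT convention that these u16 fields are per-panel-type bitmasks):

    static bool lfp_power_xpst_enabled(const struct bdb_lfp_power *power,
    				       int bdb_version, int panel_type)
    {
    	if (bdb_version < 257)
    		return false;	/* field does not exist before BDB 257 */

    	return power->xpst_support & BIT(panel_type);
    }
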
/*
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 01b7587dd1f8..a3b83cfe1726 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -825,7 +825,7 @@ static int eb_select_context(struct i915_execbuffer *eb)
struct i915_gem_context *ctx;
ctx = i915_gem_context_lookup(eb->file->driver_priv, eb->args->rsvd1);
- if (unlikely(IS_ERR(ctx)))
+ if (IS_ERR(ctx))
return PTR_ERR(ctx);
eb->gem_context = ctx;
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_regs.h b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
index e42b3a5d4e63..57a3c83d3655 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_regs.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_regs.h
@@ -1553,6 +1553,8 @@
#define VLV_RENDER_C0_COUNT _MMIO(0x138118)
#define VLV_MEDIA_C0_COUNT _MMIO(0x13811c)
+#define PCU_PWM_FAN_SPEED _MMIO(0x138140)
+
#define GEN12_RPSTAT1 _MMIO(0x1381b4)
#define GEN12_VOLTAGE_MASK REG_GENMASK(10, 0)
#define GEN12_CAGF_MASK REG_GENMASK(19, 11)
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index bfe6d8fc820f..e539a656cfc3 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -111,9 +111,8 @@ static void wa_init_finish(struct i915_wa_list *wal)
{
/* Trim unused entries. */
if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
- struct i915_wa *list = kmemdup(wal->list,
- wal->count * sizeof(*list),
- GFP_KERNEL);
+ struct i915_wa *list = kmemdup_array(wal->list, wal->count,
+ sizeof(*list), GFP_KERNEL);
if (list) {
kfree(wal->list);
@@ -2072,7 +2071,7 @@ static void dg2_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/* Required by recommended tuning setting (not a workaround) */
whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
-
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
break;
default:
break;
@@ -2087,7 +2086,7 @@ static void xelpg_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/* Required by recommended tuning setting (not a workaround) */
whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
-
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
break;
default:
break;
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
index 021f51d9b456..aab2759067d2 100644
--- a/drivers/gpu/drm/i915/gt/sysfs_engines.c
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -530,9 +530,8 @@ void intel_engines_add_sysfs(struct drm_i915_private *i915)
err_object:
kobject_put(kobj);
err_engine:
- dev_err(kdev, "Failed to add sysfs engine '%s'\n",
- engine->name);
- break;
+ dev_warn(kdev, "Failed to add sysfs engine '%s'\n",
+ engine->name);
}
}
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
index 453d855dd1de..3d3191deb0ab 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_gsc_uc.c
@@ -302,7 +302,7 @@ void intel_gsc_uc_load_start(struct intel_gsc_uc *gsc)
{
struct intel_gt *gt = gsc_uc_to_gt(gsc);
- if (!intel_uc_fw_is_loadable(&gsc->fw))
+ if (!intel_uc_fw_is_loadable(&gsc->fw) || intel_uc_fw_is_in_error(&gsc->fw))
return;
if (intel_gsc_uc_fw_init_done(gsc))
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index c3a5d9e1288e..8322c913bc3c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -4507,7 +4507,7 @@ static void guc_default_vfuncs(struct intel_engine_cs *engine)
/* Wa_16019325821 */
/* Wa_14019159160 */
if ((engine->class == COMPUTE_CLASS || engine->class == RENDER_CLASS) &&
- IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
+ IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 74)))
engine->flags |= I915_ENGINE_USES_WA_HOLD_SWITCHOUT;
/*
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 9a431726c8d5..ac7b3aad2222 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -258,6 +258,11 @@ static inline bool intel_uc_fw_is_running(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_status(uc_fw) == INTEL_UC_FIRMWARE_RUNNING;
}
+static inline bool intel_uc_fw_is_in_error(struct intel_uc_fw *uc_fw)
+{
+ return intel_uc_fw_status_to_error(__intel_uc_fw_status(uc_fw)) != 0;
+}
+
static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
{
return uc_fw->user_overridden;
diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c
index ccdd2983cfb5..fe905d65ddf7 100644
--- a/drivers/gpu/drm/i915/i915_driver.c
+++ b/drivers/gpu/drm/i915/i915_driver.c
@@ -723,7 +723,7 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(i915))
return i915;
- pci_set_drvdata(pdev, i915);
+ pci_set_drvdata(pdev, &i915->drm);
/* Device parameters start as a copy of module parameters. */
i915_params_copy(&i915->params, &i915_modparams);
@@ -1167,7 +1167,7 @@ static int i915_drm_resume(struct drm_device *dev)
intel_dmc_resume(dev_priv);
i915_restore_display(dev_priv);
- intel_pps_unlock_regs_wa(dev_priv);
+ intel_pps_unlock_regs_wa(display);
intel_init_pch_refclk(dev_priv);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index d772cbe15fec..39f6614a0a99 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -365,12 +365,16 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
{
- return dev_get_drvdata(kdev);
+ struct drm_device *drm = dev_get_drvdata(kdev);
+
+ return drm ? to_i915(drm) : NULL;
}
static inline struct drm_i915_private *pdev_to_i915(struct pci_dev *pdev)
{
- return pci_get_drvdata(pdev);
+ struct drm_device *drm = pci_get_drvdata(pdev);
+
+ return drm ? to_i915(drm) : NULL;
}
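
With the drvdata now holding &i915->drm, generic code that only knows about struct drm_device can consume the pointer directly, while i915-specific callers convert back through to_i915(); the NULL checks above keep the helpers safe during early probe, before drvdata is set. Both views of the same pointer, using only names from this hunk:

    struct drm_device *drm = pci_get_drvdata(pdev);	/* generic DRM side */
    struct drm_i915_private *i915 = pdev_to_i915(pdev);	/* i915 side, NULL-safe */
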
static inline struct intel_gt *to_gt(const struct drm_i915_private *i915)
diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c
index 49db3e09826c..17d30f6b84b0 100644
--- a/drivers/gpu/drm/i915/i915_hwmon.c
+++ b/drivers/gpu/drm/i915/i915_hwmon.c
@@ -5,6 +5,7 @@
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
+#include <linux/jiffies.h>
#include <linux/types.h>
#include "i915_drv.h"
@@ -36,6 +37,7 @@ struct hwm_reg {
i915_reg_t pkg_rapl_limit;
i915_reg_t energy_status_all;
i915_reg_t energy_status_tile;
+ i915_reg_t fan_speed;
};
struct hwm_energy_info {
@@ -43,11 +45,17 @@ struct hwm_energy_info {
long accum_energy; /* Accumulated energy for energy1_input */
};
+struct hwm_fan_info {
+ u32 reg_val_prev;
+ u64 time_prev;
+};
+
struct hwm_drvdata {
struct i915_hwmon *hwmon;
struct intel_uncore *uncore;
struct device *hwmon_dev;
struct hwm_energy_info ei; /* Energy info for energy1_input */
+ struct hwm_fan_info fi; /* Fan info for fan1_input */
char name[12];
int gt_n;
bool reset_in_progress;
@@ -276,6 +284,7 @@ static const struct hwmon_channel_info * const hwm_info[] = {
HWMON_CHANNEL_INFO(power, HWMON_P_MAX | HWMON_P_RATED_MAX | HWMON_P_CRIT),
HWMON_CHANNEL_INFO(energy, HWMON_E_INPUT),
HWMON_CHANNEL_INFO(curr, HWMON_C_CRIT),
+ HWMON_CHANNEL_INFO(fan, HWMON_F_INPUT),
NULL
};
@@ -614,6 +623,69 @@ hwm_curr_write(struct hwm_drvdata *ddat, u32 attr, long val)
}
static umode_t
+hwm_fan_is_visible(const struct hwm_drvdata *ddat, u32 attr)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+
+ if (attr == hwmon_fan_input && i915_mmio_reg_valid(hwmon->rg.fan_speed))
+ return 0444;
+
+ return 0;
+}
+
+static int
+hwm_fan_input_read(struct hwm_drvdata *ddat, long *val)
+{
+ struct i915_hwmon *hwmon = ddat->hwmon;
+ struct hwm_fan_info *fi = &ddat->fi;
+ u64 rotations, time_now, time;
+ intel_wakeref_t wakeref;
+ u32 reg_val;
+ int ret = 0;
+
+ wakeref = intel_runtime_pm_get(ddat->uncore->rpm);
+ mutex_lock(&hwmon->hwmon_lock);
+
+ reg_val = intel_uncore_read(ddat->uncore, hwmon->rg.fan_speed);
+ time_now = get_jiffies_64();
+
+ /*
+ * The HW register value is an accumulated count of pulses from the
+ * PWM fan, at a scale of 2 pulses per rotation.
+ */
+ rotations = (reg_val - fi->reg_val_prev) / 2;
+
+ time = jiffies_delta_to_msecs(time_now - fi->time_prev);
+ if (unlikely(!time)) {
+ ret = -EAGAIN;
+ goto exit;
+ }
+
+ /*
+ * Calculate the fan speed in RPM by time-averaging two subsequent
+ * readings:
+ * RPM = number of rotations * msecs per minute / time in msecs
+ */
+ *val = DIV_ROUND_UP_ULL(rotations * (MSEC_PER_SEC * 60), time);
+
+ fi->reg_val_prev = reg_val;
+ fi->time_prev = time_now;
+exit:
+ mutex_unlock(&hwmon->hwmon_lock);
+ intel_runtime_pm_put(ddat->uncore->rpm, wakeref);
+ return ret;
+}
+
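
To make the arithmetic concrete, here is a small standalone sketch of the same formula with assumed sample values (not from the patch): 120 accumulated pulses over 500 ms is 60 rotations, which works out to 7200 RPM.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint32_t pulse_delta = 120;		/* reg_val - fi->reg_val_prev */
    	uint64_t rotations = pulse_delta / 2;	/* 2 pulses per rotation */
    	uint64_t time_ms = 500;			/* elapsed time between reads */

    	/* DIV_ROUND_UP(rotations * msecs per minute, time in msecs) */
    	uint64_t rpm = (rotations * 60 * 1000 + time_ms - 1) / time_ms;

    	printf("%llu RPM\n", (unsigned long long)rpm);	/* prints 7200 */
    	return 0;
    }
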
+static int
+hwm_fan_read(struct hwm_drvdata *ddat, u32 attr, long *val)
+{
+ if (attr == hwmon_fan_input)
+ return hwm_fan_input_read(ddat, val);
+
+ return -EOPNOTSUPP;
+}
+
+static umode_t
hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
u32 attr, int channel)
{
@@ -628,6 +700,8 @@ hwm_is_visible(const void *drvdata, enum hwmon_sensor_types type,
return hwm_energy_is_visible(ddat, attr);
case hwmon_curr:
return hwm_curr_is_visible(ddat, attr);
+ case hwmon_fan:
+ return hwm_fan_is_visible(ddat, attr);
default:
return 0;
}
@@ -648,6 +722,8 @@ hwm_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
return hwm_energy_read(ddat, attr, val);
case hwmon_curr:
return hwm_curr_read(ddat, attr, val);
+ case hwmon_fan:
+ return hwm_fan_read(ddat, attr, val);
default:
return -EOPNOTSUPP;
}
@@ -739,12 +815,14 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
hwmon->rg.pkg_rapl_limit = PCU_PACKAGE_RAPL_LIMIT;
hwmon->rg.energy_status_all = PCU_PACKAGE_ENERGY_STATUS;
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
+ hwmon->rg.fan_speed = PCU_PWM_FAN_SPEED;
} else {
hwmon->rg.pkg_power_sku_unit = INVALID_MMIO_REG;
hwmon->rg.pkg_power_sku = INVALID_MMIO_REG;
hwmon->rg.pkg_rapl_limit = INVALID_MMIO_REG;
hwmon->rg.energy_status_all = INVALID_MMIO_REG;
hwmon->rg.energy_status_tile = INVALID_MMIO_REG;
+ hwmon->rg.fan_speed = INVALID_MMIO_REG;
}
with_intel_runtime_pm(uncore->rpm, wakeref) {
@@ -755,6 +833,16 @@ hwm_get_preregistration_info(struct drm_i915_private *i915)
if (i915_mmio_reg_valid(hwmon->rg.pkg_power_sku_unit))
val_sku_unit = intel_uncore_read(uncore,
hwmon->rg.pkg_power_sku_unit);
+
+ /*
+ * Store the initial fan register value, so that we can use it for
+ * the initial fan speed calculation.
+ */
+ if (i915_mmio_reg_valid(hwmon->rg.fan_speed)) {
+ ddat->fi.reg_val_prev = intel_uncore_read(uncore,
+ hwmon->rg.fan_speed);
+ ddat->fi.time_prev = get_jiffies_64();
+ }
}
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
diff --git a/drivers/gpu/drm/i915/i915_module.c b/drivers/gpu/drm/i915/i915_module.c
index 65acd7bf75d0..7ed6d70389af 100644
--- a/drivers/gpu/drm/i915/i915_module.c
+++ b/drivers/gpu/drm/i915/i915_module.c
@@ -30,15 +30,20 @@ static int i915_check_nomodeset(void)
*/
if (i915_modparams.modeset == 0)
+ pr_warn("i915.modeset=0 is deprecated. Please use the 'nomodeset' kernel parameter instead.\n");
+ else if (i915_modparams.modeset != -1)
+ pr_warn("i915.modeset=%d is deprecated. Please remove it and the 'nomodeset' kernel parameter instead.\n",
+ i915_modparams.modeset);
+
+ if (i915_modparams.modeset == 0)
use_kms = false;
if (drm_firmware_drivers_only() && i915_modparams.modeset == -1)
use_kms = false;
if (!use_kms) {
- /* Silently fail loading to not upset userspace. */
DRM_DEBUG_DRIVER("KMS disabled.\n");
- return 1;
+ return -ENODEV;
}
return 0;
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 316e55f3e87b..37746dd619fd 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -64,8 +64,7 @@ struct i915_params i915_modparams __read_mostly = {
*/
i915_param_named(modeset, int, 0400,
- "Use kernel modesetting [KMS] (0=disable, "
- "1=on, -1=force vga console preference [default])");
+ "Deprecated. Use the 'nomodeset' kernel parameter instead.");
i915_param_named_unsafe(reset, uint, 0400,
"Attempt GPU resets (0=disabled, 1=full gpu reset, 2=engine reset [default])");
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 8a9aad523eec..1d4cc91c0e40 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -51,7 +51,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
debug_object_init(fence, &i915_sw_fence_debug_descr);
}
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}
@@ -77,7 +77,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
{
debug_object_free(fence, &i915_sw_fence_debug_descr);
smp_wmb(); /* flush the change in state before reallocation */
@@ -94,7 +94,7 @@ static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}
-static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}
@@ -115,7 +115,7 @@ static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}
-static inline void debug_fence_free(struct i915_sw_fence *fence)
+static inline __maybe_unused void debug_fence_free(struct i915_sw_fence *fence)
{
}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 0bd29846873b..91794ca17a58 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -172,7 +172,7 @@ struct drm_i915_private *mock_gem_device(void)
return NULL;
}
- pci_set_drvdata(pdev, i915);
+ pci_set_drvdata(pdev, &i915->drm);
/* Device parameters start as a copy of module parameters. */
i915_params_copy(&i915->params, &i915_modparams);
diff --git a/drivers/gpu/drm/imagination/pvr_vm.c b/drivers/gpu/drm/imagination/pvr_vm.c
index ffd466509d0b..39773991710a 100644
--- a/drivers/gpu/drm/imagination/pvr_vm.c
+++ b/drivers/gpu/drm/imagination/pvr_vm.c
@@ -114,6 +114,8 @@ struct pvr_vm_gpuva {
struct drm_gpuva base;
};
+#define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)
+
enum pvr_vm_bind_type {
PVR_VM_BIND_TYPE_MAP,
PVR_VM_BIND_TYPE_UNMAP,
@@ -386,6 +388,7 @@ pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
drm_gpuva_unmap(&op->unmap);
drm_gpuva_unlink(op->unmap.va);
+ kfree(to_pvr_vm_gpuva(op->unmap.va));
return 0;
}
@@ -433,6 +436,7 @@ pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
}
drm_gpuva_unlink(op->remap.unmap->va);
+ kfree(to_pvr_vm_gpuva(op->remap.unmap->va));
return 0;
}
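Both unmap paths now free the wrapper object recovered through the new to_pvr_vm_gpuva() macro, closing a leak where only the embedded drm_gpuva was unlinked. A self-contained sketch of the underlying container_of pattern (the struct layouts here are invented for illustration):

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct gpuva { int dummy; };
struct pvr_vm_gpuva { struct gpuva base; };

int main(void)
{
    struct pvr_vm_gpuva *va = calloc(1, sizeof(*va));
    struct gpuva *base = &va->base;

    /* recover the wrapper from the embedded base, then free it */
    printf("wrapper recovered at %p\n",
           (void *)container_of(base, struct pvr_vm_gpuva, base));
    free(container_of(base, struct pvr_vm_gpuva, base));
    return 0;
}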
diff --git a/drivers/gpu/drm/imx/dcss/Kconfig b/drivers/gpu/drm/imx/dcss/Kconfig
index 3ffc061d392b..59e3b6a1dff0 100644
--- a/drivers/gpu/drm/imx/dcss/Kconfig
+++ b/drivers/gpu/drm/imx/dcss/Kconfig
@@ -2,6 +2,8 @@ config DRM_IMX_DCSS
tristate "i.MX8MQ DCSS"
select IMX_IRQSTEER
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
depends on DRM && ARCH_MXC && ARM64
diff --git a/drivers/gpu/drm/imx/lcdc/Kconfig b/drivers/gpu/drm/imx/lcdc/Kconfig
index 7e57922bbd9d..9c28bb0f4662 100644
--- a/drivers/gpu/drm/imx/lcdc/Kconfig
+++ b/drivers/gpu/drm/imx/lcdc/Kconfig
@@ -3,5 +3,7 @@ config DRM_IMX_LCDC
depends on DRM && (ARCH_MXC || COMPILE_TEST)
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
help
Found on i.MX1, i.MX21, i.MX25 and i.MX27.
diff --git a/drivers/gpu/drm/ingenic/Kconfig b/drivers/gpu/drm/ingenic/Kconfig
index 3db117c5edd9..8cd7b750dffe 100644
--- a/drivers/gpu/drm/ingenic/Kconfig
+++ b/drivers/gpu/drm/ingenic/Kconfig
@@ -8,6 +8,8 @@ config DRM_INGENIC
select DRM_BRIDGE
select DRM_PANEL_BRIDGE
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select REGMAP
select REGMAP_MMIO
diff --git a/drivers/gpu/drm/kmb/Kconfig b/drivers/gpu/drm/kmb/Kconfig
index fd011367db1d..e5ae3ec52392 100644
--- a/drivers/gpu/drm/kmb/Kconfig
+++ b/drivers/gpu/drm/kmb/Kconfig
@@ -3,6 +3,8 @@ config DRM_KMB_DISPLAY
depends on DRM
depends on ARCH_KEEMBAY || COMPILE_TEST
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select DRM_MIPI_DSI
help
diff --git a/drivers/gpu/drm/mediatek/Kconfig b/drivers/gpu/drm/mediatek/Kconfig
index d6449ebae838..417ac8c9af41 100644
--- a/drivers/gpu/drm/mediatek/Kconfig
+++ b/drivers/gpu/drm/mediatek/Kconfig
@@ -9,6 +9,8 @@ config DRM_MEDIATEK
depends on MTK_MMSYS
select DRM_GEM_DMA_HELPER if DRM_FBDEV_EMULATION
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_MIPI_DSI
select DRM_PANEL
select MEMORY
diff --git a/drivers/gpu/drm/mediatek/mtk_crtc.c b/drivers/gpu/drm/mediatek/mtk_crtc.c
index 6f34f573e127..175b00e5a253 100644
--- a/drivers/gpu/drm/mediatek/mtk_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_crtc.c
@@ -69,6 +69,8 @@ struct mtk_crtc {
/* lock for display hardware access */
struct mutex hw_lock;
bool config_updating;
+ /* protects config_updating against the CMDQ callback */
+ spinlock_t config_lock;
};
struct mtk_crtc_state {
@@ -106,51 +108,18 @@ static void mtk_crtc_finish_page_flip(struct mtk_crtc *mtk_crtc)
static void mtk_drm_finish_page_flip(struct mtk_crtc *mtk_crtc)
{
+ unsigned long flags;
+
drm_crtc_handle_vblank(&mtk_crtc->base);
+
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
if (!mtk_crtc->config_updating && mtk_crtc->pending_needs_vblank) {
mtk_crtc_finish_page_flip(mtk_crtc);
mtk_crtc->pending_needs_vblank = false;
}
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
}
-#if IS_REACHABLE(CONFIG_MTK_CMDQ)
-static int mtk_drm_cmdq_pkt_create(struct cmdq_client *client, struct cmdq_pkt *pkt,
- size_t size)
-{
- struct device *dev;
- dma_addr_t dma_addr;
-
- pkt->va_base = kzalloc(size, GFP_KERNEL);
- if (!pkt->va_base)
- return -ENOMEM;
-
- pkt->buf_size = size;
- pkt->cl = (void *)client;
-
- dev = client->chan->mbox->dev;
- dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, dma_addr)) {
- dev_err(dev, "dma map failed, size=%u\n", (u32)(u64)size);
- kfree(pkt->va_base);
- return -ENOMEM;
- }
-
- pkt->pa_base = dma_addr;
-
- return 0;
-}
-
-static void mtk_drm_cmdq_pkt_destroy(struct cmdq_pkt *pkt)
-{
- struct cmdq_client *client = (struct cmdq_client *)pkt->cl;
-
- dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
- DMA_TO_DEVICE);
- kfree(pkt->va_base);
-}
-#endif
-
static void mtk_crtc_destroy(struct drm_crtc *crtc)
{
struct mtk_crtc *mtk_crtc = to_mtk_crtc(crtc);
@@ -158,7 +127,7 @@ static void mtk_crtc_destroy(struct drm_crtc *crtc)
mtk_mutex_put(mtk_crtc->mutex);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
- mtk_drm_cmdq_pkt_destroy(&mtk_crtc->cmdq_handle);
+ cmdq_pkt_destroy(&mtk_crtc->cmdq_client, &mtk_crtc->cmdq_handle);
if (mtk_crtc->cmdq_client.chan) {
mbox_free_channel(mtk_crtc->cmdq_client.chan);
@@ -308,12 +277,19 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
struct mtk_crtc *mtk_crtc = container_of(cmdq_cl, struct mtk_crtc, cmdq_client);
struct mtk_crtc_state *state;
unsigned int i;
+ unsigned long flags;
if (data->sta < 0)
return;
state = to_mtk_crtc_state(mtk_crtc->base.state);
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
+ if (mtk_crtc->config_updating) {
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+ goto ddp_cmdq_cb_out;
+ }
+
state->pending_config = false;
if (mtk_crtc->pending_planes) {
@@ -340,6 +316,10 @@ static void ddp_cmdq_cb(struct mbox_client *cl, void *mssg)
mtk_crtc->pending_async_planes = false;
}
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
+ddp_cmdq_cb_out:
+
mtk_crtc->cmdq_vblank_cnt = 0;
wake_up(&mtk_crtc->cb_blocking_queue);
}
@@ -449,6 +429,7 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
{
struct drm_device *drm = mtk_crtc->base.dev;
struct drm_crtc *crtc = &mtk_crtc->base;
+ unsigned long flags;
int i;
for (i = 0; i < mtk_crtc->ddp_comp_nr; i++) {
@@ -480,10 +461,10 @@ static void mtk_crtc_ddp_hw_fini(struct mtk_crtc *mtk_crtc)
pm_runtime_put(drm->dev);
if (crtc->state->event && !crtc->state->active) {
- spin_lock_irq(&crtc->dev->event_lock);
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
drm_crtc_send_vblank_event(crtc, crtc->state->event);
crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
}
@@ -569,9 +550,14 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
struct mtk_drm_private *priv = crtc->dev->dev_private;
unsigned int pending_planes = 0, pending_async_planes = 0;
int i;
+ unsigned long flags;
mutex_lock(&mtk_crtc->hw_lock);
+
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = true;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
if (needs_vblank)
mtk_crtc->pending_needs_vblank = true;
@@ -607,7 +593,7 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event, false);
mtk_crtc_ddp_config(crtc, cmdq_handle);
- cmdq_pkt_finalize(cmdq_handle);
+ cmdq_pkt_eoc(cmdq_handle);
dma_sync_single_for_device(mtk_crtc->cmdq_client.chan->mbox->dev,
cmdq_handle->pa_base,
cmdq_handle->cmd_buf_size,
@@ -625,7 +611,10 @@ static void mtk_crtc_update_config(struct mtk_crtc *mtk_crtc, bool needs_vblank)
mbox_client_txdone(mtk_crtc->cmdq_client.chan, 0);
}
#endif
+ spin_lock_irqsave(&mtk_crtc->config_lock, flags);
mtk_crtc->config_updating = false;
+ spin_unlock_irqrestore(&mtk_crtc->config_lock, flags);
+
mutex_unlock(&mtk_crtc->hw_lock);
}
@@ -925,7 +914,7 @@ static int mtk_crtc_init_comp_planes(struct drm_device *drm_dev,
mtk_crtc_plane_type(mtk_crtc->layer_nr, num_planes),
mtk_ddp_comp_supported_rotations(comp),
mtk_ddp_comp_get_formats(comp),
- mtk_ddp_comp_get_num_formats(comp));
+ mtk_ddp_comp_get_num_formats(comp), i);
if (ret)
return ret;
@@ -1068,6 +1057,7 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, gamma_lut_size);
drm_crtc_enable_color_mgmt(&mtk_crtc->base, 0, has_ctm, gamma_lut_size);
mutex_init(&mtk_crtc->hw_lock);
+ spin_lock_init(&mtk_crtc->config_lock);
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
i = priv->mbox_index++;
@@ -1094,9 +1084,9 @@ int mtk_crtc_create(struct drm_device *drm_dev, const unsigned int *path,
mbox_free_channel(mtk_crtc->cmdq_client.chan);
mtk_crtc->cmdq_client.chan = NULL;
} else {
- ret = mtk_drm_cmdq_pkt_create(&mtk_crtc->cmdq_client,
- &mtk_crtc->cmdq_handle,
- PAGE_SIZE);
+ ret = cmdq_pkt_create(&mtk_crtc->cmdq_client,
+ &mtk_crtc->cmdq_handle,
+ PAGE_SIZE);
if (ret) {
dev_dbg(dev, "mtk_crtc %d failed to create cmdq packet\n",
drm_crtc_index(&mtk_crtc->base));
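The new config_lock closes a race between the update path and the CMDQ completion callback: config_updating is only ever read or written under the lock, and the callback bails out while an update is in flight. A userspace sketch of the pattern, with a pthread mutex standing in for the kernel spinlock (an assumption of the sketch):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t config_lock = PTHREAD_MUTEX_INITIALIZER;
static bool config_updating;
static bool pending_needs_vblank;

static void finish_page_flip(void) /* IRQ-side consumer */
{
    pthread_mutex_lock(&config_lock);
    if (!config_updating && pending_needs_vblank) {
        printf("completing page flip\n");
        pending_needs_vblank = false;
    }
    pthread_mutex_unlock(&config_lock);
}

static void update_config(void)
{
    pthread_mutex_lock(&config_lock);
    config_updating = true;
    pthread_mutex_unlock(&config_lock);

    pending_needs_vblank = true;
    /* ... build and flush the command buffer ... */

    pthread_mutex_lock(&config_lock);
    config_updating = false;
    pthread_mutex_unlock(&config_lock);
}

int main(void)
{
    update_config();
    finish_page_flip(); /* runs now that no update is in flight */
    return 0;
}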
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 9d6d9fd8342e..89b439dcf3a6 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -56,8 +56,12 @@
#define GMC_THRESHOLD_HIGH ((1 << GMC_THRESHOLD_BITS) / 4)
#define GMC_THRESHOLD_LOW ((1 << GMC_THRESHOLD_BITS) / 8)
+#define OVL_CON_CLRFMT_MAN BIT(23)
#define OVL_CON_BYTE_SWAP BIT(24)
-#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
+
+/* OVL_CON_RGB_SWAP works only if OVL_CON_CLRFMT_MAN is enabled */
+#define OVL_CON_RGB_SWAP BIT(25)
+
#define OVL_CON_CLRFMT_RGB (1 << 12)
#define OVL_CON_CLRFMT_ARGB8888 (2 << 12)
#define OVL_CON_CLRFMT_RGBA8888 (3 << 12)
@@ -65,6 +69,11 @@
#define OVL_CON_CLRFMT_BGRA8888 (OVL_CON_CLRFMT_ARGB8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_UYVY (4 << 12)
#define OVL_CON_CLRFMT_YUYV (5 << 12)
+#define OVL_CON_MTX_YUV_TO_RGB (6 << 16)
+#define OVL_CON_CLRFMT_PARGB8888 ((3 << 12) | OVL_CON_CLRFMT_MAN)
+#define OVL_CON_CLRFMT_PABGR8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_RGB_SWAP)
+#define OVL_CON_CLRFMT_PBGRA8888 (OVL_CON_CLRFMT_PARGB8888 | OVL_CON_BYTE_SWAP)
+#define OVL_CON_CLRFMT_PRGBA8888 (OVL_CON_CLRFMT_PABGR8888 | OVL_CON_BYTE_SWAP)
#define OVL_CON_CLRFMT_RGB565(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
0 : OVL_CON_CLRFMT_RGB)
#define OVL_CON_CLRFMT_RGB888(ovl) ((ovl)->data->fmt_rgb565_is_0 ? \
@@ -377,7 +386,8 @@ void mtk_ovl_layer_off(struct device *dev, unsigned int idx,
DISP_REG_OVL_RDMA_CTRL(idx));
}
-static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
+static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt,
+ unsigned int blend_mode)
{
/* The return value in switch "MEM_MODE_INPUT_FORMAT_XXX"
* is defined in mediatek HW data sheet.
@@ -398,22 +408,30 @@ static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
case DRM_FORMAT_RGBA8888:
case DRM_FORMAT_RGBX1010102:
case DRM_FORMAT_RGBA1010102:
- return OVL_CON_CLRFMT_RGBA8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_RGBA8888 :
+ OVL_CON_CLRFMT_PRGBA8888;
case DRM_FORMAT_BGRX8888:
case DRM_FORMAT_BGRA8888:
case DRM_FORMAT_BGRX1010102:
case DRM_FORMAT_BGRA1010102:
- return OVL_CON_CLRFMT_BGRA8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_BGRA8888 :
+ OVL_CON_CLRFMT_PBGRA8888;
case DRM_FORMAT_XRGB8888:
case DRM_FORMAT_ARGB8888:
case DRM_FORMAT_XRGB2101010:
case DRM_FORMAT_ARGB2101010:
- return OVL_CON_CLRFMT_ARGB8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_ARGB8888 :
+ OVL_CON_CLRFMT_PARGB8888;
case DRM_FORMAT_XBGR8888:
case DRM_FORMAT_ABGR8888:
case DRM_FORMAT_XBGR2101010:
case DRM_FORMAT_ABGR2101010:
- return OVL_CON_CLRFMT_ABGR8888;
+ return blend_mode == DRM_MODE_BLEND_COVERAGE ?
+ OVL_CON_CLRFMT_ABGR8888 :
+ OVL_CON_CLRFMT_PABGR8888;
case DRM_FORMAT_UYVY:
return OVL_CON_CLRFMT_UYVY | OVL_CON_MTX_YUV_TO_RGB;
case DRM_FORMAT_YUYV:
@@ -434,6 +452,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
unsigned int fmt = pending->format;
unsigned int offset = (pending->y << 16) | pending->x;
unsigned int src_size = (pending->height << 16) | pending->width;
+ unsigned int blend_mode = state->base.pixel_blend_mode;
unsigned int ignore_pixel_alpha = 0;
unsigned int con;
bool is_afbc = pending->modifier != DRM_FORMAT_MOD_LINEAR;
@@ -452,7 +471,7 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
return;
}
- con = ovl_fmt_convert(ovl, fmt);
+ con = ovl_fmt_convert(ovl, fmt, blend_mode);
if (state->base.fb) {
con |= OVL_CON_AEN;
con |= state->base.alpha & OVL_CON_ALPHA;
@@ -463,7 +482,8 @@ void mtk_ovl_layer_config(struct device *dev, unsigned int idx,
* For RGB888 related formats, whether CONST_BLD is enabled or not won't
* affect the result. Therefore we use !has_alpha as the condition.
*/
- if (state->base.fb && !state->base.fb->format->has_alpha)
+ if ((state->base.fb && !state->base.fb->format->has_alpha) ||
+ blend_mode == DRM_MODE_BLEND_PIXEL_NONE)
ignore_pixel_alpha = OVL_CONST_BLEND;
if (pending->rotation & DRM_MODE_REFLECT_Y) {
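With DRM_MODE_BLEND_COVERAGE the legacy format words are kept; any other blend mode selects the premultiplied variants, which require OVL_CON_CLRFMT_MAN to be set. A small sketch of that selection, reusing the constants from this hunk (the enum is a stand-in for the DRM blend-mode values):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)                   (1u << (n))
#define OVL_CON_CLRFMT_MAN       BIT(23)
#define OVL_CON_CLRFMT_ARGB8888  (2u << 12)
#define OVL_CON_CLRFMT_PARGB8888 ((3u << 12) | OVL_CON_CLRFMT_MAN)

enum blend_mode { BLEND_PREMULTI, BLEND_COVERAGE, BLEND_PIXEL_NONE };

/* Coverage keeps the legacy format word; anything else takes the
 * premultiplied variant, mirroring the patched ovl_fmt_convert(). */
static uint32_t argb_fmt(enum blend_mode mode)
{
    return mode == BLEND_COVERAGE ? OVL_CON_CLRFMT_ARGB8888
                                  : OVL_CON_CLRFMT_PARGB8888;
}

int main(void)
{
    printf("coverage: 0x%08x\n", argb_fmt(BLEND_COVERAGE));
    printf("premulti: 0x%08x\n", argb_fmt(BLEND_PREMULTI));
    return 0;
}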
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
index 1a2a73757370..c6768210b08b 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl_adaptor.c
@@ -17,7 +17,6 @@
#include <linux/soc/mediatek/mtk-mmsys.h>
#include <linux/soc/mediatek/mtk-mutex.h>
-#include "mtk_crtc.h"
#include "mtk_ddp_comp.h"
#include "mtk_disp_drv.h"
#include "mtk_drm_drv.h"
@@ -494,12 +493,12 @@ static int compare_of(struct device *dev, void *data)
static int ovl_adaptor_comp_init(struct device *dev, struct component_match **match)
{
struct mtk_disp_ovl_adaptor *priv = dev_get_drvdata(dev);
- struct device_node *node, *parent;
+ struct device_node *parent;
struct platform_device *comp_pdev;
parent = dev->parent->parent->of_node->parent;
- for_each_child_of_node(parent, node) {
+ for_each_child_of_node_scoped(parent, node) {
const struct of_device_id *of_id;
enum mtk_ovl_adaptor_comp_type type;
int id;
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 634bbba5d43f..07243f372260 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -341,14 +341,11 @@ static int mtk_disp_rdma_probe(struct platform_device *pdev)
dev_dbg(dev, "get mediatek,gce-client-reg fail!\n");
#endif
- if (of_find_property(dev->of_node, "mediatek,rdma-fifo-size", &ret)) {
- ret = of_property_read_u32(dev->of_node,
- "mediatek,rdma-fifo-size",
- &priv->fifo_size);
- if (ret)
- return dev_err_probe(dev, ret,
- "Failed to get rdma fifo size\n");
- }
+ ret = of_property_read_u32(dev->of_node,
+ "mediatek,rdma-fifo-size",
+ &priv->fifo_size);
+ if (ret && (ret != -EINVAL))
+ return dev_err_probe(dev, ret, "Failed to get rdma fifo size\n");
/* Disable and clear pending interrupts */
writel(0x0, priv->regs + DISP_REG_RDMA_INT_ENABLE);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index b6e3c011a12d..eeec641cab60 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -88,12 +88,15 @@
#define DSI_HSA_WC 0x50
#define DSI_HBP_WC 0x54
#define DSI_HFP_WC 0x58
+#define HFP_HS_VB_PS_WC GENMASK(30, 16)
+#define HFP_HS_EN BIT(31)
#define DSI_CMDQ_SIZE 0x60
#define CMDQ_SIZE 0x3f
#define CMDQ_SIZE_SEL BIT(15)
#define DSI_HSTX_CKL_WC 0x64
+#define HSTX_CKL_WC GENMASK(15, 2)
#define DSI_RX_DATA0 0x74
#define DSI_RX_DATA1 0x78
@@ -187,6 +190,7 @@ struct mtk_dsi_driver_data {
bool has_shadow_ctl;
bool has_size_ctl;
bool cmdq_long_packet_ctl;
+ bool support_per_frame_lp;
};
struct mtk_dsi {
@@ -426,7 +430,75 @@ static void mtk_dsi_ps_control(struct mtk_dsi *dsi, bool config_vact)
writel(ps_val, dsi->regs + DSI_PSCTRL);
}
-static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+static void mtk_dsi_config_vdo_timing_per_frame_lp(struct mtk_dsi *dsi)
+{
+ u32 horizontal_sync_active_byte;
+ u32 horizontal_backporch_byte;
+ u32 horizontal_frontporch_byte;
+ u32 hfp_byte_adjust, v_active_adjust;
+ u32 cklp_wc_min_adjust, cklp_wc_max_adjust;
+ u32 dsi_tmp_buf_bpp;
+ unsigned int da_hs_trail;
+ unsigned int ps_wc, hs_vb_ps_wc;
+ u32 v_active_roundup, hstx_cklp_wc;
+ u32 hstx_cklp_wc_max, hstx_cklp_wc_min;
+ struct videomode *vm = &dsi->vm;
+
+ if (dsi->format == MIPI_DSI_FMT_RGB565)
+ dsi_tmp_buf_bpp = 2;
+ else
+ dsi_tmp_buf_bpp = 3;
+
+ da_hs_trail = dsi->phy_timing.da_hs_trail;
+ ps_wc = vm->hactive * dsi_tmp_buf_bpp;
+
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) {
+ horizontal_sync_active_byte =
+ vm->hsync_len * dsi_tmp_buf_bpp - 10;
+ horizontal_backporch_byte =
+ vm->hback_porch * dsi_tmp_buf_bpp - 10;
+ hfp_byte_adjust = 12;
+ v_active_adjust = 32 + horizontal_sync_active_byte;
+ cklp_wc_min_adjust = 12 + 2 + 4 + horizontal_sync_active_byte;
+ cklp_wc_max_adjust = 20 + 6 + 4 + horizontal_sync_active_byte;
+ } else {
+ horizontal_sync_active_byte = vm->hsync_len * dsi_tmp_buf_bpp - 4;
+ horizontal_backporch_byte = (vm->hback_porch + vm->hsync_len) *
+ dsi_tmp_buf_bpp - 10;
+ cklp_wc_min_adjust = 4;
+ cklp_wc_max_adjust = 12 + 4 + 4;
+ if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) {
+ hfp_byte_adjust = 18;
+ v_active_adjust = 28;
+ } else {
+ hfp_byte_adjust = 12;
+ v_active_adjust = 22;
+ }
+ }
+ horizontal_frontporch_byte = vm->hfront_porch * dsi_tmp_buf_bpp - hfp_byte_adjust;
+ v_active_roundup = (v_active_adjust + horizontal_backporch_byte + ps_wc +
+ horizontal_frontporch_byte) % dsi->lanes;
+ if (v_active_roundup)
+ horizontal_backporch_byte += dsi->lanes - v_active_roundup;
+ hstx_cklp_wc_min = (DIV_ROUND_UP(cklp_wc_min_adjust, dsi->lanes) + da_hs_trail + 1)
+ * dsi->lanes / 6 - 1;
+ hstx_cklp_wc_max = (DIV_ROUND_UP((cklp_wc_max_adjust + horizontal_backporch_byte +
+ ps_wc), dsi->lanes) + da_hs_trail + 1) * dsi->lanes / 6 - 1;
+
+ hstx_cklp_wc = FIELD_PREP(HSTX_CKL_WC, (hstx_cklp_wc_min + hstx_cklp_wc_max) / 2);
+ writel(hstx_cklp_wc, dsi->regs + DSI_HSTX_CKL_WC);
+
+ hs_vb_ps_wc = ps_wc - (dsi->phy_timing.lpx + dsi->phy_timing.da_hs_exit +
+ dsi->phy_timing.da_hs_prepare + dsi->phy_timing.da_hs_zero + 2) * dsi->lanes;
+ horizontal_frontporch_byte |= FIELD_PREP(HFP_HS_EN, 1) |
+ FIELD_PREP(HFP_HS_VB_PS_WC, hs_vb_ps_wc);
+
+ writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
+ writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
+ writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
+}
+
+static void mtk_dsi_config_vdo_timing_per_line_lp(struct mtk_dsi *dsi)
{
u32 horizontal_sync_active_byte;
u32 horizontal_backporch_byte;
@@ -436,7 +508,6 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
u32 dsi_tmp_buf_bpp, data_phy_cycles;
u32 delta;
struct mtk_phy_timing *timing = &dsi->phy_timing;
-
struct videomode *vm = &dsi->vm;
if (dsi->format == MIPI_DSI_FMT_RGB565)
@@ -444,16 +515,6 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
else
dsi_tmp_buf_bpp = 3;
- writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
- writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
- writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
- writel(vm->vactive, dsi->regs + DSI_VACT_NL);
-
- if (dsi->driver_data->has_size_ctl)
- writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) |
- FIELD_PREP(DSI_WIDTH, vm->hactive),
- dsi->regs + DSI_SIZE_CON);
-
horizontal_sync_active_byte = (vm->hsync_len * dsi_tmp_buf_bpp - 10);
if (dsi->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE)
@@ -499,6 +560,26 @@ static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
writel(horizontal_sync_active_byte, dsi->regs + DSI_HSA_WC);
writel(horizontal_backporch_byte, dsi->regs + DSI_HBP_WC);
writel(horizontal_frontporch_byte, dsi->regs + DSI_HFP_WC);
+}
+
+static void mtk_dsi_config_vdo_timing(struct mtk_dsi *dsi)
+{
+ struct videomode *vm = &dsi->vm;
+
+ writel(vm->vsync_len, dsi->regs + DSI_VSA_NL);
+ writel(vm->vback_porch, dsi->regs + DSI_VBP_NL);
+ writel(vm->vfront_porch, dsi->regs + DSI_VFP_NL);
+ writel(vm->vactive, dsi->regs + DSI_VACT_NL);
+
+ if (dsi->driver_data->has_size_ctl)
+ writel(FIELD_PREP(DSI_HEIGHT, vm->vactive) |
+ FIELD_PREP(DSI_WIDTH, vm->hactive),
+ dsi->regs + DSI_SIZE_CON);
+
+ if (dsi->driver_data->support_per_frame_lp)
+ mtk_dsi_config_vdo_timing_per_frame_lp(dsi);
+ else
+ mtk_dsi_config_vdo_timing_per_line_lp(dsi);
mtk_dsi_ps_control(dsi, false);
}
@@ -1197,6 +1278,7 @@ static const struct mtk_dsi_driver_data mt8188_dsi_driver_data = {
.has_shadow_ctl = true,
.has_size_ctl = true,
.cmdq_long_packet_ctl = true,
+ .support_per_frame_lp = true,
};
static const struct of_device_id mtk_dsi_of_match[] = {
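The per-frame LP path is mostly byte arithmetic: porch widths in pixels are scaled by bytes per pixel, fixed protocol overheads are subtracted, and HBP is rounded up so the active span is lane-aligned. A worked example of the burst-mode branch with invented mode numbers (all timing values below are assumptions, chosen only to exercise the formulas):

#include <stdio.h>

int main(void)
{
    unsigned bpp = 3; /* RGB888 */
    unsigned hsync_len = 20, hback_porch = 40, hfront_porch = 60;
    unsigned lanes = 4;

    /* burst mode, no sync pulse: constants from the patch */
    unsigned hsa = hsync_len * bpp - 4;
    unsigned hbp = (hback_porch + hsync_len) * bpp - 10;
    unsigned hfp = hfront_porch * bpp - 18; /* hfp_byte_adjust = 18 */
    unsigned ps_wc = 1080 * bpp;            /* hactive bytes */

    /* round HBP up so the active span is lane-aligned */
    unsigned rem = (28 + hbp + ps_wc + hfp) % lanes; /* v_active_adjust = 28 */
    if (rem)
        hbp += lanes - rem;

    printf("HSA=%u HBP=%u HFP=%u\n", hsa, hbp, hfp); /* 56 170 162 */
    return 0;
}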
diff --git a/drivers/gpu/drm/mediatek/mtk_ethdr.c b/drivers/gpu/drm/mediatek/mtk_ethdr.c
index 9dfd13d32dfa..d1d9cf8b10e1 100644
--- a/drivers/gpu/drm/mediatek/mtk_ethdr.c
+++ b/drivers/gpu/drm/mediatek/mtk_ethdr.c
@@ -3,6 +3,7 @@
* Copyright (c) 2021 MediaTek Inc.
*/
+#include <drm/drm_blend.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
#include <linux/clk.h>
@@ -35,6 +36,7 @@
#define MIX_SRC_L0_EN BIT(0)
#define MIX_L_SRC_CON(n) (0x28 + 0x18 * (n))
#define NON_PREMULTI_SOURCE (2 << 12)
+#define PREMULTI_SOURCE (3 << 12)
#define MIX_L_SRC_SIZE(n) (0x30 + 0x18 * (n))
#define MIX_L_SRC_OFFSET(n) (0x34 + 0x18 * (n))
#define MIX_FUNC_DCM0 0x120
@@ -175,7 +177,13 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
alpha_con |= state->base.alpha & MIXER_ALPHA;
}
- if (state->base.fb && !state->base.fb->format->has_alpha) {
+ if (state->base.pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ alpha_con |= PREMULTI_SOURCE;
+ else
+ alpha_con |= NON_PREMULTI_SOURCE;
+
+ if ((state->base.fb && !state->base.fb->format->has_alpha) ||
+ state->base.pixel_blend_mode == DRM_MODE_BLEND_PIXEL_NONE) {
/*
* Mixer doesn't support CONST_BLD mode,
* use a trick to make the output equivalent
@@ -191,8 +199,7 @@ void mtk_ethdr_layer_config(struct device *dev, unsigned int idx,
mtk_ddp_write(cmdq_pkt, pending->height << 16 | align_width, &mixer->cmdq_base,
mixer->regs, MIX_L_SRC_SIZE(idx));
mtk_ddp_write(cmdq_pkt, offset, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_OFFSET(idx));
- mtk_ddp_write_mask(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx),
- 0x1ff);
+ mtk_ddp_write(cmdq_pkt, alpha_con, &mixer->cmdq_base, mixer->regs, MIX_L_SRC_CON(idx));
mtk_ddp_write_mask(cmdq_pkt, BIT(idx), &mixer->cmdq_base, mixer->regs, MIX_SRC_CON,
BIT(idx));
}
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.c b/drivers/gpu/drm/mediatek/mtk_plane.c
index 1723d4333f37..7d2cb4e0fafa 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_plane.c
@@ -321,7 +321,7 @@ static const struct drm_plane_helper_funcs mtk_plane_helper_funcs = {
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 *formats,
- size_t num_formats)
+ size_t num_formats, unsigned int plane_idx)
{
int err;
@@ -338,6 +338,22 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
return err;
}
+ /*
+ * The hardware does not support repositioning planes by muxing: their
+ * Z-position is in fact fixed and the only way to change the actual
+ * order is to swap the contents of the entire register set of one
+ * overlay with another, which may be more expensive than desired.
+ *
+ * With no repositioning, the caller of this function guarantees that
+ * the plane_idx is correct. This means that, for example, the PRIMARY
+ * plane fed to this function will always have plane_idx zero.
+ */
+ err = drm_plane_create_zpos_immutable_property(plane, plane_idx);
+ if (err) {
+ DRM_ERROR("Failed to create zpos property for plane %u\n", plane_idx);
+ return err;
+ }
+
if (supported_rotations) {
err = drm_plane_create_rotation_property(plane,
DRM_MODE_ROTATE_0,
@@ -346,6 +362,17 @@ int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
DRM_INFO("Create rotation property failed\n");
}
+ err = drm_plane_create_alpha_property(plane);
+ if (err)
+ DRM_ERROR("failed to create property: alpha\n");
+
+ err = drm_plane_create_blend_mode_property(plane,
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE) |
+ BIT(DRM_MODE_BLEND_PIXEL_NONE));
+ if (err)
+ DRM_ERROR("failed to create property: blend_mode\n");
+
drm_plane_helper_add(plane, &mtk_plane_helper_funcs);
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_plane.h b/drivers/gpu/drm/mediatek/mtk_plane.h
index 231bb7aac947..5b177eac67b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_plane.h
+++ b/drivers/gpu/drm/mediatek/mtk_plane.h
@@ -49,6 +49,5 @@ to_mtk_plane_state(struct drm_plane_state *state)
int mtk_plane_init(struct drm_device *dev, struct drm_plane *plane,
unsigned long possible_crtcs, enum drm_plane_type type,
unsigned int supported_rotations, const u32 *formats,
- size_t num_formats);
-
+ size_t num_formats, unsigned int plane_idx);
#endif
diff --git a/drivers/gpu/drm/meson/Kconfig b/drivers/gpu/drm/meson/Kconfig
index 615fdd0ce41b..2544756538cc 100644
--- a/drivers/gpu/drm/meson/Kconfig
+++ b/drivers/gpu/drm/meson/Kconfig
@@ -4,6 +4,8 @@ config DRM_MESON
depends on DRM && OF && (ARM || ARM64)
depends on ARCH_MESON || COMPILE_TEST
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select DRM_DISPLAY_CONNECTOR
select VIDEOMODE_HELPERS
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 8bfebd579a74..c8dda0ebd043 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -17,6 +17,7 @@ config DRM_MSM
select DRM_DISPLAY_DP_AUX_BUS
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_EXEC
select DRM_KMS_HELPER
select DRM_PANEL
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index f5e2838c6a76..13110fcc46a8 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -37,6 +37,7 @@ msm-display-$(CONFIG_DRM_MSM_HDMI) += \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8996.o \
+ hdmi/hdmi_phy_8998.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
hdmi/hdmi_pll_8960.o \
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
index 0de8465b6cf0..2eb6c3e93748 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_catalog.c
@@ -42,6 +42,17 @@ static const struct adreno_info a3xx_gpus[] = {
.inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x03000620),
+ .family = ADRENO_3XX,
+ .revn = 308,
+ .fw = {
+ [ADRENO_FW_PM4] = "a300_pm4.fw",
+ [ADRENO_FW_PFP] = "a300_pfp.fw",
+ },
+ .gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a3xx_gpu_init,
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x03020000,
0x03020001,
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 5273dc849838..b46ff49f47cf 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -145,6 +145,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x0000000a);
gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x0000000a);
+ } else if (adreno_is_a306a(adreno_gpu)) {
+ gpu_write(gpu, REG_A3XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x0003);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_RD_LIM_CONF0, 0x00000010);
+ gpu_write(gpu, REG_A3XX_VBIF_OUT_WR_LIM_CONF0, 0x00000010);
} else if (adreno_is_a320(adreno_gpu)) {
/* Set up 16 deep read/write request queues: */
gpu_write(gpu, REG_A3XX_VBIF_IN_RD_LIM_CONF0, 0x10101010);
@@ -237,7 +241,9 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_UCHE_CACHE_MODE_CONTROL_REG, 0x00000001);
/* Enable Clock gating: */
- if (adreno_is_a305b(adreno_gpu) || adreno_is_a306(adreno_gpu))
+ if (adreno_is_a305b(adreno_gpu) ||
+ adreno_is_a306(adreno_gpu) ||
+ adreno_is_a306a(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xaaaaaaaa);
else if (adreno_is_a320(adreno_gpu))
gpu_write(gpu, REG_A3XX_RBBM_CLOCK_CTL, 0xbfffffff);
@@ -334,8 +340,10 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A3XX_CP_PFP_UCODE_DATA, ptr[i]);
/* CP ROQ queue sizes (bytes) - RB:16, ST:16, IB1:32, IB2:64 */
- if (adreno_is_a305(adreno_gpu) || adreno_is_a306(adreno_gpu) ||
- adreno_is_a320(adreno_gpu)) {
+ if (adreno_is_a305(adreno_gpu) ||
+ adreno_is_a306(adreno_gpu) ||
+ adreno_is_a306a(adreno_gpu) ||
+ adreno_is_a320(adreno_gpu)) {
gpu_write(gpu, REG_AXXX_CP_QUEUE_THRESHOLDS,
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB1_START(2) |
AXXX_CP_QUEUE_THRESHOLDS_CSQ_IB2_START(6) |
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index c0b5373e90d7..e09044930547 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -65,6 +65,8 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
struct drm_gem_object *obj;
uint32_t *ptr, dwords;
@@ -109,6 +111,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
}
}
+ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
a5xx_flush(gpu, ring, true);
a5xx_preempt_trigger(gpu);
@@ -150,9 +153,13 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
OUT_RING(ring, 1);
- /* Enable local preemption for finegrain preemption */
+ /*
+ * Disable local preemption by default because it requires
+ * user-space to be aware of it and provide additional handling
+ * to restore rendering state or do various flushes on switch.
+ */
OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
- OUT_RING(ring, 0x1);
+ OUT_RING(ring, 0x0);
/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
@@ -206,6 +213,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
/* Write the fence to the scratch register */
OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
OUT_RING(ring, submit->seqno);
+ a5xx_gpu->last_seqno[ring->id] = submit->seqno;
/*
* Execute a CACHE_FLUSH_TS event. This will ensure that the
@@ -1793,5 +1801,9 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
else
adreno_gpu->ubwc_config.highest_bank_bit = 14;
+ /* a5xx only supports UBWC 1.0, these are not configurable */
+ adreno_gpu->ubwc_config.macrotile_mode = 0;
+ adreno_gpu->ubwc_config.ubwc_swizzle = 0x7;
+
return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
index c7187bcc5e90..9c0d701fe4b8 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -34,8 +34,10 @@ struct a5xx_gpu {
struct drm_gem_object *preempt_counters_bo[MSM_GPU_MAX_RINGS];
struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+ uint32_t last_seqno[MSM_GPU_MAX_RINGS];
atomic_t preempt_state;
+ spinlock_t preempt_start_lock;
struct timer_list preempt_timer;
struct drm_gem_object *shadow_bo;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
index f58dd564d122..0469fea55010 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -55,6 +55,8 @@ static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
unsigned long flags;
int i;
@@ -64,6 +66,8 @@ static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
spin_lock_irqsave(&ring->preempt_lock, flags);
empty = (get_wptr(ring) == gpu->funcs->get_rptr(gpu, ring));
+ if (!empty && ring == a5xx_gpu->cur_ring)
+ empty = ring->memptrs->fence == a5xx_gpu->last_seqno[i];
spin_unlock_irqrestore(&ring->preempt_lock, flags);
if (!empty)
@@ -98,11 +102,18 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
return;
/*
+ * Serialize preemption start to ensure that we always make the
+ * decision on the latest state. Otherwise we can get stuck in a
+ * lower-priority or empty ring.
+ */
+ spin_lock_irqsave(&a5xx_gpu->preempt_start_lock, flags);
+
+ /*
* Try to start preemption by moving from NONE to START. If
* unsuccessful, a preemption is already in flight
*/
if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
- return;
+ goto out;
/* Get the next ring to preempt to */
ring = get_next_ring(gpu);
@@ -127,9 +138,11 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
- return;
+ goto out;
}
+ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
+
/* Make sure the wptr doesn't update while we're in motion */
spin_lock_irqsave(&ring->preempt_lock, flags);
a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
@@ -152,6 +165,10 @@ void a5xx_preempt_trigger(struct msm_gpu *gpu)
/* And actually start the preemption */
gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+ return;
+
+out:
+ spin_unlock_irqrestore(&a5xx_gpu->preempt_start_lock, flags);
}
void a5xx_preempt_irq(struct msm_gpu *gpu)
@@ -188,6 +205,12 @@ void a5xx_preempt_irq(struct msm_gpu *gpu)
update_wptr(gpu, a5xx_gpu->cur_ring);
set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+ /*
+ * Try to trigger preemption again in case there was a submit or
+ * retire during ring switch
+ */
+ a5xx_preempt_trigger(gpu);
}
void a5xx_preempt_hw_init(struct msm_gpu *gpu)
@@ -204,6 +227,8 @@ void a5xx_preempt_hw_init(struct msm_gpu *gpu)
return;
for (i = 0; i < gpu->nr_rings; i++) {
+ a5xx_gpu->preempt[i]->data = 0;
+ a5xx_gpu->preempt[i]->info = 0;
a5xx_gpu->preempt[i]->wptr = 0;
a5xx_gpu->preempt[i]->rptr = 0;
a5xx_gpu->preempt[i]->rbase = gpu->rb[i]->iova;
@@ -298,5 +323,6 @@ void a5xx_preempt_init(struct msm_gpu *gpu)
}
}
+ spin_lock_init(&a5xx_gpu->preempt_start_lock);
timer_setup(&a5xx_gpu->preempt_timer, a5xx_preempt_timer, 0);
}
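try_preempt_state() is an atomic compare-and-swap on the preemption state machine, so only one caller can ever move NONE to START; the new preempt_start_lock additionally serializes the ring choice that follows. A sketch of the CAS gate using C11 atomics in place of the kernel's atomic_t (an illustrative stand-in):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum preempt_state { PREEMPT_NONE, PREEMPT_START, PREEMPT_TRIGGERED };

static _Atomic int preempt_state = PREEMPT_NONE;

static bool try_preempt_state(int cur, int next)
{
    /* succeeds only if the state is still 'cur' */
    return atomic_compare_exchange_strong(&preempt_state, &cur, next);
}

int main(void)
{
    /* only one caller can move NONE -> START; the second one loses */
    printf("first:  %d\n", try_preempt_state(PREEMPT_NONE, PREEMPT_START));
    printf("second: %d\n", try_preempt_state(PREEMPT_NONE, PREEMPT_START));
    return 0;
}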
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 68ba9aed5506..0312b6ee0356 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -129,6 +129,59 @@ static const struct adreno_reglist a615_hwcg[] = {
{},
};
+static const struct adreno_reglist a620_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x01002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x25222022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TEX_FCHE, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TEX_FCHE, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_TEX_FCHE, 0x00000777},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_ISDB_CNT, 0x00000182},
+ {REG_A6XX_RBBM_RAC_THRESHOLD_CNT, 0x00000000},
+ {REG_A6XX_RBBM_SP_HYST_CNT, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
+ {},
+};
+
static const struct adreno_reglist a630_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
{REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
@@ -448,7 +501,6 @@ static const struct adreno_reglist a690_hwcg[] = {
{REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
{REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555},
- {REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL, 0x20200},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111},
{REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555},
{}
@@ -491,7 +543,6 @@ static const u32 a630_protect_regs[] = {
};
DECLARE_ADRENO_PROTECT(a630_protect, 32);
-/* These are for a620 and a650 */
static const u32 a650_protect_regs[] = {
A6XX_PROTECT_RDONLY(0x00000, 0x04ff),
A6XX_PROTECT_RDONLY(0x00501, 0x0005),
@@ -636,6 +687,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a612_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00080000,
},
/*
* There are (at least) three SoCs implementing A610: SM6125
@@ -652,6 +705,35 @@ static const struct adreno_info a6xx_gpus[] = {
{ 127, 4 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06010500),
+ .family = ADRENO_6XX_GEN1,
+ .revn = 615,
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .init = a6xx_gpu_init,
+ .zapfw = "a615_zap.mdt",
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a615_hwcg,
+ .protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
+ },
+ .speedbins = ADRENO_SPEEDBINS(
+ /*
+ * The default speed bin (0) has the same values as
+ * speed bin 90, which goes up to 432 MHz.
+ */
+ { 0, 0 },
+ { 90, 0 },
+ { 105, 1 },
+ { 146, 2 },
+ { 163, 3 },
+ ),
+ }, {
.machine = "qcom,sm7150",
.chip_ids = ADRENO_CHIP_IDS(0x06010800),
.family = ADRENO_6XX_GEN1,
@@ -667,6 +749,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -689,6 +773,8 @@ static const struct adreno_info a6xx_gpus[] = {
.init = a6xx_gpu_init,
.a6xx = &(const struct a6xx_info) {
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -711,6 +797,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -733,6 +821,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -755,6 +845,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a615_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00000222,
+ .prim_fifo_threshold = 0x00018000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -764,6 +856,30 @@ static const struct adreno_info a6xx_gpus[] = {
{ 180, 1 },
),
}, {
+ .chip_ids = ADRENO_CHIP_IDS(0x06020100),
+ .family = ADRENO_6XX_GEN3,
+ .fw = {
+ [ADRENO_FW_SQE] = "a650_sqe.fw",
+ [ADRENO_FW_GMU] = "a621_gmu.bin",
+ },
+ .gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
+ .quirks = ADRENO_QUIRK_HAS_CACHED_COHERENT |
+ ADRENO_QUIRK_HAS_HW_APRIV,
+ .init = a6xx_gpu_init,
+ .zapfw = "a620_zap.mbn",
+ .a6xx = &(const struct a6xx_info) {
+ .hwcg = a620_hwcg,
+ .protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00010000,
+ },
+ .address_space_size = SZ_16G,
+ .speedbins = ADRENO_SPEEDBINS(
+ { 0, 0 },
+ { 137, 1 },
+ ),
+ }, {
.chip_ids = ADRENO_CHIP_IDS(
0x06030001,
0x06030002
@@ -782,6 +898,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a630_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00180000,
},
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06040001),
@@ -799,6 +917,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00180000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -821,6 +941,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a650_hwcg,
.protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00300200,
},
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
@@ -846,6 +968,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gmu_cgc_mode = 0x00020000,
+ .prim_fifo_threshold = 0x00300200,
},
.address_space_size = SZ_16G,
}, {
@@ -864,11 +988,14 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a660_hwcg,
.protect = &a660_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00200200,
},
.address_space_size = SZ_16G,
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
{ 117, 0 },
+ { 129, 4 },
{ 172, 2 }, /* Called speedbin 1 downstream, but let's not break things! */
{ 190, 1 },
),
@@ -888,6 +1015,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a640_hwcg,
.protect = &a630_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x00200200,
},
}, {
.chip_ids = ADRENO_CHIP_IDS(0x06090000),
@@ -905,6 +1034,8 @@ static const struct adreno_info a6xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a690_hwcg,
.protect = &a690_protect,
+ .gmu_cgc_mode = 0x00020200,
+ .prim_fifo_threshold = 0x00800200,
},
.address_space_size = SZ_16G,
}
@@ -1165,6 +1296,8 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a702_hwcg,
.protect = &a650_protect,
+ .gmu_cgc_mode = 0x00020202,
+ .prim_fifo_threshold = 0x0000c000,
},
.speedbins = ADRENO_SPEEDBINS(
{ 0, 0 },
@@ -1188,6 +1321,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.hwcg = a730_hwcg,
.protect = &a730_protect,
+ .gmu_cgc_mode = 0x00020000,
},
.address_space_size = SZ_16G,
}, {
@@ -1207,6 +1341,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.gmu_chipid = 0x7020100,
+ .gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
}, {
@@ -1225,6 +1360,7 @@ static const struct adreno_info a7xx_gpus[] = {
.hwcg = a740_hwcg,
.protect = &a730_protect,
.gmu_chipid = 0x7050001,
+ .gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_256G,
}, {
@@ -1243,6 +1379,7 @@ static const struct adreno_info a7xx_gpus[] = {
.a6xx = &(const struct a6xx_info) {
.protect = &a730_protect,
.gmu_chipid = 0x7090100,
+ .gmu_cgc_mode = 0x00020202,
},
.address_space_size = SZ_16G,
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index cb538a262d1c..37927bdd6fbe 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -423,6 +423,20 @@ static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
}
+static void a6xx_gemnoc_workaround(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+
+ /*
+ * GEMNoC can power collapse whilst the GPU is being powered down, resulting
+ * in the power down sequence not being fully executed. That in turn can
+ * prevent CX_GDSC from collapsing. Assert Qactive to avoid this.
+ */
+ if (adreno_is_a621(adreno_gpu) || adreno_is_7c3(adreno_gpu))
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, BIT(0));
+}
+
/* Let the GMU know that we are about to go into slumber */
static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
{
@@ -456,6 +470,8 @@ static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
}
out:
+ a6xx_gemnoc_workaround(gmu);
+
/* Put fence into allow mode */
gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
return ret;
@@ -525,8 +541,7 @@ static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
if (IS_ERR(pdcptr))
goto err;
- if (adreno_is_a650(adreno_gpu) ||
- adreno_is_a660_family(adreno_gpu) ||
+ if (adreno_is_a650_family(adreno_gpu) ||
adreno_is_a7xx(adreno_gpu))
pdc_in_aop = true;
else if (adreno_is_a618(adreno_gpu) || adreno_is_a640_family(adreno_gpu))
@@ -946,6 +961,8 @@ static void a6xx_gmu_force_off(struct a6xx_gmu *gmu)
/* Force off SPTP in case the GMU is managing it */
a6xx_sptprac_disable(gmu);
+ a6xx_gemnoc_workaround(gmu);
+
/* Make sure there are no outstanding RPMh votes */
a6xx_gmu_rpmh_off(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index bcaec86ac67a..06cab2c6fd66 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -402,7 +402,8 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
const struct adreno_reglist *reg;
unsigned int i;
- u32 val, clock_cntl_on, cgc_mode;
+ u32 cgc_delay, cgc_hyst;
+ u32 val, clock_cntl_on;
if (!(adreno_gpu->info->a6xx->hwcg || adreno_is_a7xx(adreno_gpu)))
return;
@@ -416,16 +417,15 @@ static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
else
clock_cntl_on = 0x8aa8aa82;
- if (adreno_is_a7xx(adreno_gpu)) {
- cgc_mode = adreno_is_a740_family(adreno_gpu) ? 0x20222 : 0x20000;
-
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
- state ? cgc_mode : 0);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
- state ? 0x10111 : 0);
- gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
- state ? 0x5555 : 0);
- }
+ cgc_delay = adreno_is_a615_family(adreno_gpu) ? 0x111 : 0x10111;
+ cgc_hyst = adreno_is_a615_family(adreno_gpu) ? 0x555 : 0x5555;
+
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
+ state ? adreno_gpu->info->a6xx->gmu_cgc_mode : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
+ state ? cgc_delay : 0);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
+ state ? cgc_hyst : 0);
if (!adreno_gpu->info->a6xx->hwcg) {
gpu_write(gpu, REG_A7XX_RBBM_CLOCK_CNTL_GLOBAL, 1);
@@ -493,24 +493,17 @@ static void a6xx_set_cp_protect(struct msm_gpu *gpu)
static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
{
- /* Unknown, introduced with A650 family, related to UBWC mode/ver 4 */
gpu->ubwc_config.rgb565_predicator = 0;
- /* Unknown, introduced with A650 family */
gpu->ubwc_config.uavflagprd_inv = 0;
- /* Whether the minimum access length is 64 bits */
gpu->ubwc_config.min_acc_len = 0;
- /* Entirely magic, per-GPU-gen value */
- gpu->ubwc_config.ubwc_mode = 0;
- /*
- * The Highest Bank Bit value represents the bit of the highest DDR bank.
- * This should ideally use DRAM type detection.
- */
+ gpu->ubwc_config.ubwc_swizzle = 0x6;
+ gpu->ubwc_config.macrotile_mode = 0;
gpu->ubwc_config.highest_bank_bit = 15;
if (adreno_is_a610(gpu)) {
gpu->ubwc_config.highest_bank_bit = 13;
gpu->ubwc_config.min_acc_len = 1;
- gpu->ubwc_config.ubwc_mode = 1;
+ gpu->ubwc_config.ubwc_swizzle = 0x7;
}
if (adreno_is_a618(gpu))
@@ -523,9 +516,18 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
if (adreno_is_a619_holi(gpu))
gpu->ubwc_config.highest_bank_bit = 13;
+ if (adreno_is_a621(gpu)) {
+ gpu->ubwc_config.highest_bank_bit = 13;
+ gpu->ubwc_config.amsbc = 1;
+ gpu->ubwc_config.uavflagprd_inv = 2;
+ }
+
if (adreno_is_a640_family(gpu))
gpu->ubwc_config.amsbc = 1;
+ if (adreno_is_a680(gpu))
+ gpu->ubwc_config.macrotile_mode = 1;
+
if (adreno_is_a650(gpu) ||
adreno_is_a660(gpu) ||
adreno_is_a690(gpu) ||
@@ -536,6 +538,7 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.amsbc = 1;
gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
}
if (adreno_is_7c3(gpu)) {
@@ -543,12 +546,12 @@ static void a6xx_calc_ubwc_config(struct adreno_gpu *gpu)
gpu->ubwc_config.amsbc = 1;
gpu->ubwc_config.rgb565_predicator = 1;
gpu->ubwc_config.uavflagprd_inv = 2;
+ gpu->ubwc_config.macrotile_mode = 1;
}
if (adreno_is_a702(gpu)) {
gpu->ubwc_config.highest_bank_bit = 14;
gpu->ubwc_config.min_acc_len = 1;
- gpu->ubwc_config.ubwc_mode = 0;
}
}
@@ -564,21 +567,26 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
u32 hbb = adreno_gpu->ubwc_config.highest_bank_bit - 13;
u32 hbb_hi = hbb >> 2;
u32 hbb_lo = hbb & 3;
+ u32 ubwc_mode = adreno_gpu->ubwc_config.ubwc_swizzle & 1;
+ u32 level2_swizzling_dis = !(adreno_gpu->ubwc_config.ubwc_swizzle & 2);
gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL,
+ level2_swizzling_dis << 12 |
adreno_gpu->ubwc_config.rgb565_predicator << 11 |
hbb_hi << 10 | adreno_gpu->ubwc_config.amsbc << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
- hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
+ hbb_lo << 1 | ubwc_mode);
- gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, hbb_hi << 4 |
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL,
+ level2_swizzling_dis << 6 | hbb_hi << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
- hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
+ hbb_lo << 1 | ubwc_mode);
- gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, hbb_hi << 10 |
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL,
+ level2_swizzling_dis << 12 | hbb_hi << 10 |
adreno_gpu->ubwc_config.uavflagprd_inv << 4 |
adreno_gpu->ubwc_config.min_acc_len << 3 |
- hbb_lo << 1 | adreno_gpu->ubwc_config.ubwc_mode);
+ hbb_lo << 1 | ubwc_mode);
if (adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A7XX_GRAS_NC_MODE_CNTL,
@@ -586,6 +594,9 @@ static void a6xx_set_ubwc_config(struct msm_gpu *gpu)
gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL,
adreno_gpu->ubwc_config.min_acc_len << 23 | hbb_lo << 21);
+
+ gpu_write(gpu, REG_A6XX_RBBM_NC_MODE_CNTL,
+ adreno_gpu->ubwc_config.macrotile_mode);
}
static int a6xx_cp_init(struct msm_gpu *gpu)
@@ -976,25 +987,11 @@ static int hw_init(struct msm_gpu *gpu)
} else if (!adreno_is_a7xx(adreno_gpu))
gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
- /* Setting the primFifo thresholds default values,
- * and vccCacheSkipDis=1 bit (0x200) for A640 and newer
- */
- if (adreno_is_a702(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x0000c000);
- else if (adreno_is_a690(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00800200);
- else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
- else if (adreno_is_a640_family(adreno_gpu) || adreno_is_7c3(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00200200);
- else if (adreno_is_a650(adreno_gpu) || adreno_is_a660(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00300200);
- else if (adreno_is_a619(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00018000);
- else if (adreno_is_a610(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00080000);
- else if (!adreno_is_a7xx(adreno_gpu))
- gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, 0x00180000);
+
+ /* Set the default primFifo threshold values */
+ if (adreno_gpu->info->a6xx->prim_fifo_threshold)
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL,
+ adreno_gpu->info->a6xx->prim_fifo_threshold);
/* Set the AHB default slave response to "ERROR" */
gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
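A worked packing of RB_NC_MODE_CNTL from this hunk's bit layout, using the a650-family values set in a6xx_calc_ubwc_config() (plain C; the printf makes the resulting register word inspectable):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    unsigned highest_bank_bit = 15, ubwc_swizzle = 0x6;
    unsigned rgb565_predicator = 1, amsbc = 1, min_acc_len = 0;

    unsigned hbb = highest_bank_bit - 13;
    unsigned hbb_hi = hbb >> 2, hbb_lo = hbb & 3;
    unsigned ubwc_mode = ubwc_swizzle & 1;
    unsigned level2_swizzling_dis = !(ubwc_swizzle & 2);

    uint32_t val = level2_swizzling_dis << 12 |
                   rgb565_predicator << 11 |
                   hbb_hi << 10 | amsbc << 4 |
                   min_acc_len << 3 |
                   hbb_lo << 1 | ubwc_mode;

    printf("RB_NC_MODE_CNTL = 0x%08x\n", val); /* prints 0x00000814 */
    return 0;
}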
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index e3e5c53ae8af..0fb7febf70e7 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -22,6 +22,8 @@ struct a6xx_info {
const struct adreno_reglist *hwcg;
const struct adreno_protect *protect;
u32 gmu_chipid;
+ u32 gmu_cgc_mode;
+ u32 prim_fifo_threshold;
};
struct a6xx_gpu {
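Moving gmu_cgc_mode and prim_fifo_threshold into the per-GPU catalog replaces a growing if/else ladder in hw_init with a plain table lookup. A tiny sketch of that data-driven shape, reusing the a690 values from the catalog diff (printf stands in for gpu_write/gmu_write):

#include <stdint.h>
#include <stdio.h>

struct a6xx_info_sketch {
    uint32_t gmu_cgc_mode;
    uint32_t prim_fifo_threshold;
};

static const struct a6xx_info_sketch a690 = {
    .gmu_cgc_mode = 0x00020200,
    .prim_fifo_threshold = 0x00800200,
};

static void hw_init_sketch(const struct a6xx_info_sketch *info)
{
    /* only program the threshold when the catalog provides one */
    if (info->prim_fifo_threshold)
        printf("PC_DBG_ECO_CNTL <- 0x%08x\n", info->prim_fifo_threshold);
    printf("CGC_MODE_CNTL   <- 0x%08x\n", info->gmu_cgc_mode);
}

int main(void)
{
    hw_init_sketch(&a690);
    return 0;
}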
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 789a11416f7a..0fcae53c0b14 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -388,18 +388,18 @@ static void a7xx_get_debugbus_blocks(struct msm_gpu *gpu,
const u32 *debugbus_blocks, *gbif_debugbus_blocks;
int i;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
debugbus_blocks = gen7_0_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_0_0_debugbus_blocks);
gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
debugbus_blocks = gen7_2_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_2_0_debugbus_blocks);
gbif_debugbus_blocks = a7xx_gbif_debugbus_blocks;
gbif_debugbus_blocks_count = ARRAY_SIZE(a7xx_gbif_debugbus_blocks);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
debugbus_blocks = gen7_9_0_debugbus_blocks;
debugbus_blocks_count = ARRAY_SIZE(gen7_9_0_debugbus_blocks);
gbif_debugbus_blocks = gen7_9_0_gbif_debugbus_blocks;
@@ -509,7 +509,7 @@ static void a6xx_get_debugbus(struct msm_gpu *gpu,
const struct a6xx_debugbus_block *cx_debugbus_blocks;
if (adreno_is_a7xx(adreno_gpu)) {
- BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)));
+ BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
cx_debugbus_blocks = a7xx_cx_debugbus_blocks;
nr_cx_debugbus_blocks = ARRAY_SIZE(a7xx_cx_debugbus_blocks);
} else {
@@ -660,13 +660,16 @@ static void a7xx_get_dbgahb_clusters(struct msm_gpu *gpu,
const struct gen7_sptp_cluster_registers *dbgahb_clusters;
unsigned dbgahb_clusters_size;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
dbgahb_clusters = gen7_0_0_sptp_clusters;
dbgahb_clusters_size = ARRAY_SIZE(gen7_0_0_sptp_clusters);
- } else {
- BUG_ON(!adreno_is_a740_family(adreno_gpu));
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
dbgahb_clusters = gen7_2_0_sptp_clusters;
dbgahb_clusters_size = ARRAY_SIZE(gen7_2_0_sptp_clusters);
+ } else {
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
+ dbgahb_clusters = gen7_9_0_sptp_clusters;
+ dbgahb_clusters_size = ARRAY_SIZE(gen7_9_0_sptp_clusters);
}
a6xx_state->dbgahb_clusters = state_kcalloc(a6xx_state,
@@ -818,14 +821,14 @@ static void a7xx_get_clusters(struct msm_gpu *gpu,
const struct gen7_cluster_registers *clusters;
unsigned clusters_size;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
clusters = gen7_0_0_clusters;
clusters_size = ARRAY_SIZE(gen7_0_0_clusters);
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
clusters = gen7_2_0_clusters;
clusters_size = ARRAY_SIZE(gen7_2_0_clusters);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
clusters = gen7_9_0_clusters;
clusters_size = ARRAY_SIZE(gen7_9_0_clusters);
}
@@ -893,7 +896,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
if (WARN_ON(datasize > A6XX_CD_DATA_SIZE))
return;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 3);
}
@@ -923,7 +926,7 @@ static void a7xx_get_shader_block(struct msm_gpu *gpu,
datasize);
out:
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
gpu_rmw(gpu, REG_A7XX_SP_DBG_CNTL, GENMASK(1, 0), 0);
}
}
@@ -956,14 +959,14 @@ static void a7xx_get_shaders(struct msm_gpu *gpu,
unsigned num_shader_blocks;
int i;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
shader_blocks = gen7_0_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_0_0_shader_blocks);
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
shader_blocks = gen7_2_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_2_0_shader_blocks);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
shader_blocks = gen7_9_0_shader_blocks;
num_shader_blocks = ARRAY_SIZE(gen7_9_0_shader_blocks);
}
@@ -1348,14 +1351,14 @@ static void a7xx_get_registers(struct msm_gpu *gpu,
const u32 *pre_crashdumper_regs;
const struct gen7_reg_list *reglist;
- if (adreno_is_a730(adreno_gpu)) {
+ if (adreno_gpu->info->family == ADRENO_7XX_GEN1) {
reglist = gen7_0_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
- } else if (adreno_is_a740_family(adreno_gpu)) {
+ } else if (adreno_gpu->info->family == ADRENO_7XX_GEN2) {
reglist = gen7_2_0_reg_list;
pre_crashdumper_regs = gen7_0_0_pre_crashdumper_gpu_registers;
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
reglist = gen7_9_0_reg_list;
pre_crashdumper_regs = gen7_9_0_pre_crashdumper_gpu_registers;
}
@@ -1405,8 +1408,7 @@ static void a7xx_get_post_crashdumper_registers(struct msm_gpu *gpu,
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
const u32 *regs;
- BUG_ON(!(adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu) ||
- adreno_is_a750(adreno_gpu)));
+ BUG_ON(adreno_gpu->info->family > ADRENO_7XX_GEN3);
regs = gen7_0_0_post_crashdumper_registers;
a7xx_get_ahb_gpu_registers(gpu,
@@ -1514,11 +1516,11 @@ static void a7xx_get_indexed_registers(struct msm_gpu *gpu,
const struct a6xx_indexed_registers *indexed_regs;
int i, indexed_count, mempool_count;
- if (adreno_is_a730(adreno_gpu) || adreno_is_a740_family(adreno_gpu)) {
+ if (adreno_gpu->info->family <= ADRENO_7XX_GEN2) {
indexed_regs = a7xx_indexed_reglist;
indexed_count = ARRAY_SIZE(a7xx_indexed_reglist);
} else {
- BUG_ON(!adreno_is_a750(adreno_gpu));
+ BUG_ON(adreno_gpu->info->family != ADRENO_7XX_GEN3);
indexed_regs = gen7_9_0_cp_indexed_reg_list;
indexed_count = ARRAY_SIZE(gen7_9_0_cp_indexed_reg_list);
}
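
Reviewer note: every hunk in this file applies the same conversion — the per-SKU helpers (adreno_is_a730() and friends) are replaced by comparisons against the ordered ADRENO_7XX_GENn family enum, so adding a new generation no longer means touching a helper at every table-selection site, and range tests such as family <= ADRENO_7XX_GEN2 become possible. A standalone sketch of that dispatch shape, with illustrative table names (not the kernel structs):

    #include <stdio.h>

    enum adreno_family { ADRENO_7XX_GEN1, ADRENO_7XX_GEN2, ADRENO_7XX_GEN3 };

    /* Stand-in for picking one of the gen7_0_0/gen7_2_0/gen7_9_0 tables. */
    static const char *pick_reglist(enum adreno_family family)
    {
        switch (family) {
        case ADRENO_7XX_GEN1: return "gen7_0_0";
        case ADRENO_7XX_GEN2: return "gen7_2_0";
        case ADRENO_7XX_GEN3: return "gen7_9_0";
        }
        return NULL;
    }

    int main(void)
    {
        enum adreno_family f = ADRENO_7XX_GEN2;

        /* Ordered values also allow range tests, as in the
         * a7xx_get_indexed_registers() hunk above. */
        if (f <= ADRENO_7XX_GEN2)
            printf("pre-GEN3 path, tables: %s\n", pick_reglist(f));
        return 0;
    }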
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
index 260d66eccfec..9a327d543f27 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gen7_9_0_snapshot.h
@@ -1303,7 +1303,7 @@ static struct a6xx_indexed_registers gen7_9_0_cp_indexed_reg_list[] = {
REG_A6XX_CP_ROQ_DBG_DATA, 0x00800},
{ "CP_UCODE_DBG_DATA", REG_A6XX_CP_SQE_UCODE_DBG_ADDR,
REG_A6XX_CP_SQE_UCODE_DBG_DATA, 0x08000},
- { "CP_BV_SQE_STAT_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
+ { "CP_BV_DRAW_STATE_ADDR", REG_A7XX_CP_BV_DRAW_STATE_ADDR,
REG_A7XX_CP_BV_DRAW_STATE_DATA, 0x00200},
{ "CP_BV_ROQ_DBG_ADDR", REG_A7XX_CP_BV_ROQ_DBG_ADDR,
REG_A7XX_CP_BV_ROQ_DBG_DATA, 0x00800},
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index ecc3fc5cec22..465a4cd14a43 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -379,6 +379,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_file_private *ctx,
case MSM_PARAM_RAYTRACING:
*value = adreno_gpu->has_ray_tracing;
return 0;
+ case MSM_PARAM_UBWC_SWIZZLE:
+ *value = adreno_gpu->ubwc_config.ubwc_swizzle;
+ return 0;
+ case MSM_PARAM_MACROTILE_MODE:
+ *value = adreno_gpu->ubwc_config.macrotile_mode;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
@@ -478,7 +484,7 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
ret = request_firmware_direct(&fw, fwname, drm->dev);
if (!ret) {
DRM_DEV_INFO(drm->dev, "loaded %s from legacy location\n",
- newname);
+ fwname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
@@ -688,11 +694,9 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
size = j + 1;
if (size) {
- state->ring[i].data = kvmalloc(size << 2, GFP_KERNEL);
- if (state->ring[i].data) {
- memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data = kvmemdup(gpu->rb[i]->start, size << 2, GFP_KERNEL);
+ if (state->ring[i].data)
state->ring[i].data_size = size << 2;
- }
}
}
@@ -1083,6 +1087,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->chip_id = config->chip_id;
gpu->allow_relocs = config->info->family < ADRENO_6XX_GEN1;
+ gpu->pdev = pdev;
/* Only handle the core clock when GMU is not in use (or is absent). */
if (adreno_has_gmu_wrapper(adreno_gpu) ||
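
The two new get_param cases expose the UBWC swizzle bitmask and macrotile mode to userspace, which needs them to mirror the kernel's UBWC programming when tiling buffers. A sketch of how a libdrm client might read them — MSM_PARAM_UBWC_SWIZZLE and MSM_PARAM_MACROTILE_MODE are assumed to come from the UAPI half of this series; the ioctl and struct are the long-standing DRM_MSM_GET_PARAM interface:

    #include <stdint.h>
    #include <xf86drm.h>
    #include <msm_drm.h>	/* via libdrm's include path */

    /* MSM_PARAM_UBWC_SWIZZLE / MSM_PARAM_MACROTILE_MODE are assumed to
     * come from the UAPI header matching this series. */
    static int msm_get_param(int fd, uint32_t param, uint64_t *value)
    {
        struct drm_msm_param req = {
            .pipe = MSM_PIPE_3D0,
            .param = param,
        };
        int ret;

        ret = drmCommandWriteRead(fd, DRM_MSM_GET_PARAM, &req, sizeof(req));
        if (ret)
            return ret;

        *value = req.value;
        return 0;
    }

Usage would be msm_get_param(fd, MSM_PARAM_UBWC_SWIZZLE, &v) on an opened /dev/dri/renderD* node.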
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 1ab523a163a0..58d7e7915c57 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -191,12 +191,42 @@ struct adreno_gpu {
const struct firmware *fw[ADRENO_FW_MAX];
struct {
+ /**
+ * @rgb565_predicator: Unknown, introduced with A650 family,
+ * related to UBWC mode/ver 4
+ */
u32 rgb565_predicator;
+ /** @uavflagprd_inv: Unknown, introduced with A650 family */
u32 uavflagprd_inv;
+ /** @min_acc_len: Whether the minimum access length is 64 bits */
u32 min_acc_len;
- u32 ubwc_mode;
+ /**
+ * @ubwc_swizzle: Whether to enable level 1, 2 & 3 bank swizzling.
+ *
+ * UBWC 1.0 always enables all three levels.
+ * UBWC 2.0 removes level 1 bank swizzling, leaving levels 2 & 3.
+ * UBWC 4.0 adds the optional ability to disable levels 2 & 3.
+ *
+ * This is a bitmask where BIT(0) enables level 1, BIT(1)
+ * controls level 2, and BIT(2) enables level 3.
+ */
+ u32 ubwc_swizzle;
+ /**
+ * @highest_bank_bit: Highest Bank Bit
+ *
+ * The Highest Bank Bit value represents the bit of the highest
+ * DDR bank. This should ideally use DRAM type detection.
+ */
u32 highest_bank_bit;
u32 amsbc;
+ /**
+ * @macrotile_mode: Macrotile Mode
+ *
+ * Whether to use 4-channel macrotiling mode or the newer
+ * 8-channel macrotiling mode introduced in UBWC 3.1. 0 is
+ * 4-channel and 1 is 8-channel.
+ */
+ u32 macrotile_mode;
} ubwc_config;
/*
@@ -294,6 +324,12 @@ static inline bool adreno_is_a306(const struct adreno_gpu *gpu)
return adreno_is_revn(gpu, 307);
}
+static inline bool adreno_is_a306a(const struct adreno_gpu *gpu)
+{
+ /* a306a (marketing name is a308) */
+ return adreno_is_revn(gpu, 308);
+}
+
static inline bool adreno_is_a320(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 320);
@@ -384,6 +420,11 @@ static inline int adreno_is_a619_holi(const struct adreno_gpu *gpu)
return adreno_is_a619(gpu) && adreno_has_gmu_wrapper(gpu);
}
+static inline int adreno_is_a621(const struct adreno_gpu *gpu)
+{
+ return gpu->info->chip_ids[0] == 0x06020100;
+}
+
static inline int adreno_is_a630(const struct adreno_gpu *gpu)
{
return adreno_is_revn(gpu, 630);
@@ -433,7 +474,13 @@ static inline int adreno_is_a610_family(const struct adreno_gpu *gpu)
return adreno_is_a610(gpu) || adreno_is_a702(gpu);
}
-/* check for a615, a616, a618, a619 or any a630 derivatives */
+/* TODO: 615/616 */
+static inline int adreno_is_a615_family(const struct adreno_gpu *gpu)
+{
+ return adreno_is_a618(gpu) ||
+ adreno_is_a619(gpu);
+}
+
static inline int adreno_is_a630_family(const struct adreno_gpu *gpu)
{
if (WARN_ON_ONCE(!gpu->info))
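
Restating the new ubwc_swizzle kerneldoc above as concrete values, for quick reference — the macro names here are hypothetical, only the bit assignments come from the comment:

    /* BIT() as in the kernel, restated for a standalone illustration. */
    #define BIT(n)			(1u << (n))

    #define UBWC_SWIZZLE_LVL1	BIT(0)	/* level 1 bank swizzling */
    #define UBWC_SWIZZLE_LVL2	BIT(1)	/* level 2 bank swizzling */
    #define UBWC_SWIZZLE_LVL3	BIT(2)	/* level 3 bank swizzling */

    /* UBWC 1.0: all three levels always enabled. */
    #define UBWC10_SWIZZLE	(UBWC_SWIZZLE_LVL1 | UBWC_SWIZZLE_LVL2 | \
                             UBWC_SWIZZLE_LVL3)			/* 0x7 */
    /* UBWC 2.0: level 1 removed, levels 2 & 3 remain. */
    #define UBWC20_SWIZZLE	(UBWC_SWIZZLE_LVL2 | UBWC_SWIZZLE_LVL3)	/* 0x6 */
    /* UBWC 4.0: levels 2 & 3 become optional and may also be cleared. */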
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
index 145f3d5953a3..6ccfde82fecd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_0_sm8150.h
@@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sm8150_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -290,6 +291,21 @@ static const struct dpu_dsc_cfg sm8150_dsc[] = {
},
};
+static const struct dpu_wb_cfg sm8150_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm8150_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -384,6 +400,8 @@ const struct dpu_mdss_cfg dpu_sm8150_cfg = {
.pingpong = sm8150_pp,
.merge_3d_count = ARRAY_SIZE(sm8150_merge_3d),
.merge_3d = sm8150_merge_3d,
+ .wb_count = ARRAY_SIZE(sm8150_wb),
+ .wb = sm8150_wb,
.intf_count = ARRAY_SIZE(sm8150_intf),
.intf = sm8150_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
index 9e3bec8bc121..bab19ddd1d4f 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_1_sc8180x.h
@@ -33,6 +33,7 @@ static const struct dpu_mdp_cfg sc8180x_mdp = {
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 },
[DPU_CLK_CTRL_DMA3] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -297,6 +298,21 @@ static const struct dpu_dsc_cfg sc8180x_dsc[] = {
},
};
+static const struct dpu_wb_cfg sc8180x_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 4096,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sc8180x_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -410,6 +426,8 @@ const struct dpu_mdss_cfg dpu_sc8180x_cfg = {
.pingpong = sc8180x_pp,
.merge_3d_count = ARRAY_SIZE(sc8180x_merge_3d),
.merge_3d = sc8180x_merge_3d,
+ .wb_count = ARRAY_SIZE(sc8180x_wb),
+ .wb = sc8180x_wb,
.intf_count = ARRAY_SIZE(sc8180x_intf),
.intf = sc8180x_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
index 76b2ec0d2489..d039b96beb97 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_5_4_sm6125.h
@@ -27,6 +27,7 @@ static const struct dpu_mdp_cfg sm6125_mdp = {
[DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 },
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
},
};
@@ -139,6 +140,21 @@ static const struct dpu_pingpong_cfg sm6125_pp[] = {
},
};
+static const struct dpu_wb_cfg sm6125_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SDM845_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 2160,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6125_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -210,6 +226,8 @@ const struct dpu_mdss_cfg dpu_sm6125_cfg = {
.dspp = sm6125_dspp,
.pingpong_count = ARRAY_SIZE(sm6125_pp),
.pingpong = sm6125_pp,
+ .wb_count = ARRAY_SIZE(sm6125_wb),
+ .wb = sm6125_wb,
.intf_count = ARRAY_SIZE(sm6125_intf),
.intf = sm6125_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
index e17a30be7525..0502cee2f116 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_6_4_sm6350.h
@@ -26,6 +26,7 @@ static const struct dpu_mdp_cfg sm6350_mdp = {
[DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 },
[DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 },
[DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2c4, .bit_off = 8 },
+ [DPU_CLK_CTRL_WB2] = { .reg_off = 0x2bc, .bit_off = 16 },
[DPU_CLK_CTRL_REG_DMA] = { .reg_off = 0x2bc, .bit_off = 20 },
},
};
@@ -145,6 +146,21 @@ static const struct dpu_dsc_cfg sm6350_dsc[] = {
},
};
+static const struct dpu_wb_cfg sm6350_wb[] = {
+ {
+ .name = "wb_2", .id = WB_2,
+ .base = 0x65000, .len = 0x2c8,
+ .features = WB_SM8250_MASK,
+ .format_list = wb2_formats_rgb,
+ .num_formats = ARRAY_SIZE(wb2_formats_rgb),
+ .clk_ctrl = DPU_CLK_CTRL_WB2,
+ .xin_id = 6,
+ .vbif_idx = VBIF_RT,
+ .maxlinewidth = 1920,
+ .intr_wb_done = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 4),
+ },
+};
+
static const struct dpu_intf_cfg sm6350_intf[] = {
{
.name = "intf_0", .id = INTF_0,
@@ -218,6 +234,8 @@ const struct dpu_mdss_cfg dpu_sm6350_cfg = {
.dsc = sm6350_dsc,
.pingpong_count = ARRAY_SIZE(sm6350_pp),
.pingpong = sm6350_pp,
+ .wb_count = ARRAY_SIZE(sm6350_wb),
+ .wb = sm6350_wb,
.intf_count = ARRAY_SIZE(sm6350_intf),
.intf = sm6350_intf,
.vbif_count = ARRAY_SIZE(sdm845_vbif),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index 648c8d0a4c36..dcb4fd85e73b 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -96,14 +96,16 @@
#define INTF_SC7280_MASK (INTF_SC7180_MASK)
-#define WB_SM8250_MASK (BIT(DPU_WB_LINE_MODE) | \
+#define WB_SDM845_MASK (BIT(DPU_WB_LINE_MODE) | \
BIT(DPU_WB_UBWC) | \
BIT(DPU_WB_YUV_CONFIG) | \
BIT(DPU_WB_PIPE_ALPHA) | \
BIT(DPU_WB_XY_ROI_OFFSET) | \
BIT(DPU_WB_QOS) | \
BIT(DPU_WB_QOS_8LVL) | \
- BIT(DPU_WB_CDP) | \
+ BIT(DPU_WB_CDP))
+
+#define WB_SM8250_MASK (WB_SDM845_MASK | \
BIT(DPU_WB_INPUT_CTRL))
#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
index 6e2ac50b94a4..0f40eea7f5e2 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -2,6 +2,8 @@
/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
*/
+#include <linux/bitfield.h>
+
#include <drm/drm_managed.h>
#include "dpu_hwio.h"
@@ -231,8 +233,38 @@ static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
}
+static void dpu_hw_dp_phy_intf_sel(struct dpu_hw_mdp *mdp,
+ enum dpu_dp_phy_sel phys[2])
+{
+ struct dpu_hw_blk_reg_map *c = &mdp->hw;
+ unsigned int intf;
+ u32 sel = 0;
+
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF0, phys[0]);
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF1, phys[1]);
+
+ for (intf = 0; intf < 2; intf++) {
+ switch (phys[intf]) {
+ case DPU_DP_PHY_0:
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY0, intf + 1);
+ break;
+ case DPU_DP_PHY_1:
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY1, intf + 1);
+ break;
+ case DPU_DP_PHY_2:
+ sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY2, intf + 1);
+ break;
+ default:
+ /* ignore */
+ break;
+ }
+ }
+
+ DPU_REG_WRITE(c, MDP_DP_PHY_INTF_SEL, sel);
+}
+
static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
- unsigned long cap)
+ unsigned long cap, const struct dpu_mdss_version *mdss_rev)
{
ops->setup_split_pipe = dpu_hw_setup_split_pipe;
ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
@@ -245,6 +277,9 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
ops->get_safe_status = dpu_hw_get_safe_status;
+ if (mdss_rev->core_major_ver >= 5)
+ ops->dp_phy_intf_sel = dpu_hw_dp_phy_intf_sel;
+
if (cap & BIT(DPU_MDP_AUDIO_SELECT))
ops->intf_audio_select = dpu_hw_intf_audio_select;
}
@@ -252,7 +287,7 @@ static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
- const struct dpu_mdss_cfg *m)
+ const struct dpu_mdss_version *mdss_rev)
{
struct dpu_hw_mdp *mdp;
@@ -270,7 +305,7 @@ struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
* Assign ops
*/
mdp->caps = cfg;
- _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features, mdss_rev);
return mdp;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
index 5c9a7ede991e..f1ab9fd106e5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -67,6 +67,13 @@ struct dpu_vsync_source_cfg {
enum dpu_vsync_source vsync_source;
};
+enum dpu_dp_phy_sel {
+ DPU_DP_PHY_NONE,
+ DPU_DP_PHY_0,
+ DPU_DP_PHY_1,
+ DPU_DP_PHY_2,
+};
+
/**
* struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
* Assumption is these functions will be called after clocks are enabled.
@@ -126,6 +133,13 @@ struct dpu_hw_mdp_ops {
struct dpu_danger_safe_status *status);
/**
+ * dp_phy_intf_sel - configure intf to phy mapping
+ * @mdp: mdp top context driver
+ * @phys: list of phys the DP interfaces should be connected to. 0 disables the INTF.
+ */
+ void (*dp_phy_intf_sel)(struct dpu_hw_mdp *mdp, enum dpu_dp_phy_sel phys[2]);
+
+ /**
* intf_audio_select - select the external interface for audio
* @mdp: mdp top context driver
*/
@@ -148,12 +162,12 @@ struct dpu_hw_mdp {
* @dev: Corresponding device for devres management
* @cfg: MDP TOP configuration from catalog
* @addr: Mapped register io address of MDP
- * @m: Pointer to mdss catalog data
+ * @mdss_rev: dpu core's major and minor versions
*/
struct dpu_hw_mdp *dpu_hw_mdptop_init(struct drm_device *dev,
const struct dpu_mdp_cfg *cfg,
void __iomem *addr,
- const struct dpu_mdss_cfg *m);
+ const struct dpu_mdss_version *mdss_rev);
void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
index 5acd5683d25a..054fe097ebf8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -60,6 +60,13 @@
#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
#define DCE_SEL 0x450
+#define MDP_DP_PHY_INTF_SEL 0x460
+#define MDP_DP_PHY_INTF_SEL_INTF0 GENMASK(2, 0)
+#define MDP_DP_PHY_INTF_SEL_INTF1 GENMASK(5, 3)
+#define MDP_DP_PHY_INTF_SEL_PHY0 GENMASK(8, 6)
+#define MDP_DP_PHY_INTF_SEL_PHY1 GENMASK(11, 9)
+#define MDP_DP_PHY_INTF_SEL_PHY2 GENMASK(14, 12)
+
#define MDP_PERIPH_TOP0 MDP_WD_TIMER_0_CTL
#define MDP_PERIPH_TOP0_END CLK_CTRL3
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index d1e2143110f2..9bcae53c4f45 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1146,7 +1146,7 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
dpu_kms->hw_mdp = dpu_hw_mdptop_init(dev,
dpu_kms->catalog->mdp,
dpu_kms->mmio,
- dpu_kms->catalog);
+ dpu_kms->catalog->mdss_ver);
if (IS_ERR(dpu_kms->hw_mdp)) {
rc = PTR_ERR(dpu_kms->hw_mdp);
DPU_ERROR("failed to get hw_mdp: %d\n", rc);
@@ -1181,6 +1181,16 @@ static int dpu_kms_hw_init(struct msm_kms *kms)
goto err_pm_put;
}
+ /*
+ * We need to program DP <-> PHY relationship only for SC8180X since it
+ * has fewer DP controllers than DP PHYs.
+ * If any other platform requires the same kind of programming, or if
+ * the INTF <-> DP relationship isn't static anymore, this needs to be
+ * configured through the DT.
+ */
+ if (of_device_is_compatible(dpu_kms->pdev->dev.of_node, "qcom,sc8180x-dpu"))
+ dpu_kms->hw_mdp->ops.dp_phy_intf_sel(dpu_kms->hw_mdp, (unsigned int[]){ 1, 2, });
+
dpu_kms->hw_intr = dpu_hw_intr_init(dev, dpu_kms->mmio, dpu_kms->catalog);
if (IS_ERR(dpu_kms->hw_intr)) {
rc = PTR_ERR(dpu_kms->hw_intr);
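
For reference, recomputing the value the SC8180X branch above writes to MDP_DP_PHY_INTF_SEL for phys[] = { DPU_DP_PHY_0, DPU_DP_PHY_1 } (i.e. { 1, 2 }), using the field layout added in dpu_hwio.h. This is a standalone userspace sketch; GENMASK/FIELD_PREP are re-derived locally:

    #include <stdint.h>
    #include <stdio.h>

    /* Local re-derivations of the kernel macros, for illustration only. */
    #define GENMASK(h, l)		(((1u << ((h) - (l) + 1)) - 1) << (l))
    #define FIELD_PREP(mask, val)	(((uint32_t)(val) << __builtin_ctz(mask)) & (mask))

    #define MDP_DP_PHY_INTF_SEL_INTF0	GENMASK(2, 0)
    #define MDP_DP_PHY_INTF_SEL_INTF1	GENMASK(5, 3)
    #define MDP_DP_PHY_INTF_SEL_PHY0	GENMASK(8, 6)
    #define MDP_DP_PHY_INTF_SEL_PHY1	GENMASK(11, 9)

    int main(void)
    {
        /* SC8180X: phys[] = { DPU_DP_PHY_0, DPU_DP_PHY_1 } = { 1, 2 } */
        uint32_t sel = 0;

        sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF0, 1);	/* 0x001 */
        sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_INTF1, 2);	/* 0x010 */
        sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY0, 0 + 1);	/* 0x040 */
        sel |= FIELD_PREP(MDP_DP_PHY_INTF_SEL_PHY1, 1 + 1);	/* 0x400 */

        printf("MDP_DP_PHY_INTF_SEL = 0x%03x\n", sel);	/* prints 0x451 */
        return 0;
    }

The PHYn fields carry intf + 1 so that zero keeps a PHY disconnected, matching the "0 disables the INTF" note in the dpu_hw_top.h kerneldoc.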
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
index 3a7f7edda96b..500b7dc895d0 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_smp.c
@@ -351,7 +351,7 @@ void mdp5_smp_dump(struct mdp5_smp *smp, struct drm_printer *p,
drm_printf(p, "%s:%d\t%d\t%s\n",
pipe2name(pipe), j, inuse,
- plane ? plane->name : NULL);
+ plane ? plane->name : "(null)");
total += inuse;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index 9622e58dce3e..e1228fb093ee 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -119,7 +119,7 @@ struct msm_dp_desc {
};
static const struct msm_dp_desc sc7180_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
@@ -130,9 +130,9 @@ static const struct msm_dp_desc sc7280_dp_descs[] = {
};
static const struct msm_dp_desc sc8180x_dp_descs[] = {
- { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0 },
- { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1 },
- { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2 },
+ { .io_start = 0x0ae90000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
+ { .io_start = 0x0ae98000, .id = MSM_DP_CONTROLLER_1, .wide_bus_supported = true },
+ { .io_start = 0x0ae9a000, .id = MSM_DP_CONTROLLER_2, .wide_bus_supported = true },
{}
};
@@ -149,7 +149,7 @@ static const struct msm_dp_desc sc8280xp_dp_descs[] = {
};
static const struct msm_dp_desc sm8650_dp_descs[] = {
- { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0 },
+ { .io_start = 0x0af54000, .id = MSM_DP_CONTROLLER_0, .wide_bus_supported = true },
{}
};
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 3b59137ca674..031446c87dae 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -135,7 +135,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
- } else {
+ } else if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_1) {
if (pll_freq <= 1000000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq <= 2500000000ULL)
@@ -144,6 +144,16 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
config->pll_clock_inverters = 0x00;
else
config->pll_clock_inverters = 0x40;
+ } else {
+ /* 4.2, 4.3 */
+ if (pll_freq <= 1000000000ULL)
+ config->pll_clock_inverters = 0xa0;
+ else if (pll_freq <= 2500000000ULL)
+ config->pll_clock_inverters = 0x20;
+ else if (pll_freq <= 3500000000ULL)
+ config->pll_clock_inverters = 0x00;
+ else
+ config->pll_clock_inverters = 0x40;
}
config->decimal_div_start = dec;
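
The new else branch gives PLL v4.2/v4.3 an inverter table identical to v4.1's except that the 0x00 tier extends to 3.5 GHz. Restated as a standalone lookup (thresholds and register values copied from the hunk; the helper itself is illustrative):

    #include <stdint.h>

    /* Tier selection for 7 nm DSI PLL v4.2/v4.3, per the hunk above. */
    static uint32_t pll_clock_inverters_v42(uint64_t pll_freq)
    {
        if (pll_freq <= 1000000000ULL)
            return 0xa0;
        if (pll_freq <= 2500000000ULL)
            return 0x20;
        if (pll_freq <= 3500000000ULL)
            return 0x00;
        return 0x40;
    }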
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 24abcb7254cc..0bfee41c2e71 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -549,6 +549,7 @@ static void msm_hdmi_dev_remove(struct platform_device *pdev)
}
static const struct of_device_id msm_hdmi_dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8974_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8974_config },
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 4586baf36415..a62d2aedfbb7 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -137,6 +137,7 @@ enum hdmi_phy_type {
MSM_HDMI_PHY_8960,
MSM_HDMI_PHY_8x74,
MSM_HDMI_PHY_8996,
+ MSM_HDMI_PHY_8998,
MSM_HDMI_PHY_MAX,
};
@@ -154,6 +155,7 @@ extern const struct hdmi_phy_cfg msm_hdmi_phy_8x60_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8960_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8x74_cfg;
extern const struct hdmi_phy_cfg msm_hdmi_phy_8996_cfg;
+extern const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg;
struct hdmi_phy {
struct platform_device *pdev;
@@ -184,6 +186,7 @@ void __exit msm_hdmi_phy_driver_unregister(void);
#ifdef CONFIG_COMMON_CLK
int msm_hdmi_pll_8960_init(struct platform_device *pdev);
int msm_hdmi_pll_8996_init(struct platform_device *pdev);
+int msm_hdmi_pll_8998_init(struct platform_device *pdev);
#else
static inline int msm_hdmi_pll_8960_init(struct platform_device *pdev)
{
@@ -194,6 +197,11 @@ static inline int msm_hdmi_pll_8996_init(struct platform_device *pdev)
{
return -ENODEV;
}
+
+static inline int msm_hdmi_pll_8998_init(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
#endif
/*
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
index 88a3423b7f24..95b3f7535d84 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy.c
@@ -118,6 +118,9 @@ static int msm_hdmi_phy_pll_init(struct platform_device *pdev,
case MSM_HDMI_PHY_8996:
ret = msm_hdmi_pll_8996_init(pdev);
break;
+ case MSM_HDMI_PHY_8998:
+ ret = msm_hdmi_pll_8998_init(pdev);
+ break;
/*
* we don't have PLL support for these, don't report an error for now
*/
@@ -193,6 +196,8 @@ static const struct of_device_id msm_hdmi_phy_dt_match[] = {
.data = &msm_hdmi_phy_8x74_cfg },
{ .compatible = "qcom,hdmi-phy-8996",
.data = &msm_hdmi_phy_8996_cfg },
+ { .compatible = "qcom,hdmi-phy-8998",
+ .data = &msm_hdmi_phy_8998_cfg },
{}
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
new file mode 100644
index 000000000000..0e3a2b16a2ce
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_phy_8998.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2024 Freebox SAS
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/delay.h>
+
+#include "hdmi.h"
+
+#define HDMI_VCO_MAX_FREQ 12000000000UL
+#define HDMI_VCO_MIN_FREQ 8000000000UL
+
+#define HDMI_PCLK_MAX_FREQ 600000000
+#define HDMI_PCLK_MIN_FREQ 25000000
+
+#define HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD 3400000000UL
+#define HDMI_DIG_FREQ_BIT_CLK_THRESHOLD 1500000000UL
+#define HDMI_MID_FREQ_BIT_CLK_THRESHOLD 750000000UL
+#define HDMI_CORECLK_DIV 5
+#define HDMI_DEFAULT_REF_CLOCK 19200000
+#define HDMI_PLL_CMP_CNT 1024
+
+#define HDMI_PLL_POLL_MAX_READS 100
+#define HDMI_PLL_POLL_TIMEOUT_US 150
+
+#define HDMI_NUM_TX_CHANNEL 4
+
+struct hdmi_pll_8998 {
+ struct platform_device *pdev;
+ struct clk_hw clk_hw;
+ unsigned long rate;
+
+ /* pll mmio base */
+ void __iomem *mmio_qserdes_com;
+ /* tx channel base */
+ void __iomem *mmio_qserdes_tx[HDMI_NUM_TX_CHANNEL];
+};
+
+#define hw_clk_to_pll(x) container_of(x, struct hdmi_pll_8998, clk_hw)
+
+struct hdmi_8998_phy_pll_reg_cfg {
+ u32 com_svs_mode_clk_sel;
+ u32 com_hsclk_sel;
+ u32 com_pll_cctrl_mode0;
+ u32 com_pll_rctrl_mode0;
+ u32 com_cp_ctrl_mode0;
+ u32 com_dec_start_mode0;
+ u32 com_div_frac_start1_mode0;
+ u32 com_div_frac_start2_mode0;
+ u32 com_div_frac_start3_mode0;
+ u32 com_integloop_gain0_mode0;
+ u32 com_integloop_gain1_mode0;
+ u32 com_lock_cmp_en;
+ u32 com_lock_cmp1_mode0;
+ u32 com_lock_cmp2_mode0;
+ u32 com_lock_cmp3_mode0;
+ u32 com_core_clk_en;
+ u32 com_coreclk_div_mode0;
+
+ u32 tx_lx_tx_band[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_drv_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_tx_emp_post1_lvl[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_pre_driver_1[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_pre_driver_2[HDMI_NUM_TX_CHANNEL];
+ u32 tx_lx_res_code_offset[HDMI_NUM_TX_CHANNEL];
+
+ u32 phy_mode;
+};
+
+struct hdmi_8998_post_divider {
+ u64 vco_freq;
+ int hsclk_divsel;
+ int vco_ratio;
+ int tx_band_sel;
+ int half_rate_mode;
+};
+
+static inline struct hdmi_phy *pll_get_phy(struct hdmi_pll_8998 *pll)
+{
+ return platform_get_drvdata(pll->pdev);
+}
+
+static inline void hdmi_pll_write(struct hdmi_pll_8998 *pll, int offset,
+ u32 data)
+{
+ writel(data, pll->mmio_qserdes_com + offset);
+}
+
+static inline u32 hdmi_pll_read(struct hdmi_pll_8998 *pll, int offset)
+{
+ return readl(pll->mmio_qserdes_com + offset);
+}
+
+static inline void hdmi_tx_chan_write(struct hdmi_pll_8998 *pll, int channel,
+ int offset, int data)
+{
+ writel(data, pll->mmio_qserdes_tx[channel] + offset);
+}
+
+static inline u32 pll_get_cpctrl(u64 frac_start, unsigned long ref_clk,
+ bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x8;
+
+ return 0x30;
+}
+
+static inline u32 pll_get_rctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x16;
+
+ return 0x18;
+}
+
+static inline u32 pll_get_cctrl(u64 frac_start, bool gen_ssc)
+{
+ if ((frac_start != 0) || gen_ssc)
+ return 0x34;
+
+ return 0x2;
+}
+
+static inline u32 pll_get_integloop_gain(u64 frac_start, u64 bclk, u32 ref_clk,
+ bool gen_ssc)
+{
+ int digclk_divsel = bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD ? 1 : 2;
+ u64 base;
+
+ if ((frac_start != 0) || gen_ssc)
+ base = 0x3F;
+ else
+ base = 0xC4;
+
+ base <<= (digclk_divsel == 2 ? 1 : 0);
+
+ return (base <= 2046 ? base : 2046);
+}
+
+static inline u32 pll_get_pll_cmp(u64 fdata, unsigned long ref_clk)
+{
+ u64 dividend = HDMI_PLL_CMP_CNT * fdata;
+ u32 divisor = ref_clk * 10;
+ u32 rem;
+
+ rem = do_div(dividend, divisor);
+ if (rem > (divisor >> 1))
+ dividend++;
+
+ return dividend - 1;
+}
+
+static inline u64 pll_cmp_to_fdata(u32 pll_cmp, unsigned long ref_clk)
+{
+ u64 fdata = ((u64)pll_cmp) * ref_clk * 10;
+
+ do_div(fdata, HDMI_PLL_CMP_CNT);
+
+ return fdata;
+}
+
+#define HDMI_REF_CLOCK_HZ ((u64)19200000)
+#define HDMI_MHZ_TO_HZ ((u64)1000000)
+static int pll_get_post_div(struct hdmi_8998_post_divider *pd, u64 bclk)
+{
+ u32 const ratio_list[] = {1, 2, 3, 4, 5, 6,
+ 9, 10, 12, 15, 25};
+ u32 const band_list[] = {0, 1, 2, 3};
+ u32 const sz_ratio = ARRAY_SIZE(ratio_list);
+ u32 const sz_band = ARRAY_SIZE(band_list);
+ u32 const cmp_cnt = 1024;
+ u32 const th_min = 500, th_max = 1000;
+ u32 half_rate_mode = 0;
+ u32 list_elements;
+ int optimal_index;
+ u32 i, j, k;
+ u32 found_hsclk_divsel = 0, found_vco_ratio;
+ u32 found_tx_band_sel;
+ u64 const min_freq = HDMI_VCO_MIN_FREQ, max_freq = HDMI_VCO_MAX_FREQ;
+ u64 freq_list[ARRAY_SIZE(ratio_list) * ARRAY_SIZE(band_list)];
+ u64 found_vco_freq;
+ u64 freq_optimal;
+
+find_optimal_index:
+ freq_optimal = max_freq;
+ optimal_index = -1;
+ list_elements = 0;
+
+ for (i = 0; i < sz_ratio; i++) {
+ for (j = 0; j < sz_band; j++) {
+ u64 freq = div_u64(bclk, (1 << half_rate_mode));
+
+ freq *= (ratio_list[i] * (1 << band_list[j]));
+ freq_list[list_elements++] = freq;
+ }
+ }
+
+ for (k = 0; k < ARRAY_SIZE(freq_list); k++) {
+ u32 const clks_pll_div = 2, core_clk_div = 5;
+ u32 const rng1 = 16, rng2 = 8;
+ u32 th1, th2;
+ u64 core_clk, rvar1, rem;
+
+ core_clk = div_u64(freq_list[k],
+ ratio_list[k / sz_band] * clks_pll_div *
+ core_clk_div);
+
+ rvar1 = HDMI_REF_CLOCK_HZ * rng1 * HDMI_MHZ_TO_HZ;
+ rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
+ if (rem > ((cmp_cnt * core_clk) >> 1))
+ rvar1++;
+ th1 = rvar1;
+
+ rvar1 = HDMI_REF_CLOCK_HZ * rng2 * HDMI_MHZ_TO_HZ;
+ rvar1 = div64_u64_rem(rvar1, (cmp_cnt * core_clk), &rem);
+ if (rem > ((cmp_cnt * core_clk) >> 1))
+ rvar1++;
+ th2 = rvar1;
+
+ if (freq_list[k] >= min_freq &&
+ freq_list[k] <= max_freq) {
+ if ((th1 >= th_min && th1 <= th_max) ||
+ (th2 >= th_min && th2 <= th_max)) {
+ if (freq_list[k] <= freq_optimal) {
+ freq_optimal = freq_list[k];
+ optimal_index = k;
+ }
+ }
+ }
+ }
+
+ if (optimal_index == -1) {
+ if (!half_rate_mode) {
+ half_rate_mode = 1;
+ goto find_optimal_index;
+ } else {
+ return -EINVAL;
+ }
+ } else {
+ found_vco_ratio = ratio_list[optimal_index / sz_band];
+ found_tx_band_sel = band_list[optimal_index % sz_band];
+ found_vco_freq = freq_optimal;
+ }
+
+ switch (found_vco_ratio) {
+ case 1:
+ found_hsclk_divsel = 15;
+ break;
+ case 2:
+ found_hsclk_divsel = 0;
+ break;
+ case 3:
+ found_hsclk_divsel = 4;
+ break;
+ case 4:
+ found_hsclk_divsel = 8;
+ break;
+ case 5:
+ found_hsclk_divsel = 12;
+ break;
+ case 6:
+ found_hsclk_divsel = 1;
+ break;
+ case 9:
+ found_hsclk_divsel = 5;
+ break;
+ case 10:
+ found_hsclk_divsel = 2;
+ break;
+ case 12:
+ found_hsclk_divsel = 9;
+ break;
+ case 15:
+ found_hsclk_divsel = 13;
+ break;
+ case 25:
+ found_hsclk_divsel = 14;
+ break;
+ }
+
+ pd->vco_freq = found_vco_freq;
+ pd->tx_band_sel = found_tx_band_sel;
+ pd->vco_ratio = found_vco_ratio;
+ pd->hsclk_divsel = found_hsclk_divsel;
+
+ return 0;
+}
+
+static int pll_calculate(unsigned long pix_clk, unsigned long ref_clk,
+ struct hdmi_8998_phy_pll_reg_cfg *cfg)
+{
+ struct hdmi_8998_post_divider pd;
+ u64 bclk;
+ u64 dec_start;
+ u64 frac_start;
+ u64 fdata;
+ u32 pll_divisor;
+ u32 rem;
+ u32 cpctrl;
+ u32 rctrl;
+ u32 cctrl;
+ u32 integloop_gain;
+ u32 pll_cmp;
+ int i, ret;
+
+ /* bit clk = 10 * pix_clk */
+ bclk = ((u64)pix_clk) * 10;
+
+ ret = pll_get_post_div(&pd, bclk);
+ if (ret)
+ return ret;
+
+ dec_start = pd.vco_freq;
+ pll_divisor = 4 * ref_clk;
+ do_div(dec_start, pll_divisor);
+
+ frac_start = pd.vco_freq * (1 << 20);
+
+ rem = do_div(frac_start, pll_divisor);
+ frac_start -= dec_start * (1 << 20);
+ if (rem > (pll_divisor >> 1))
+ frac_start++;
+
+ cpctrl = pll_get_cpctrl(frac_start, ref_clk, false);
+ rctrl = pll_get_rctrl(frac_start, false);
+ cctrl = pll_get_cctrl(frac_start, false);
+ integloop_gain = pll_get_integloop_gain(frac_start, bclk,
+ ref_clk, false);
+
+ fdata = pd.vco_freq;
+ do_div(fdata, pd.vco_ratio);
+
+ pll_cmp = pll_get_pll_cmp(fdata, ref_clk);
+
+ /* Convert these values to register specific values */
+ if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD)
+ cfg->com_svs_mode_clk_sel = 1;
+ else
+ cfg->com_svs_mode_clk_sel = 2;
+
+ cfg->com_hsclk_sel = (0x20 | pd.hsclk_divsel);
+ cfg->com_pll_cctrl_mode0 = cctrl;
+ cfg->com_pll_rctrl_mode0 = rctrl;
+ cfg->com_cp_ctrl_mode0 = cpctrl;
+ cfg->com_dec_start_mode0 = dec_start;
+ cfg->com_div_frac_start1_mode0 = (frac_start & 0xff);
+ cfg->com_div_frac_start2_mode0 = ((frac_start & 0xff00) >> 8);
+ cfg->com_div_frac_start3_mode0 = ((frac_start & 0xf0000) >> 16);
+ cfg->com_integloop_gain0_mode0 = (integloop_gain & 0xff);
+ cfg->com_integloop_gain1_mode0 = ((integloop_gain & 0xf00) >> 8);
+ cfg->com_lock_cmp1_mode0 = (pll_cmp & 0xff);
+ cfg->com_lock_cmp2_mode0 = ((pll_cmp & 0xff00) >> 8);
+ cfg->com_lock_cmp3_mode0 = ((pll_cmp & 0x30000) >> 16);
+ cfg->com_lock_cmp_en = 0x0;
+ cfg->com_core_clk_en = 0x2c;
+ cfg->com_coreclk_div_mode0 = HDMI_CORECLK_DIV;
+ cfg->phy_mode = (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) ? 0x5 : 0x4;
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++)
+ cfg->tx_lx_tx_band[i] = pd.tx_band_sel;
+
+ if (bclk > HDMI_HIGH_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x02;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x1C;
+ cfg->tx_lx_pre_driver_2[1] = 0x1C;
+ cfg->tx_lx_pre_driver_2[2] = 0x1C;
+ cfg->tx_lx_pre_driver_2[3] = 0x00;
+ cfg->tx_lx_res_code_offset[0] = 0x03;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x03;
+ } else if (bclk > HDMI_DIG_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x03;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x16;
+ cfg->tx_lx_pre_driver_2[1] = 0x16;
+ cfg->tx_lx_pre_driver_2[2] = 0x16;
+ cfg->tx_lx_pre_driver_2[3] = 0x18;
+ cfg->tx_lx_res_code_offset[0] = 0x03;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x00;
+ } else if (bclk > HDMI_MID_FREQ_BIT_CLK_THRESHOLD) {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x0f;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x0f;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x05;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x05;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x05;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x0E;
+ cfg->tx_lx_pre_driver_2[1] = 0x0E;
+ cfg->tx_lx_pre_driver_2[2] = 0x0E;
+ cfg->tx_lx_pre_driver_2[3] = 0x0E;
+ cfg->tx_lx_res_code_offset[0] = 0x00;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x00;
+ } else {
+ cfg->tx_lx_tx_drv_lvl[0] = 0x01;
+ cfg->tx_lx_tx_drv_lvl[1] = 0x01;
+ cfg->tx_lx_tx_drv_lvl[2] = 0x01;
+ cfg->tx_lx_tx_drv_lvl[3] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[0] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[1] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[2] = 0x00;
+ cfg->tx_lx_tx_emp_post1_lvl[3] = 0x00;
+ cfg->tx_lx_pre_driver_1[0] = 0x00;
+ cfg->tx_lx_pre_driver_1[1] = 0x00;
+ cfg->tx_lx_pre_driver_1[2] = 0x00;
+ cfg->tx_lx_pre_driver_1[3] = 0x00;
+ cfg->tx_lx_pre_driver_2[0] = 0x16;
+ cfg->tx_lx_pre_driver_2[1] = 0x16;
+ cfg->tx_lx_pre_driver_2[2] = 0x16;
+ cfg->tx_lx_pre_driver_2[3] = 0x18;
+ cfg->tx_lx_res_code_offset[0] = 0x00;
+ cfg->tx_lx_res_code_offset[1] = 0x00;
+ cfg->tx_lx_res_code_offset[2] = 0x00;
+ cfg->tx_lx_res_code_offset[3] = 0x00;
+ }
+
+ return 0;
+}
+
+static int hdmi_8998_pll_set_clk_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ struct hdmi_8998_phy_pll_reg_cfg cfg = {};
+ int i, ret;
+
+ ret = pll_calculate(rate, parent_rate, &cfg);
+ if (ret) {
+ DRM_ERROR("PLL calculation failed\n");
+ return ret;
+ }
+
+ /* Initially shut down PHY */
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x0);
+ udelay(500);
+
+ /* Power up sequence */
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0x1);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_RESETSM_CNTRL, 0x20);
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CMN_CTRL, 0x6);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_INTERFACE_SELECT_TX_BAND,
+ cfg.tx_lx_tx_band[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_CLKBUF_TERM_ENABLE,
+ 0x1);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_LANE_MODE,
+ 0x20);
+ }
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_BUF_ENABLE, 0x02);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_BIAS_EN_CLKBUFLR_EN, 0x0B);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYSCLK_EN_SEL, 0x37);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SYS_CLK_CTRL, 0x02);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_ENABLE1, 0x0E);
+
+ /* Bypass VCO calibration */
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_SVS_MODE_CLK_SEL,
+ cfg.com_svs_mode_clk_sel);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_IVCO, 0x07);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_CTRL, 0x00);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CLK_SEL, 0x30);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_HSCLK_SEL,
+ cfg.com_hsclk_sel);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP_EN,
+ cfg.com_lock_cmp_en);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_CCTRL_MODE0,
+ cfg.com_pll_cctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_PLL_RCTRL_MODE0,
+ cfg.com_pll_rctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CP_CTRL_MODE0,
+ cfg.com_cp_ctrl_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DEC_START_MODE0,
+ cfg.com_dec_start_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START1_MODE0,
+ cfg.com_div_frac_start1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START2_MODE0,
+ cfg.com_div_frac_start2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_DIV_FRAC_START3_MODE0,
+ cfg.com_div_frac_start3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN0_MODE0,
+ cfg.com_integloop_gain0_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_INTEGLOOP_GAIN1_MODE0,
+ cfg.com_integloop_gain1_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP1_MODE0,
+ cfg.com_lock_cmp1_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP2_MODE0,
+ cfg.com_lock_cmp2_mode0);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_LOCK_CMP3_MODE0,
+ cfg.com_lock_cmp3_mode0);
+
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_VCO_TUNE_MAP, 0x00);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORE_CLK_EN,
+ cfg.com_core_clk_en);
+ hdmi_pll_write(pll, REG_HDMI_8998_PHY_QSERDES_COM_CORECLK_DIV_MODE0,
+ cfg.com_coreclk_div_mode0);
+
+ /* TX lanes setup (TX 0/1/2/3) */
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_DRV_LVL,
+ cfg.tx_lx_tx_drv_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_EMP_POST1_LVL,
+ cfg.tx_lx_tx_emp_post1_lvl[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_PRE_DRIVER_1,
+ cfg.tx_lx_pre_driver_1[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_PRE_DRIVER_2,
+ cfg.tx_lx_pre_driver_2[i]);
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_DRV_LVL_RES_CODE_OFFSET,
+ cfg.tx_lx_res_code_offset[i]);
+ }
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_MODE, cfg.phy_mode);
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_LANE_CONFIG,
+ 0x10);
+ }
+
+ /*
+ * Ensure that vco configuration gets flushed to hardware before
+ * enabling the PLL
+ */
+ wmb();
+
+ pll->rate = rate;
+
+ return 0;
+}
+
+static int hdmi_8998_phy_ready_status(struct hdmi_phy *phy)
+{
+ u32 nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ u32 status;
+ int phy_ready = 0;
+
+ while (nb_tries--) {
+ status = hdmi_phy_read(phy, REG_HDMI_8998_PHY_STATUS);
+ phy_ready = status & BIT(0);
+
+ if (phy_ready)
+ break;
+
+ udelay(timeout);
+ }
+
+ return phy_ready;
+}
+
+static int hdmi_8998_pll_lock_status(struct hdmi_pll_8998 *pll)
+{
+ u32 status;
+ int nb_tries = HDMI_PLL_POLL_MAX_READS;
+ unsigned long timeout = HDMI_PLL_POLL_TIMEOUT_US;
+ int pll_locked = 0;
+
+ while (nb_tries--) {
+ status = hdmi_pll_read(pll,
+ REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ if (pll_locked)
+ break;
+
+ udelay(timeout);
+ }
+
+ return pll_locked;
+}
+
+static int hdmi_8998_pll_prepare(struct clk_hw *hw)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+ int i, ret = 0;
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x1);
+ udelay(100);
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59);
+ udelay(100);
+
+ ret = hdmi_8998_pll_lock_status(pll);
+ if (!ret)
+ return -ETIMEDOUT; /* PLL did not lock */
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ hdmi_tx_chan_write(pll, i,
+ REG_HDMI_8998_PHY_TXn_LANE_CONFIG, 0x1F);
+ }
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ ret = hdmi_8998_phy_ready_status(phy);
+ if (!ret)
+ return -ETIMEDOUT; /* PHY not ready */
+
+ /* Restart the retiming buffer */
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x58);
+ udelay(1);
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_CFG, 0x59);
+
+ /* Ensure all registers are flushed to hardware */
+ wmb();
+
+ return 0;
+}
+
+static long hdmi_8998_pll_round_rate(struct clk_hw *hw,
+ unsigned long rate,
+ unsigned long *parent_rate)
+{
+ if (rate < HDMI_PCLK_MIN_FREQ)
+ return HDMI_PCLK_MIN_FREQ;
+ else if (rate > HDMI_PCLK_MAX_FREQ)
+ return HDMI_PCLK_MAX_FREQ;
+ else
+ return rate;
+}
+
+static unsigned long hdmi_8998_pll_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ return pll->rate;
+}
+
+static void hdmi_8998_pll_unprepare(struct clk_hw *hw)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ struct hdmi_phy *phy = pll_get_phy(pll);
+
+ hdmi_phy_write(phy, REG_HDMI_8998_PHY_PD_CTL, 0);
+ usleep_range(100, 150);
+}
+
+static int hdmi_8998_pll_is_enabled(struct clk_hw *hw)
+{
+ struct hdmi_pll_8998 *pll = hw_clk_to_pll(hw);
+ u32 status;
+ int pll_locked;
+
+ status = hdmi_pll_read(pll, REG_HDMI_8998_PHY_QSERDES_COM_C_READY_STATUS);
+ pll_locked = status & BIT(0);
+
+ return pll_locked;
+}
+
+static const struct clk_ops hdmi_8998_pll_ops = {
+ .set_rate = hdmi_8998_pll_set_clk_rate,
+ .round_rate = hdmi_8998_pll_round_rate,
+ .recalc_rate = hdmi_8998_pll_recalc_rate,
+ .prepare = hdmi_8998_pll_prepare,
+ .unprepare = hdmi_8998_pll_unprepare,
+ .is_enabled = hdmi_8998_pll_is_enabled,
+};
+
+static const struct clk_init_data pll_init = {
+ .name = "hdmipll",
+ .ops = &hdmi_8998_pll_ops,
+ .parent_data = (const struct clk_parent_data[]){
+ { .fw_name = "xo", .name = "xo_board" },
+ },
+ .num_parents = 1,
+ .flags = CLK_IGNORE_UNUSED,
+};
+
+int msm_hdmi_pll_8998_init(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct hdmi_pll_8998 *pll;
+ int ret, i;
+
+ pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
+ if (!pll)
+ return -ENOMEM;
+
+ pll->pdev = pdev;
+
+ pll->mmio_qserdes_com = msm_ioremap(pdev, "hdmi_pll");
+ if (IS_ERR(pll->mmio_qserdes_com)) {
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < HDMI_NUM_TX_CHANNEL; i++) {
+ char name[32];
+
+ snprintf(name, sizeof(name), "hdmi_tx_l%d", i);
+
+ pll->mmio_qserdes_tx[i] = msm_ioremap(pdev, name);
+ if (IS_ERR(pll->mmio_qserdes_tx[i])) {
+ DRM_DEV_ERROR(dev, "failed to map pll base\n");
+ return -ENOMEM;
+ }
+ }
+ pll->clk_hw.init = &pll_init;
+
+ ret = devm_clk_hw_register(dev, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register pll clock\n");
+ return ret;
+ }
+
+ ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_simple_get, &pll->clk_hw);
+ if (ret) {
+ DRM_DEV_ERROR(dev, "failed to register clk provider: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static const char * const hdmi_phy_8998_reg_names[] = {
+ "vddio",
+ "vcca",
+};
+
+static const char * const hdmi_phy_8998_clk_names[] = {
+ "iface", "ref", "xo",
+};
+
+const struct hdmi_phy_cfg msm_hdmi_phy_8998_cfg = {
+ .type = MSM_HDMI_PHY_8998,
+ .reg_names = hdmi_phy_8998_reg_names,
+ .num_regs = ARRAY_SIZE(hdmi_phy_8998_reg_names),
+ .clk_names = hdmi_phy_8998_clk_names,
+ .num_clks = ARRAY_SIZE(hdmi_phy_8998_clk_names),
+};
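
On the comparator math in this new file: pll_get_pll_cmp() computes round(HDMI_PLL_CMP_CNT * fdata / (ref_clk * 10)) - 1. A standalone re-derivation with one worked value (the 2.97 GHz fdata is just an illustrative VCO/ratio combination; the reference clock is the fixed 19.2 MHz XO):

    #include <stdint.h>
    #include <stdio.h>

    /* Round HDMI_PLL_CMP_CNT * fdata / (ref_clk * 10) to nearest,
     * minus one, mirroring pll_get_pll_cmp() above. */
    static uint32_t pll_cmp(uint64_t fdata, uint32_t ref_clk)
    {
        uint64_t dividend = 1024ULL * fdata;	/* HDMI_PLL_CMP_CNT = 1024 */
        uint32_t divisor = ref_clk * 10;
        uint64_t q = dividend / divisor;

        if (dividend % divisor > divisor / 2)
            q++;

        return (uint32_t)(q - 1);
    }

    int main(void)
    {
        /* Illustrative: an 8.91 GHz VCO with post-divider ratio 3. */
        printf("pll_cmp = %u\n", pll_cmp(2970000000ULL, 19200000));	/* 15839 */
        return 0;
    }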
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index be016d7b4ef1..0d3adf398bc1 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -215,8 +215,6 @@ struct msm_drm_private {
struct notifier_block vmap_notifier;
struct shrinker *shrinker;
- struct drm_atomic_state *pm_state;
-
/**
* hangcheck_period: For hang detection, in ms
*
@@ -254,8 +252,6 @@ void msm_atomic_destroy_pending_timer(struct msm_pending_timer *timer);
void msm_atomic_commit_tail(struct drm_atomic_state *state);
int msm_atomic_check(struct drm_device *dev, struct drm_atomic_state *state);
struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
-void msm_atomic_state_clear(struct drm_atomic_state *state);
-void msm_atomic_state_free(struct drm_atomic_state *state);
int msm_crtc_enable_vblank(struct drm_crtc *crtc);
void msm_crtc_disable_vblank(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 3666b42b4ecd..a274b8466423 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -931,7 +931,6 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
- gpu->pdev = pdev;
platform_set_drvdata(pdev, &gpu->adreno_smmu);
msm_devfreq_init(gpu);
diff --git a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
index 2dfe6913ab4f..97608603ea62 100644
--- a/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
+++ b/drivers/gpu/drm/msm/registers/adreno/a6xx.xml
@@ -1198,6 +1198,1027 @@ to upconvert to 32b float internally?
<value value="0x3" name="TESS_CCW_TRIS"/>
</enum>
+<enum name="a7xx_cp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CP_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_CP_BUSY_GFX_CORE_IDLE"/>
+ <value value="2" name="A7XX_PERF_CP_BUSY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_CP_NUM_PREEMPTIONS"/>
+ <value value="4" name="A7XX_PERF_CP_PREEMPTION_REACTION_DELAY"/>
+ <value value="5" name="A7XX_PERF_CP_PREEMPTION_SWITCH_OUT_TIME"/>
+ <value value="6" name="A7XX_PERF_CP_PREEMPTION_SWITCH_IN_TIME"/>
+ <value value="7" name="A7XX_PERF_CP_DEAD_DRAWS_IN_BIN_RENDER"/>
+ <value value="8" name="A7XX_PERF_CP_PREDICATED_DRAWS_KILLED"/>
+ <value value="9" name="A7XX_PERF_CP_MODE_SWITCH"/>
+ <value value="10" name="A7XX_PERF_CP_ZPASS_DONE"/>
+ <value value="11" name="A7XX_PERF_CP_CONTEXT_DONE"/>
+ <value value="12" name="A7XX_PERF_CP_CACHE_FLUSH"/>
+ <value value="13" name="A7XX_PERF_CP_LONG_PREEMPTIONS"/>
+ <value value="14" name="A7XX_PERF_CP_SQE_I_CACHE_STARVE"/>
+ <value value="15" name="A7XX_PERF_CP_SQE_IDLE"/>
+ <value value="16" name="A7XX_PERF_CP_SQE_PM4_STARVE_RB_IB"/>
+ <value value="17" name="A7XX_PERF_CP_SQE_PM4_STARVE_SDS"/>
+ <value value="18" name="A7XX_PERF_CP_SQE_MRB_STARVE"/>
+ <value value="19" name="A7XX_PERF_CP_SQE_RRB_STARVE"/>
+ <value value="20" name="A7XX_PERF_CP_SQE_VSD_STARVE"/>
+ <value value="21" name="A7XX_PERF_CP_VSD_DECODE_STARVE"/>
+ <value value="22" name="A7XX_PERF_CP_SQE_PIPE_OUT_STALL"/>
+ <value value="23" name="A7XX_PERF_CP_SQE_SYNC_STALL"/>
+ <value value="24" name="A7XX_PERF_CP_SQE_PM4_WFI_STALL"/>
+ <value value="25" name="A7XX_PERF_CP_SQE_SYS_WFI_STALL"/>
+ <value value="26" name="A7XX_PERF_CP_SQE_T4_EXEC"/>
+ <value value="27" name="A7XX_PERF_CP_SQE_LOAD_STATE_EXEC"/>
+ <value value="28" name="A7XX_PERF_CP_SQE_SAVE_SDS_STATE"/>
+ <value value="29" name="A7XX_PERF_CP_SQE_DRAW_EXEC"/>
+ <value value="30" name="A7XX_PERF_CP_SQE_CTXT_REG_BUNCH_EXEC"/>
+ <value value="31" name="A7XX_PERF_CP_SQE_EXEC_PROFILED"/>
+ <value value="32" name="A7XX_PERF_CP_MEMORY_POOL_EMPTY"/>
+ <value value="33" name="A7XX_PERF_CP_MEMORY_POOL_SYNC_STALL"/>
+ <value value="34" name="A7XX_PERF_CP_MEMORY_POOL_ABOVE_THRESH"/>
+ <value value="35" name="A7XX_PERF_CP_AHB_WR_STALL_PRE_DRAWS"/>
+ <value value="36" name="A7XX_PERF_CP_AHB_STALL_SQE_GMU"/>
+ <value value="37" name="A7XX_PERF_CP_AHB_STALL_SQE_WR_OTHER"/>
+ <value value="38" name="A7XX_PERF_CP_AHB_STALL_SQE_RD_OTHER"/>
+ <value value="39" name="A7XX_PERF_CP_CLUSTER0_EMPTY"/>
+ <value value="40" name="A7XX_PERF_CP_CLUSTER1_EMPTY"/>
+ <value value="41" name="A7XX_PERF_CP_CLUSTER2_EMPTY"/>
+ <value value="42" name="A7XX_PERF_CP_CLUSTER3_EMPTY"/>
+ <value value="43" name="A7XX_PERF_CP_CLUSTER4_EMPTY"/>
+ <value value="44" name="A7XX_PERF_CP_CLUSTER5_EMPTY"/>
+ <value value="45" name="A7XX_PERF_CP_PM4_DATA"/>
+ <value value="46" name="A7XX_PERF_CP_PM4_HEADERS"/>
+ <value value="47" name="A7XX_PERF_CP_VBIF_READ_BEATS"/>
+ <value value="48" name="A7XX_PERF_CP_VBIF_WRITE_BEATS"/>
+ <value value="49" name="A7XX_PERF_CP_SQE_INSTR_COUNTER"/>
+ <value value="50" name="A7XX_PERF_CP_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_CP_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_CP_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_CP_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_CP_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_CP_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_CP_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_CP_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_CP_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_CP_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_CP_CLUSTER0_FULL"/>
+ <value value="61" name="A7XX_PERF_CP_CLUSTER1_FULL"/>
+ <value value="62" name="A7XX_PERF_CP_CLUSTER2_FULL"/>
+ <value value="63" name="A7XX_PERF_CP_CLUSTER3_FULL"/>
+ <value value="64" name="A7XX_PERF_CP_CLUSTER4_FULL"/>
+ <value value="65" name="A7XX_PERF_CP_CLUSTER5_FULL"/>
+ <value value="66" name="A7XX_PERF_CP_CLUSTER6_FULL"/>
+ <value value="67" name="A7XX_PERF_CP_CLUSTER6_EMPTY"/>
+ <value value="68" name="A7XX_PERF_CP_ICACHE_MISSES"/>
+ <value value="69" name="A7XX_PERF_CP_ICACHE_HITS"/>
+ <value value="70" name="A7XX_PERF_CP_ICACHE_STALL"/>
+ <value value="71" name="A7XX_PERF_CP_DCACHE_MISSES"/>
+ <value value="72" name="A7XX_PERF_CP_DCACHE_HITS"/>
+ <value value="73" name="A7XX_PERF_CP_DCACHE_STALLS"/>
+ <value value="74" name="A7XX_PERF_CP_AQE_SQE_STALL"/>
+ <value value="75" name="A7XX_PERF_CP_SQE_AQE_STARVE"/>
+ <value value="76" name="A7XX_PERF_CP_PREEMPT_LATENCY"/>
+ <value value="77" name="A7XX_PERF_CP_SQE_MD8_STALL_CYCLES"/>
+ <value value="78" name="A7XX_PERF_CP_SQE_MESH_EXEC_CYCLES"/>
+ <value value="79" name="A7XX_PERF_CP_AQE_NUM_AS_CHUNKS"/>
+ <value value="80" name="A7XX_PERF_CP_AQE_NUM_MS_CHUNKS"/>
+</enum>
+
+<enum name="a7xx_rbbm_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RBBM_ALWAYS_COUNT"/>
+ <value value="1" name="A7XX_PERF_RBBM_ALWAYS_ON"/>
+ <value value="2" name="A7XX_PERF_RBBM_TSE_BUSY"/>
+ <value value="3" name="A7XX_PERF_RBBM_RAS_BUSY"/>
+ <value value="4" name="A7XX_PERF_RBBM_PC_DCALL_BUSY"/>
+ <value value="5" name="A7XX_PERF_RBBM_PC_VSD_BUSY"/>
+ <value value="6" name="A7XX_PERF_RBBM_STATUS_MASKED"/>
+ <value value="7" name="A7XX_PERF_RBBM_COM_BUSY"/>
+ <value value="8" name="A7XX_PERF_RBBM_DCOM_BUSY"/>
+ <value value="9" name="A7XX_PERF_RBBM_VBIF_BUSY"/>
+ <value value="10" name="A7XX_PERF_RBBM_VSC_BUSY"/>
+ <value value="11" name="A7XX_PERF_RBBM_TESS_BUSY"/>
+ <value value="12" name="A7XX_PERF_RBBM_UCHE_BUSY"/>
+ <value value="13" name="A7XX_PERF_RBBM_HLSQ_BUSY"/>
+</enum>
+
+<enum name="a7xx_pc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_PC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_PC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_PC_STALL_CYCLES_VFD"/>
+ <value value="3" name="A7XX_PERF_PC_RESERVED"/>
+ <value value="4" name="A7XX_PERF_PC_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_PC_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_PC_STALL_CYCLES_TESS"/>
+ <value value="7" name="A7XX_PERF_PC_STALL_CYCLES_VFD_ONLY"/>
+ <value value="8" name="A7XX_PERF_PC_STALL_CYCLES_VPC_ONLY"/>
+ <value value="9" name="A7XX_PERF_PC_PASS1_TF_STALL_CYCLES"/>
+ <value value="10" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_INDEX"/>
+ <value value="11" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_TESS_FACTOR"/>
+ <value value="12" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_VIZ_STREAM"/>
+ <value value="13" name="A7XX_PERF_PC_STARVE_CYCLES_DI"/>
+ <value value="14" name="A7XX_PERF_PC_VIS_STREAMS_LOADED"/>
+ <value value="15" name="A7XX_PERF_PC_INSTANCES"/>
+ <value value="16" name="A7XX_PERF_PC_VPC_PRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_PC_DEAD_PRIM"/>
+ <value value="18" name="A7XX_PERF_PC_LIVE_PRIM"/>
+ <value value="19" name="A7XX_PERF_PC_VERTEX_HITS"/>
+ <value value="20" name="A7XX_PERF_PC_IA_VERTICES"/>
+ <value value="21" name="A7XX_PERF_PC_IA_PRIMITIVES"/>
+ <value value="22" name="A7XX_PERF_PC_RESERVED_22"/>
+ <value value="23" name="A7XX_PERF_PC_HS_INVOCATIONS"/>
+ <value value="24" name="A7XX_PERF_PC_DS_INVOCATIONS"/>
+ <value value="25" name="A7XX_PERF_PC_VS_INVOCATIONS"/>
+ <value value="26" name="A7XX_PERF_PC_GS_INVOCATIONS"/>
+ <value value="27" name="A7XX_PERF_PC_DS_PRIMITIVES"/>
+ <value value="28" name="A7XX_PERF_PC_3D_DRAWCALLS"/>
+ <value value="29" name="A7XX_PERF_PC_2D_DRAWCALLS"/>
+ <value value="30" name="A7XX_PERF_PC_NON_DRAWCALL_GLOBAL_EVENTS"/>
+ <value value="31" name="A7XX_PERF_PC_TESS_BUSY_CYCLES"/>
+ <value value="32" name="A7XX_PERF_PC_TESS_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_PC_TESS_STALL_CYCLES_PC"/>
+ <value value="34" name="A7XX_PERF_PC_TESS_STARVE_CYCLES_PC"/>
+ <value value="35" name="A7XX_PERF_PC_TESS_SINGLE_PRIM_CYCLES"/>
+ <value value="36" name="A7XX_PERF_PC_TESS_PC_UV_TRANS"/>
+ <value value="37" name="A7XX_PERF_PC_TESS_PC_UV_PATCHES"/>
+ <value value="38" name="A7XX_PERF_PC_TESS_FACTOR_TRANS"/>
+ <value value="39" name="A7XX_PERF_PC_TAG_CHECKED_VERTICES"/>
+ <value value="40" name="A7XX_PERF_PC_MESH_VS_WAVES"/>
+ <value value="41" name="A7XX_PERF_PC_MESH_DRAWS"/>
+ <value value="42" name="A7XX_PERF_PC_MESH_DEAD_DRAWS"/>
+ <value value="43" name="A7XX_PERF_PC_MESH_MVIS_EN_DRAWS"/>
+ <value value="44" name="A7XX_PERF_PC_MESH_DEAD_PRIM"/>
+ <value value="45" name="A7XX_PERF_PC_MESH_LIVE_PRIM"/>
+ <value value="46" name="A7XX_PERF_PC_MESH_PA_EN_PRIM"/>
+ <value value="47" name="A7XX_PERF_PC_STARVE_CYCLES_FOR_MVIS_STREAM"/>
+ <value value="48" name="A7XX_PERF_PC_STARVE_CYCLES_PREDRAW"/>
+ <value value="49" name="A7XX_PERF_PC_STALL_CYCLES_COMPUTE_GFX"/>
+ <value value="50" name="A7XX_PERF_PC_STALL_CYCLES_GFX_COMPUTE"/>
+ <value value="51" name="A7XX_PERF_PC_TESS_PC_MULTI_PATCH_TRANS"/>
+</enum>
+
+<enum name="a7xx_vfd_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VFD_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VFD_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_VFD_STALL_CYCLES_VPC_ALLOC"/>
+ <value value="3" name="A7XX_PERF_VFD_STALL_CYCLES_SP_INFO"/>
+ <value value="4" name="A7XX_PERF_VFD_STALL_CYCLES_SP_ATTR"/>
+ <value value="5" name="A7XX_PERF_VFD_STARVE_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_VFD_RBUFFER_FULL"/>
+ <value value="7" name="A7XX_PERF_VFD_ATTR_INFO_FIFO_FULL"/>
+ <value value="8" name="A7XX_PERF_VFD_DECODED_ATTRIBUTE_BYTES"/>
+ <value value="9" name="A7XX_PERF_VFD_NUM_ATTRIBUTES"/>
+ <value value="10" name="A7XX_PERF_VFD_UPPER_SHADER_FIBERS"/>
+ <value value="11" name="A7XX_PERF_VFD_LOWER_SHADER_FIBERS"/>
+ <value value="12" name="A7XX_PERF_VFD_MODE_0_FIBERS"/>
+ <value value="13" name="A7XX_PERF_VFD_MODE_1_FIBERS"/>
+ <value value="14" name="A7XX_PERF_VFD_MODE_2_FIBERS"/>
+ <value value="15" name="A7XX_PERF_VFD_MODE_3_FIBERS"/>
+ <value value="16" name="A7XX_PERF_VFD_MODE_4_FIBERS"/>
+ <value value="17" name="A7XX_PERF_VFD_TOTAL_VERTICES"/>
+ <value value="18" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD"/>
+ <value value="19" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_INDEX"/>
+ <value value="20" name="A7XX_PERF_VFDP_STALL_CYCLES_VFD_PROG"/>
+ <value value="21" name="A7XX_PERF_VFDP_STARVE_CYCLES_PC"/>
+ <value value="22" name="A7XX_PERF_VFDP_VS_STAGE_WAVES"/>
+ <value value="23" name="A7XX_PERF_VFD_STALL_CYCLES_PRG_END_FE"/>
+ <value value="24" name="A7XX_PERF_VFD_STALL_CYCLES_CBSYNC"/>
+</enum>
+
+<enum name="a7xx_hlsq_perfcounter_select">
+ <value value="0" name="A7XX_PERF_HLSQ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_HLSQ_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_STATE"/>
+ <value value="3" name="A7XX_PERF_HLSQ_STALL_CYCLES_SP_FS_STAGE"/>
+ <value value="4" name="A7XX_PERF_HLSQ_UCHE_LATENCY_CYCLES"/>
+ <value value="5" name="A7XX_PERF_HLSQ_UCHE_LATENCY_COUNT"/>
+ <value value="6" name="A7XX_PERF_HLSQ_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_HLSQ_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_HLSQ_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_HLSQ_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_HLSQ_COMPUTE_DRAWCALLS"/>
+ <value value="11" name="A7XX_PERF_HLSQ_FS_DATA_WAIT_PROGRAMMING"/>
+ <value value="12" name="A7XX_PERF_HLSQ_DUAL_FS_PROG_ACTIVE"/>
+ <value value="13" name="A7XX_PERF_HLSQ_DUAL_VS_PROG_ACTIVE"/>
+ <value value="14" name="A7XX_PERF_HLSQ_FS_BATCH_COUNT_ZERO"/>
+ <value value="15" name="A7XX_PERF_HLSQ_VS_BATCH_COUNT_ZERO"/>
+ <value value="16" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_QUAD"/>
+ <value value="17" name="A7XX_PERF_HLSQ_WAVE_PENDING_NO_PRIM_BASE"/>
+ <value value="18" name="A7XX_PERF_HLSQ_STALL_CYCLES_VPC"/>
+ <value value="19" name="A7XX_PERF_HLSQ_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_HLSQ_DRAW_MODE_SWITCH_VSFS_SYNC"/>
+ <value value="21" name="A7XX_PERF_HLSQ_VSBR_STALL_CYCLES"/>
+ <value value="22" name="A7XX_PERF_HLSQ_FS_STALL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_HLSQ_LPAC_STALL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_HLSQ_BV_STALL_CYCLES"/>
+ <value value="25" name="A7XX_PERF_HLSQ_VSBR_DEREF_CYCLES"/>
+ <value value="26" name="A7XX_PERF_HLSQ_FS_DEREF_CYCLES"/>
+ <value value="27" name="A7XX_PERF_HLSQ_LPAC_DEREF_CYCLES"/>
+ <value value="28" name="A7XX_PERF_HLSQ_BV_DEREF_CYCLES"/>
+ <value value="29" name="A7XX_PERF_HLSQ_VSBR_S2W_CYCLES"/>
+ <value value="30" name="A7XX_PERF_HLSQ_FS_S2W_CYCLES"/>
+ <value value="31" name="A7XX_PERF_HLSQ_LPAC_S2W_CYCLES"/>
+ <value value="32" name="A7XX_PERF_HLSQ_BV_S2W_CYCLES"/>
+ <value value="33" name="A7XX_PERF_HLSQ_VSBR_WAIT_FS_S2W"/>
+ <value value="34" name="A7XX_PERF_HLSQ_FS_WAIT_VS_S2W"/>
+ <value value="35" name="A7XX_PERF_HLSQ_LPAC_WAIT_VS_S2W"/>
+ <value value="36" name="A7XX_PERF_HLSQ_BV_WAIT_FS_S2W"/>
+ <value value="37" name="A7XX_PERF_HLSQ_VS_WAIT_CONST_RESOURCE"/>
+ <value value="38" name="A7XX_PERF_HLSQ_FS_WAIT_SAME_VS_S2W"/>
+ <value value="39" name="A7XX_PERF_HLSQ_FS_STARVING_SP"/>
+ <value value="40" name="A7XX_PERF_HLSQ_VS_DATA_WAIT_PROGRAMMING"/>
+ <value value="41" name="A7XX_PERF_HLSQ_BV_DATA_WAIT_PROGRAMMING"/>
+ <value value="42" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_VS"/>
+ <value value="43" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_VS"/>
+ <value value="44" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_FS"/>
+ <value value="45" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_FS"/>
+ <value value="46" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_BV"/>
+ <value value="47" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_BV"/>
+ <value value="48" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXTS_LPAC"/>
+ <value value="49" name="A7XX_PERF_HLSQ_STPROC_WAVE_CONTEXT_CYCLES_LPAC"/>
+ <value value="50" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_VS"/>
+ <value value="51" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_FS"/>
+ <value value="52" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_BV"/>
+ <value value="53" name="A7XX_PERF_HLSQ_SPTROC_STCHE_WARMUP_INC_LPAC"/>
+ <value value="54" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_VS"/>
+ <value value="55" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_FS"/>
+ <value value="56" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_BV"/>
+ <value value="57" name="A7XX_PERF_HLSQ_SPTROC_STCHE_MISS_INC_LPAC"/>
+</enum>
+
+<enum name="a7xx_vpc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VPC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VPC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VPC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VPC_STALL_CYCLES_VFD_WACK"/>
+ <value value="4" name="A7XX_PERF_VPC_STALL_CYCLES_HLSQ_PRIM_ALLOC"/>
+ <value value="5" name="A7XX_PERF_VPC_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_VPC_STALL_CYCLES_SP_LM"/>
+ <value value="7" name="A7XX_PERF_VPC_STARVE_CYCLES_SP"/>
+ <value value="8" name="A7XX_PERF_VPC_STARVE_CYCLES_LRZ"/>
+ <value value="9" name="A7XX_PERF_VPC_PC_PRIMITIVES"/>
+ <value value="10" name="A7XX_PERF_VPC_SP_COMPONENTS"/>
+ <value value="11" name="A7XX_PERF_VPC_STALL_CYCLES_VPCRAM_POS"/>
+ <value value="12" name="A7XX_PERF_VPC_LRZ_ASSIGN_PRIMITIVES"/>
+ <value value="13" name="A7XX_PERF_VPC_RB_VISIBLE_PRIMITIVES"/>
+ <value value="14" name="A7XX_PERF_VPC_LM_TRANSACTION"/>
+ <value value="15" name="A7XX_PERF_VPC_STREAMOUT_TRANSACTION"/>
+ <value value="16" name="A7XX_PERF_VPC_VS_BUSY_CYCLES"/>
+ <value value="17" name="A7XX_PERF_VPC_PS_BUSY_CYCLES"/>
+ <value value="18" name="A7XX_PERF_VPC_VS_WORKING_CYCLES"/>
+ <value value="19" name="A7XX_PERF_VPC_PS_WORKING_CYCLES"/>
+ <value value="20" name="A7XX_PERF_VPC_STARVE_CYCLES_RB"/>
+ <value value="21" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_POS"/>
+ <value value="22" name="A7XX_PERF_VPC_WIT_FULL_CYCLES"/>
+ <value value="23" name="A7XX_PERF_VPC_VPCRAM_FULL_CYCLES"/>
+ <value value="24" name="A7XX_PERF_VPC_LM_FULL_WAIT_FOR_INTP_END"/>
+ <value value="25" name="A7XX_PERF_VPC_NUM_VPCRAM_WRITE"/>
+ <value value="26" name="A7XX_PERF_VPC_NUM_VPCRAM_READ_SO"/>
+ <value value="27" name="A7XX_PERF_VPC_NUM_ATTR_REQ_LM"/>
+ <value value="28" name="A7XX_PERF_VPC_STALL_CYCLE_TSE"/>
+ <value value="29" name="A7XX_PERF_VPC_TSE_PRIMITIVES"/>
+ <value value="30" name="A7XX_PERF_VPC_GS_PRIMITIVES"/>
+ <value value="31" name="A7XX_PERF_VPC_TSE_TRANSACTIONS"/>
+ <value value="32" name="A7XX_PERF_VPC_STALL_CYCLES_CCU"/>
+ <value value="33" name="A7XX_PERF_VPC_NUM_WM_HIT"/>
+ <value value="34" name="A7XX_PERF_VPC_STALL_DQ_WACK"/>
+ <value value="35" name="A7XX_PERF_VPC_STALL_CYCLES_CCHE"/>
+ <value value="36" name="A7XX_PERF_VPC_STARVE_CYCLES_CCHE"/>
+ <value value="37" name="A7XX_PERF_VPC_NUM_PA_REQ"/>
+ <value value="38" name="A7XX_PERF_VPC_NUM_LM_REQ_HIT"/>
+ <value value="39" name="A7XX_PERF_VPC_CCHE_REQBUF_FULL"/>
+ <value value="40" name="A7XX_PERF_VPC_STALL_CYCLES_LM_ACK"/>
+ <value value="41" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_FE"/>
+ <value value="42" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_PCVS"/>
+ <value value="43" name="A7XX_PERF_VPC_STALL_CYCLES_PRG_END_VPCPS"/>
+</enum>
+
+<enum name="a7xx_tse_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TSE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TSE_CLIPPING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_TSE_STALL_CYCLES_RAS"/>
+ <value value="3" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_BARYPLANE"/>
+ <value value="4" name="A7XX_PERF_TSE_STALL_CYCLES_LRZ_ZPLANE"/>
+ <value value="5" name="A7XX_PERF_TSE_STARVE_CYCLES_PC"/>
+ <value value="6" name="A7XX_PERF_TSE_INPUT_PRIM"/>
+ <value value="7" name="A7XX_PERF_TSE_INPUT_NULL_PRIM"/>
+ <value value="8" name="A7XX_PERF_TSE_TRIVAL_REJ_PRIM"/>
+ <value value="9" name="A7XX_PERF_TSE_CLIPPED_PRIM"/>
+ <value value="10" name="A7XX_PERF_TSE_ZERO_AREA_PRIM"/>
+ <value value="11" name="A7XX_PERF_TSE_FACENESS_CULLED_PRIM"/>
+ <value value="12" name="A7XX_PERF_TSE_ZERO_PIXEL_PRIM"/>
+ <value value="13" name="A7XX_PERF_TSE_OUTPUT_NULL_PRIM"/>
+ <value value="14" name="A7XX_PERF_TSE_OUTPUT_VISIBLE_PRIM"/>
+ <value value="15" name="A7XX_PERF_TSE_CINVOCATION"/>
+ <value value="16" name="A7XX_PERF_TSE_CPRIMITIVES"/>
+ <value value="17" name="A7XX_PERF_TSE_2D_INPUT_PRIM"/>
+ <value value="18" name="A7XX_PERF_TSE_2D_ALIVE_CYCLES"/>
+ <value value="19" name="A7XX_PERF_TSE_CLIP_PLANES"/>
+</enum>
+
+<enum name="a7xx_ras_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RAS_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RAS_SUPERTILE_ACTIVE_CYCLES"/>
+ <value value="2" name="A7XX_PERF_RAS_STALL_CYCLES_LRZ"/>
+ <value value="3" name="A7XX_PERF_RAS_STARVE_CYCLES_TSE"/>
+ <value value="4" name="A7XX_PERF_RAS_SUPER_TILES"/>
+ <value value="5" name="A7XX_PERF_RAS_8X4_TILES"/>
+ <value value="6" name="A7XX_PERF_RAS_MASKGEN_ACTIVE"/>
+ <value value="7" name="A7XX_PERF_RAS_FULLY_COVERED_SUPER_TILES"/>
+ <value value="8" name="A7XX_PERF_RAS_FULLY_COVERED_8X4_TILES"/>
+ <value value="9" name="A7XX_PERF_RAS_PRIM_KILLED_INVISILBE"/>
+ <value value="10" name="A7XX_PERF_RAS_SUPERTILE_GEN_ACTIVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_RAS_LRZ_INTF_WORKING_CYCLES"/>
+ <value value="12" name="A7XX_PERF_RAS_BLOCKS"/>
+ <value value="13" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_0_WORKING_CC_l2"/>
+ <value value="14" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_1_WORKING_CC_l2"/>
+ <value value="15" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_2_WORKING_CC_l2"/>
+ <value value="16" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_3_WORKING_CC_l2"/>
+ <value value="17" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_4_WORKING_CC_l2"/>
+ <value value="18" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_5_WORKING_CC_l2"/>
+ <value value="19" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_6_WORKING_CC_l2"/>
+ <value value="20" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_7_WORKING_CC_l2"/>
+ <value value="21" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_8_WORKING_CC_l2"/>
+ <value value="22" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_9_WORKING_CC_l2"/>
+ <value value="23" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_10_WORKING_CC_l2"/>
+ <value value="24" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_11_WORKING_CC_l2"/>
+ <value value="25" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_12_WORKING_CC_l2"/>
+ <value value="26" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_13_WORKING_CC_l2"/>
+ <value value="27" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_14_WORKING_CC_l2"/>
+ <value value="28" name="A7XX_PERF_RAS_SAMPLE_MASK_GEN_LANE_15_WORKING_CC_l2"/>
+ <value value="29" name="A7XX_PERF_RAS_FALSE_PARTIAL_STILE"/>
+</enum>
+
+<enum name="a7xx_uche_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UCHE_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UCHE_STALL_CYCLES_ARBITER"/>
+ <value value="2" name="A7XX_PERF_UCHE_VBIF_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_UCHE_VBIF_LATENCY_SAMPLES"/>
+ <value value="4" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_TP"/>
+ <value value="5" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_VFD"/>
+ <value value="6" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_HLSQ"/>
+ <value value="7" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_LRZ"/>
+ <value value="8" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_SP"/>
+ <value value="9" name="A7XX_PERF_UCHE_READ_REQUESTS_TP"/>
+ <value value="10" name="A7XX_PERF_UCHE_READ_REQUESTS_VFD"/>
+ <value value="11" name="A7XX_PERF_UCHE_READ_REQUESTS_HLSQ"/>
+ <value value="12" name="A7XX_PERF_UCHE_READ_REQUESTS_LRZ"/>
+ <value value="13" name="A7XX_PERF_UCHE_READ_REQUESTS_SP"/>
+ <value value="14" name="A7XX_PERF_UCHE_WRITE_REQUESTS_LRZ"/>
+ <value value="15" name="A7XX_PERF_UCHE_WRITE_REQUESTS_SP"/>
+ <value value="16" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VPC"/>
+ <value value="17" name="A7XX_PERF_UCHE_WRITE_REQUESTS_VSC"/>
+ <value value="18" name="A7XX_PERF_UCHE_EVICTS"/>
+ <value value="19" name="A7XX_PERF_UCHE_BANK_REQ0"/>
+ <value value="20" name="A7XX_PERF_UCHE_BANK_REQ1"/>
+ <value value="21" name="A7XX_PERF_UCHE_BANK_REQ2"/>
+ <value value="22" name="A7XX_PERF_UCHE_BANK_REQ3"/>
+ <value value="23" name="A7XX_PERF_UCHE_BANK_REQ4"/>
+ <value value="24" name="A7XX_PERF_UCHE_BANK_REQ5"/>
+ <value value="25" name="A7XX_PERF_UCHE_BANK_REQ6"/>
+ <value value="26" name="A7XX_PERF_UCHE_BANK_REQ7"/>
+ <value value="27" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH0"/>
+ <value value="28" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_CH1"/>
+ <value value="29" name="A7XX_PERF_UCHE_GMEM_READ_BEATS"/>
+ <value value="30" name="A7XX_PERF_UCHE_TPH_REF_FULL"/>
+ <value value="31" name="A7XX_PERF_UCHE_TPH_VICTIM_FULL"/>
+ <value value="32" name="A7XX_PERF_UCHE_TPH_EXT_FULL"/>
+ <value value="33" name="A7XX_PERF_UCHE_VBIF_STALL_WRITE_DATA"/>
+ <value value="34" name="A7XX_PERF_UCHE_DCMP_LATENCY_SAMPLES"/>
+ <value value="35" name="A7XX_PERF_UCHE_DCMP_LATENCY_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UCHE_VBIF_READ_BEATS_PC"/>
+ <value value="37" name="A7XX_PERF_UCHE_READ_REQUESTS_PC"/>
+ <value value="38" name="A7XX_PERF_UCHE_RAM_READ_REQ"/>
+ <value value="39" name="A7XX_PERF_UCHE_RAM_WRITE_REQ"/>
+ <value value="40" name="A7XX_PERF_UCHE_STARVED_CYCLES_VBIF_DECMP"/>
+ <value value="41" name="A7XX_PERF_UCHE_STALL_CYCLES_DECMP"/>
+ <value value="42" name="A7XX_PERF_UCHE_ARBITER_STALL_CYCLES_VBIF"/>
+ <value value="43" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_UBWC"/>
+ <value value="44" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_NONUBWC"/>
+ <value value="45" name="A7XX_PERF_UCHE_READ_REQUESTS_TP_GMEM"/>
+ <value value="46" name="A7XX_PERF_UCHE_LONG_LINE_ALL_EVICTS_KAILUA"/>
+ <value value="47" name="A7XX_PERF_UCHE_LONG_LINE_PARTIAL_EVICTS_KAILUA"/>
+ <value value="48" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_CCHE"/>
+ <value value="49" name="A7XX_PERF_UCHE_TPH_CONFLICT_CL_OTHER_KAILUA"/>
+ <value value="50" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_CCHE"/>
+ <value value="51" name="A7XX_PERF_UCHE_DBANK_CONFLICT_CL_OTHER_CLIENTS"/>
+ <value value="52" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH0"/>
+ <value value="53" name="A7XX_PERF_UCHE_VBIF_WRITE_BEATS_CH1"/>
+ <value value="54" name="A7XX_PERF_UCHE_CCHE_TPH_QUEUE_FULL"/>
+ <value value="55" name="A7XX_PERF_UCHE_CCHE_DPH_QUEUE_FULL"/>
+ <value value="56" name="A7XX_PERF_UCHE_GMEM_WRITE_BEATS"/>
+ <value value="57" name="A7XX_PERF_UCHE_UBWC_READ_BEATS"/>
+ <value value="58" name="A7XX_PERF_UCHE_UBWC_WRITE_BEATS"/>
+</enum>
+
+<enum name="a7xx_tp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_TP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_TP_STALL_CYCLES_UCHE"/>
+ <value value="2" name="A7XX_PERF_TP_LATENCY_CYCLES"/>
+ <value value="3" name="A7XX_PERF_TP_LATENCY_TRANS"/>
+ <value value="4" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_SAMPLES"/>
+ <value value="5" name="A7XX_PERF_TP_FLAG_FIFO_DELAY_CYCLES"/>
+ <value value="6" name="A7XX_PERF_TP_L1_CACHELINE_REQUESTS"/>
+ <value value="7" name="A7XX_PERF_TP_L1_CACHELINE_MISSES"/>
+ <value value="8" name="A7XX_PERF_TP_SP_TP_TRANS"/>
+ <value value="9" name="A7XX_PERF_TP_TP_SP_TRANS"/>
+ <value value="10" name="A7XX_PERF_TP_OUTPUT_PIXELS"/>
+ <value value="11" name="A7XX_PERF_TP_FILTER_WORKLOAD_16BIT"/>
+ <value value="12" name="A7XX_PERF_TP_FILTER_WORKLOAD_32BIT"/>
+ <value value="13" name="A7XX_PERF_TP_QUADS_RECEIVED"/>
+ <value value="14" name="A7XX_PERF_TP_QUADS_OFFSET"/>
+ <value value="15" name="A7XX_PERF_TP_QUADS_SHADOW"/>
+ <value value="16" name="A7XX_PERF_TP_QUADS_ARRAY"/>
+ <value value="17" name="A7XX_PERF_TP_QUADS_GRADIENT"/>
+ <value value="18" name="A7XX_PERF_TP_QUADS_1D"/>
+ <value value="19" name="A7XX_PERF_TP_QUADS_2D"/>
+ <value value="20" name="A7XX_PERF_TP_QUADS_BUFFER"/>
+ <value value="21" name="A7XX_PERF_TP_QUADS_3D"/>
+ <value value="22" name="A7XX_PERF_TP_QUADS_CUBE"/>
+ <value value="23" name="A7XX_PERF_TP_DIVERGENT_QUADS_RECEIVED"/>
+ <value value="24" name="A7XX_PERF_TP_PRT_NON_RESIDENT_EVENTS"/>
+ <value value="25" name="A7XX_PERF_TP_OUTPUT_PIXELS_POINT"/>
+ <value value="26" name="A7XX_PERF_TP_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="27" name="A7XX_PERF_TP_OUTPUT_PIXELS_MIP"/>
+ <value value="28" name="A7XX_PERF_TP_OUTPUT_PIXELS_ANISO"/>
+ <value value="29" name="A7XX_PERF_TP_OUTPUT_PIXELS_ZERO_LOD"/>
+ <value value="30" name="A7XX_PERF_TP_FLAG_CACHE_REQUESTS"/>
+ <value value="31" name="A7XX_PERF_TP_FLAG_CACHE_MISSES"/>
+ <value value="32" name="A7XX_PERF_TP_L1_5_L2_REQUESTS"/>
+ <value value="33" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS"/>
+ <value value="34" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_POINT"/>
+ <value value="35" name="A7XX_PERF_TP_2D_OUTPUT_PIXELS_BILINEAR"/>
+ <value value="36" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_16BIT"/>
+ <value value="37" name="A7XX_PERF_TP_2D_FILTER_WORKLOAD_32BIT"/>
+ <value value="38" name="A7XX_PERF_TP_TPA2TPC_TRANS"/>
+ <value value="39" name="A7XX_PERF_TP_L1_MISSES_ASTC_1TILE"/>
+ <value value="40" name="A7XX_PERF_TP_L1_MISSES_ASTC_2TILE"/>
+ <value value="41" name="A7XX_PERF_TP_L1_MISSES_ASTC_4TILE"/>
+ <value value="42" name="A7XX_PERF_TP_L1_5_COMPRESS_REQS"/>
+ <value value="43" name="A7XX_PERF_TP_L1_5_L2_COMPRESS_MISS"/>
+ <value value="44" name="A7XX_PERF_TP_L1_BANK_CONFLICT"/>
+ <value value="45" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_CYCLES"/>
+ <value value="46" name="A7XX_PERF_TP_L1_5_MISS_LATENCY_TRANS"/>
+ <value value="47" name="A7XX_PERF_TP_QUADS_CONSTANT_MULTIPLIED"/>
+ <value value="48" name="A7XX_PERF_TP_FRONTEND_WORKING_CYCLES"/>
+ <value value="49" name="A7XX_PERF_TP_L1_TAG_WORKING_CYCLES"/>
+ <value value="50" name="A7XX_PERF_TP_L1_DATA_WRITE_WORKING_CYCLES"/>
+ <value value="51" name="A7XX_PERF_TP_PRE_L1_DECOM_WORKING_CYCLES"/>
+ <value value="52" name="A7XX_PERF_TP_BACKEND_WORKING_CYCLES"/>
+ <value value="53" name="A7XX_PERF_TP_L1_5_CACHE_WORKING_CYCLES"/>
+ <value value="54" name="A7XX_PERF_TP_STARVE_CYCLES_SP"/>
+ <value value="55" name="A7XX_PERF_TP_STARVE_CYCLES_UCHE"/>
+ <value value="56" name="A7XX_PERF_TP_STALL_CYCLES_UFC"/>
+ <value value="57" name="A7XX_PERF_TP_FORMAT_DECOMP"/>
+ <value value="58" name="A7XX_PERF_TP_FILTER_POINT_FP16"/>
+ <value value="59" name="A7XX_PERF_TP_FILTER_POINT_FP32"/>
+ <value value="60" name="A7XX_PERF_TP_LATENCY_FIFO_FULL"/>
+ <value value="61" name="A7XX_PERF_TP_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_TP_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_TP_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_TP_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_TP_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_TP_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_TP_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_TP_RESERVED_68"/>
+ <value value="69" name="A7XX_PERF_TP_RESERVED_69"/>
+ <value value="70" name="A7XX_PERF_TP_RESERVED_70"/>
+ <value value="71" name="A7XX_PERF_TP_RESERVED_71"/>
+ <value value="72" name="A7XX_PERF_TP_RESERVED_72"/>
+ <value value="73" name="A7XX_PERF_TP_RESERVED_73"/>
+ <value value="74" name="A7XX_PERF_TP_RESERVED_74"/>
+ <value value="75" name="A7XX_PERF_TP_RESERVED_75"/>
+ <value value="76" name="A7XX_PERF_TP_RESERVED_76"/>
+ <value value="77" name="A7XX_PERF_TP_RESERVED_77"/>
+ <value value="78" name="A7XX_PERF_TP_RESERVED_78"/>
+ <value value="79" name="A7XX_PERF_TP_RESERVED_79"/>
+ <value value="80" name="A7XX_PERF_TP_RESERVED_80"/>
+ <value value="81" name="A7XX_PERF_TP_RESERVED_81"/>
+ <value value="82" name="A7XX_PERF_TP_RESERVED_82"/>
+ <value value="83" name="A7XX_PERF_TP_RESERVED_83"/>
+ <value value="84" name="A7XX_PERF_TP_RESERVED_84"/>
+ <value value="85" name="A7XX_PERF_TP_RESERVED_85"/>
+ <value value="86" name="A7XX_PERF_TP_RESERVED_86"/>
+ <value value="87" name="A7XX_PERF_TP_RESERVED_87"/>
+ <value value="88" name="A7XX_PERF_TP_RESERVED_88"/>
+ <value value="89" name="A7XX_PERF_TP_RESERVED_89"/>
+ <value value="90" name="A7XX_PERF_TP_RESERVED_90"/>
+ <value value="91" name="A7XX_PERF_TP_RESERVED_91"/>
+ <value value="92" name="A7XX_PERF_TP_RESERVED_92"/>
+ <value value="93" name="A7XX_PERF_TP_RESERVED_93"/>
+ <value value="94" name="A7XX_PERF_TP_RESERVED_94"/>
+ <value value="95" name="A7XX_PERF_TP_RESERVED_95"/>
+ <value value="96" name="A7XX_PERF_TP_RESERVED_96"/>
+ <value value="97" name="A7XX_PERF_TP_RESERVED_97"/>
+ <value value="98" name="A7XX_PERF_TP_RESERVED_98"/>
+ <value value="99" name="A7XX_PERF_TP_RESERVED_99"/>
+ <value value="100" name="A7XX_PERF_TP_RESERVED_100"/>
+ <value value="101" name="A7XX_PERF_TP_RESERVED_101"/>
+ <value value="102" name="A7XX_PERF_TP_RESERVED_102"/>
+ <value value="103" name="A7XX_PERF_TP_RESERVED_103"/>
+ <value value="104" name="A7XX_PERF_TP_RESERVED_104"/>
+ <value value="105" name="A7XX_PERF_TP_RESERVED_105"/>
+ <value value="106" name="A7XX_PERF_TP_RESERVED_106"/>
+ <value value="107" name="A7XX_PERF_TP_RESERVED_107"/>
+ <value value="108" name="A7XX_PERF_TP_RESERVED_108"/>
+ <value value="109" name="A7XX_PERF_TP_RESERVED_109"/>
+ <value value="110" name="A7XX_PERF_TP_RESERVED_110"/>
+ <value value="111" name="A7XX_PERF_TP_RESERVED_111"/>
+ <value value="112" name="A7XX_PERF_TP_RESERVED_112"/>
+ <value value="113" name="A7XX_PERF_TP_RESERVED_113"/>
+ <value value="114" name="A7XX_PERF_TP_RESERVED_114"/>
+ <value value="115" name="A7XX_PERF_TP_RESERVED_115"/>
+ <value value="116" name="A7XX_PERF_TP_RESERVED_116"/>
+ <value value="117" name="A7XX_PERF_TP_RESERVED_117"/>
+ <value value="118" name="A7XX_PERF_TP_RESERVED_118"/>
+ <value value="119" name="A7XX_PERF_TP_RESERVED_119"/>
+ <value value="120" name="A7XX_PERF_TP_RESERVED_120"/>
+ <value value="121" name="A7XX_PERF_TP_RESERVED_121"/>
+ <value value="122" name="A7XX_PERF_TP_RESERVED_122"/>
+ <value value="123" name="A7XX_PERF_TP_RESERVED_123"/>
+ <value value="124" name="A7XX_PERF_TP_RESERVED_124"/>
+ <value value="125" name="A7XX_PERF_TP_RESERVED_125"/>
+ <value value="126" name="A7XX_PERF_TP_RESERVED_126"/>
+ <value value="127" name="A7XX_PERF_TP_RESERVED_127"/>
+ <value value="128" name="A7XX_PERF_TP_FORMAT_DECOMP_BILINEAR"/>
+ <value value="129" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP16"/>
+ <value value="130" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP16"/>
+ <value value="131" name="A7XX_PERF_TP_PACKED_POINT_BOTH_VALID_FP32"/>
+ <value value="132" name="A7XX_PERF_TP_PACKED_POINT_SINGLE_VALID_FP32"/>
+</enum>
+
+<enum name="a7xx_sp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_SP_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_SP_ALU_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_SP_EFU_WORKING_CYCLES"/>
+ <value value="3" name="A7XX_PERF_SP_STALL_CYCLES_VPC"/>
+ <value value="4" name="A7XX_PERF_SP_STALL_CYCLES_TP"/>
+ <value value="5" name="A7XX_PERF_SP_STALL_CYCLES_UCHE"/>
+ <value value="6" name="A7XX_PERF_SP_STALL_CYCLES_RB"/>
+ <value value="7" name="A7XX_PERF_SP_NON_EXECUTION_CYCLES"/>
+ <value value="8" name="A7XX_PERF_SP_WAVE_CONTEXTS"/>
+ <value value="9" name="A7XX_PERF_SP_WAVE_CONTEXT_CYCLES"/>
+ <value value="10" name="A7XX_PERF_SP_STAGE_WAVE_CYCLES"/>
+ <value value="11" name="A7XX_PERF_SP_STAGE_WAVE_SAMPLES"/>
+ <value value="12" name="A7XX_PERF_SP_VS_STAGE_WAVE_CYCLES"/>
+ <value value="13" name="A7XX_PERF_SP_VS_STAGE_WAVE_SAMPLES"/>
+ <value value="14" name="A7XX_PERF_SP_FS_STAGE_DURATION_CYCLES"/>
+ <value value="15" name="A7XX_PERF_SP_VS_STAGE_DURATION_CYCLES"/>
+ <value value="16" name="A7XX_PERF_SP_WAVE_CTRL_CYCLES"/>
+ <value value="17" name="A7XX_PERF_SP_WAVE_LOAD_CYCLES"/>
+ <value value="18" name="A7XX_PERF_SP_WAVE_EMIT_CYCLES"/>
+ <value value="19" name="A7XX_PERF_SP_WAVE_NOP_CYCLES"/>
+ <value value="20" name="A7XX_PERF_SP_WAVE_WAIT_CYCLES"/>
+ <value value="21" name="A7XX_PERF_SP_WAVE_FETCH_CYCLES"/>
+ <value value="22" name="A7XX_PERF_SP_WAVE_IDLE_CYCLES"/>
+ <value value="23" name="A7XX_PERF_SP_WAVE_END_CYCLES"/>
+ <value value="24" name="A7XX_PERF_SP_WAVE_LONG_SYNC_CYCLES"/>
+ <value value="25" name="A7XX_PERF_SP_WAVE_SHORT_SYNC_CYCLES"/>
+ <value value="26" name="A7XX_PERF_SP_WAVE_JOIN_CYCLES"/>
+ <value value="27" name="A7XX_PERF_SP_LM_LOAD_INSTRUCTIONS"/>
+ <value value="28" name="A7XX_PERF_SP_LM_STORE_INSTRUCTIONS"/>
+ <value value="29" name="A7XX_PERF_SP_LM_ATOMICS"/>
+ <value value="30" name="A7XX_PERF_SP_GM_LOAD_INSTRUCTIONS"/>
+ <value value="31" name="A7XX_PERF_SP_GM_STORE_INSTRUCTIONS"/>
+ <value value="32" name="A7XX_PERF_SP_GM_ATOMICS"/>
+ <value value="33" name="A7XX_PERF_SP_VS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="34" name="A7XX_PERF_SP_VS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="35" name="A7XX_PERF_SP_VS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="36" name="A7XX_PERF_SP_VS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="37" name="A7XX_PERF_SP_FS_STAGE_TEX_INSTRUCTIONS"/>
+ <value value="38" name="A7XX_PERF_SP_FS_STAGE_CFLOW_INSTRUCTIONS"/>
+ <value value="39" name="A7XX_PERF_SP_FS_STAGE_EFU_INSTRUCTIONS"/>
+ <value value="40" name="A7XX_PERF_SP_FS_STAGE_FULL_ALU_INSTRUCTIONS"/>
+ <value value="41" name="A7XX_PERF_SP_FS_STAGE_HALF_ALU_INSTRUCTIONS"/>
+ <value value="42" name="A7XX_PERF_SP_FS_STAGE_BARY_INSTRUCTIONS"/>
+ <value value="43" name="A7XX_PERF_SP_VS_INSTRUCTIONS"/>
+ <value value="44" name="A7XX_PERF_SP_FS_INSTRUCTIONS"/>
+ <value value="45" name="A7XX_PERF_SP_ADDR_LOCK_COUNT"/>
+ <value value="46" name="A7XX_PERF_SP_UCHE_READ_TRANS"/>
+ <value value="47" name="A7XX_PERF_SP_UCHE_WRITE_TRANS"/>
+ <value value="48" name="A7XX_PERF_SP_EXPORT_VPC_TRANS"/>
+ <value value="49" name="A7XX_PERF_SP_EXPORT_RB_TRANS"/>
+ <value value="50" name="A7XX_PERF_SP_PIXELS_KILLED"/>
+ <value value="51" name="A7XX_PERF_SP_ICL1_REQUESTS"/>
+ <value value="52" name="A7XX_PERF_SP_ICL1_MISSES"/>
+ <value value="53" name="A7XX_PERF_SP_HS_INSTRUCTIONS"/>
+ <value value="54" name="A7XX_PERF_SP_DS_INSTRUCTIONS"/>
+ <value value="55" name="A7XX_PERF_SP_GS_INSTRUCTIONS"/>
+ <value value="56" name="A7XX_PERF_SP_CS_INSTRUCTIONS"/>
+ <value value="57" name="A7XX_PERF_SP_GPR_READ"/>
+ <value value="58" name="A7XX_PERF_SP_GPR_WRITE"/>
+ <value value="59" name="A7XX_PERF_SP_FS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="60" name="A7XX_PERF_SP_VS_STAGE_HALF_EFU_INSTRUCTIONS"/>
+ <value value="61" name="A7XX_PERF_SP_LM_BANK_CONFLICTS"/>
+ <value value="62" name="A7XX_PERF_SP_TEX_CONTROL_WORKING_CYCLES"/>
+ <value value="63" name="A7XX_PERF_SP_LOAD_CONTROL_WORKING_CYCLES"/>
+ <value value="64" name="A7XX_PERF_SP_FLOW_CONTROL_WORKING_CYCLES"/>
+ <value value="65" name="A7XX_PERF_SP_LM_WORKING_CYCLES"/>
+ <value value="66" name="A7XX_PERF_SP_DISPATCHER_WORKING_CYCLES"/>
+ <value value="67" name="A7XX_PERF_SP_SEQUENCER_WORKING_CYCLES"/>
+ <value value="68" name="A7XX_PERF_SP_LOW_EFFICIENCY_STARVED_BY_TP"/>
+ <value value="69" name="A7XX_PERF_SP_STARVE_CYCLES_HLSQ"/>
+ <value value="70" name="A7XX_PERF_SP_NON_EXECUTION_LS_CYCLES"/>
+ <value value="71" name="A7XX_PERF_SP_WORKING_EU"/>
+ <value value="72" name="A7XX_PERF_SP_ANY_EU_WORKING"/>
+ <value value="73" name="A7XX_PERF_SP_WORKING_EU_FS_STAGE"/>
+ <value value="74" name="A7XX_PERF_SP_ANY_EU_WORKING_FS_STAGE"/>
+ <value value="75" name="A7XX_PERF_SP_WORKING_EU_VS_STAGE"/>
+ <value value="76" name="A7XX_PERF_SP_ANY_EU_WORKING_VS_STAGE"/>
+ <value value="77" name="A7XX_PERF_SP_WORKING_EU_CS_STAGE"/>
+ <value value="78" name="A7XX_PERF_SP_ANY_EU_WORKING_CS_STAGE"/>
+ <value value="79" name="A7XX_PERF_SP_GPR_READ_PREFETCH"/>
+ <value value="80" name="A7XX_PERF_SP_GPR_READ_CONFLICT"/>
+ <value value="81" name="A7XX_PERF_SP_GPR_WRITE_CONFLICT"/>
+ <value value="82" name="A7XX_PERF_SP_GM_LOAD_LATENCY_CYCLES"/>
+ <value value="83" name="A7XX_PERF_SP_GM_LOAD_LATENCY_SAMPLES"/>
+ <value value="84" name="A7XX_PERF_SP_EXECUTABLE_WAVES"/>
+ <value value="85" name="A7XX_PERF_SP_ICL1_MISS_FETCH_CYCLES"/>
+ <value value="86" name="A7XX_PERF_SP_WORKING_EU_LPAC"/>
+ <value value="87" name="A7XX_PERF_SP_BYPASS_BUSY_CYCLES"/>
+ <value value="88" name="A7XX_PERF_SP_ANY_EU_WORKING_LPAC"/>
+ <value value="89" name="A7XX_PERF_SP_WAVE_ALU_CYCLES"/>
+ <value value="90" name="A7XX_PERF_SP_WAVE_EFU_CYCLES"/>
+ <value value="91" name="A7XX_PERF_SP_WAVE_INT_CYCLES"/>
+ <value value="92" name="A7XX_PERF_SP_WAVE_CSP_CYCLES"/>
+ <value value="93" name="A7XX_PERF_SP_EWAVE_CONTEXTS"/>
+ <value value="94" name="A7XX_PERF_SP_EWAVE_CONTEXT_CYCLES"/>
+ <value value="95" name="A7XX_PERF_SP_LPAC_BUSY_CYCLES"/>
+ <value value="96" name="A7XX_PERF_SP_LPAC_INSTRUCTIONS"/>
+ <value value="97" name="A7XX_PERF_SP_FS_STAGE_1X_WAVES"/>
+ <value value="98" name="A7XX_PERF_SP_FS_STAGE_2X_WAVES"/>
+ <value value="99" name="A7XX_PERF_SP_QUADS"/>
+ <value value="100" name="A7XX_PERF_SP_CS_INVOCATIONS"/>
+ <value value="101" name="A7XX_PERF_SP_PIXELS"/>
+ <value value="102" name="A7XX_PERF_SP_LPAC_DRAWCALLS"/>
+ <value value="103" name="A7XX_PERF_SP_PI_WORKING_CYCLES"/>
+ <value value="104" name="A7XX_PERF_SP_WAVE_INPUT_CYCLES"/>
+ <value value="105" name="A7XX_PERF_SP_WAVE_OUTPUT_CYCLES"/>
+ <value value="106" name="A7XX_PERF_SP_WAVE_HWAVE_WAIT_CYCLES"/>
+ <value value="107" name="A7XX_PERF_SP_WAVE_HWAVE_SYNC"/>
+ <value value="108" name="A7XX_PERF_SP_OUTPUT_3D_PIXELS"/>
+ <value value="109" name="A7XX_PERF_SP_FULL_ALU_MAD_INSTRUCTIONS"/>
+ <value value="110" name="A7XX_PERF_SP_HALF_ALU_MAD_INSTRUCTIONS"/>
+ <value value="111" name="A7XX_PERF_SP_FULL_ALU_MUL_INSTRUCTIONS"/>
+ <value value="112" name="A7XX_PERF_SP_HALF_ALU_MUL_INSTRUCTIONS"/>
+ <value value="113" name="A7XX_PERF_SP_FULL_ALU_ADD_INSTRUCTIONS"/>
+ <value value="114" name="A7XX_PERF_SP_HALF_ALU_ADD_INSTRUCTIONS"/>
+ <value value="115" name="A7XX_PERF_SP_BARY_FP32_INSTRUCTIONS"/>
+ <value value="116" name="A7XX_PERF_SP_ALU_GPR_READ_CYCLES"/>
+ <value value="117" name="A7XX_PERF_SP_ALU_DATA_FORWARDING_CYCLES"/>
+ <value value="118" name="A7XX_PERF_SP_LM_FULL_CYCLES"/>
+ <value value="119" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_CYCLES"/>
+ <value value="120" name="A7XX_PERF_SP_TEXTURE_FETCH_LATENCY_SAMPLES"/>
+ <value value="121" name="A7XX_PERF_SP_FS_STAGE_PI_TEX_INSTRUCTION"/>
+ <value value="122" name="A7XX_PERF_SP_RAY_QUERY_INSTRUCTIONS"/>
+ <value value="123" name="A7XX_PERF_SP_RBRT_KICKOFF_FIBERS"/>
+ <value value="124" name="A7XX_PERF_SP_RBRT_KICKOFF_DQUADS"/>
+ <value value="125" name="A7XX_PERF_SP_RTU_BUSY_CYCLES"/>
+ <value value="126" name="A7XX_PERF_SP_RTU_L0_HITS"/>
+ <value value="127" name="A7XX_PERF_SP_RTU_L0_MISSES"/>
+ <value value="128" name="A7XX_PERF_SP_RTU_L0_HIT_ON_MISS"/>
+ <value value="129" name="A7XX_PERF_SP_RTU_STALL_CYCLES_WAVE_QUEUE"/>
+ <value value="130" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_HIT_QUEUE"/>
+ <value value="131" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0_MISS_QUEUE"/>
+ <value value="132" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0D_IDX_QUEUE"/>
+ <value value="133" name="A7XX_PERF_SP_RTU_STALL_CYCLES_L0DATA"/>
+ <value value="134" name="A7XX_PERF_SP_RTU_STALL_CYCLES_REPLACE_CNT"/>
+ <value value="135" name="A7XX_PERF_SP_RTU_STALL_CYCLES_MRG_CNT"/>
+ <value value="136" name="A7XX_PERF_SP_RTU_STALL_CYCLES_UCHE"/>
+ <value value="137" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_L0"/>
+ <value value="138" name="A7XX_PERF_SP_RTU_OPERAND_FETCH_STALL_CYCLES_INS_FIFO"/>
+ <value value="139" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_CYCLES"/>
+ <value value="140" name="A7XX_PERF_SP_RTU_BVH_FETCH_LATENCY_SAMPLES"/>
+ <value value="141" name="A7XX_PERF_SP_STCHE_MISS_INC_VS"/>
+ <value value="142" name="A7XX_PERF_SP_STCHE_MISS_INC_FS"/>
+ <value value="143" name="A7XX_PERF_SP_STCHE_MISS_INC_BV"/>
+ <value value="144" name="A7XX_PERF_SP_STCHE_MISS_INC_LPAC"/>
+ <value value="145" name="A7XX_PERF_SP_VGPR_ACTIVE_CONTEXTS"/>
+ <value value="146" name="A7XX_PERF_SP_PGPR_ALLOC_CONTEXTS"/>
+ <value value="147" name="A7XX_PERF_SP_VGPR_ALLOC_CONTEXTS"/>
+ <value value="148" name="A7XX_PERF_SP_RTU_RAY_BOX_INTERSECTIONS"/>
+ <value value="149" name="A7XX_PERF_SP_RTU_RAY_TRIANGLE_INTERSECTIONS"/>
+ <value value="150" name="A7XX_PERF_SP_SCH_STALL_CYCLES_RTU"/>
+</enum>
+
+<enum name="a7xx_rb_perfcounter_select">
+ <value value="0" name="A7XX_PERF_RB_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_RB_STALL_CYCLES_HLSQ"/>
+ <value value="2" name="A7XX_PERF_RB_STALL_CYCLES_FIFO0_FULL"/>
+ <value value="3" name="A7XX_PERF_RB_STALL_CYCLES_FIFO1_FULL"/>
+ <value value="4" name="A7XX_PERF_RB_STALL_CYCLES_FIFO2_FULL"/>
+ <value value="5" name="A7XX_PERF_RB_STARVE_CYCLES_SP"/>
+ <value value="6" name="A7XX_PERF_RB_STARVE_CYCLES_LRZ_TILE"/>
+ <value value="7" name="A7XX_PERF_RB_STARVE_CYCLES_CCU"/>
+ <value value="8" name="A7XX_PERF_RB_STARVE_CYCLES_Z_PLANE"/>
+ <value value="9" name="A7XX_PERF_RB_STARVE_CYCLES_BARY_PLANE"/>
+ <value value="10" name="A7XX_PERF_RB_Z_WORKLOAD"/>
+ <value value="11" name="A7XX_PERF_RB_HLSQ_ACTIVE"/>
+ <value value="12" name="A7XX_PERF_RB_Z_READ"/>
+ <value value="13" name="A7XX_PERF_RB_Z_WRITE"/>
+ <value value="14" name="A7XX_PERF_RB_C_READ"/>
+ <value value="15" name="A7XX_PERF_RB_C_WRITE"/>
+ <value value="16" name="A7XX_PERF_RB_TOTAL_PASS"/>
+ <value value="17" name="A7XX_PERF_RB_Z_PASS"/>
+ <value value="18" name="A7XX_PERF_RB_Z_FAIL"/>
+ <value value="19" name="A7XX_PERF_RB_S_FAIL"/>
+ <value value="20" name="A7XX_PERF_RB_BLENDED_FXP_COMPONENTS"/>
+ <value value="21" name="A7XX_PERF_RB_BLENDED_FP16_COMPONENTS"/>
+ <value value="22" name="A7XX_PERF_RB_PS_INVOCATIONS"/>
+ <value value="23" name="A7XX_PERF_RB_2D_ALIVE_CYCLES"/>
+ <value value="24" name="A7XX_PERF_RB_2D_STALL_CYCLES_A2D"/>
+ <value value="25" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SRC"/>
+ <value value="26" name="A7XX_PERF_RB_2D_STARVE_CYCLES_SP"/>
+ <value value="27" name="A7XX_PERF_RB_2D_STARVE_CYCLES_DST"/>
+ <value value="28" name="A7XX_PERF_RB_2D_VALID_PIXELS"/>
+ <value value="29" name="A7XX_PERF_RB_3D_PIXELS"/>
+ <value value="30" name="A7XX_PERF_RB_BLENDER_WORKING_CYCLES"/>
+ <value value="31" name="A7XX_PERF_RB_ZPROC_WORKING_CYCLES"/>
+ <value value="32" name="A7XX_PERF_RB_CPROC_WORKING_CYCLES"/>
+ <value value="33" name="A7XX_PERF_RB_SAMPLER_WORKING_CYCLES"/>
+ <value value="34" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_READ"/>
+ <value value="35" name="A7XX_PERF_RB_STALL_CYCLES_CCU_COLOR_WRITE"/>
+ <value value="36" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_READ"/>
+ <value value="37" name="A7XX_PERF_RB_STALL_CYCLES_CCU_DEPTH_WRITE"/>
+ <value value="38" name="A7XX_PERF_RB_STALL_CYCLES_VPC"/>
+ <value value="39" name="A7XX_PERF_RB_2D_INPUT_TRANS"/>
+ <value value="40" name="A7XX_PERF_RB_2D_OUTPUT_RB_DST_TRANS"/>
+ <value value="41" name="A7XX_PERF_RB_2D_OUTPUT_RB_SRC_TRANS"/>
+ <value value="42" name="A7XX_PERF_RB_BLENDED_FP32_COMPONENTS"/>
+ <value value="43" name="A7XX_PERF_RB_COLOR_PIX_TILES"/>
+ <value value="44" name="A7XX_PERF_RB_STALL_CYCLES_CCU"/>
+ <value value="45" name="A7XX_PERF_RB_EARLY_Z_ARB3_GRANT"/>
+ <value value="46" name="A7XX_PERF_RB_LATE_Z_ARB3_GRANT"/>
+ <value value="47" name="A7XX_PERF_RB_EARLY_Z_SKIP_GRANT"/>
+ <value value="48" name="A7XX_PERF_RB_VRS_1x1_QUADS"/>
+ <value value="49" name="A7XX_PERF_RB_VRS_2x1_QUADS"/>
+ <value value="50" name="A7XX_PERF_RB_VRS_1x2_QUADS"/>
+ <value value="51" name="A7XX_PERF_RB_VRS_2x2_QUADS"/>
+ <value value="52" name="A7XX_PERF_RB_VRS_4x2_QUADS"/>
+ <value value="53" name="A7XX_PERF_RB_VRS_4x4_QUADS"/>
+</enum>
+
+<enum name="a7xx_vsc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_VSC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_VSC_WORKING_CYCLES"/>
+ <value value="2" name="A7XX_PERF_VSC_STALL_CYCLES_UCHE"/>
+ <value value="3" name="A7XX_PERF_VSC_EOT_NUM"/>
+ <value value="4" name="A7XX_PERF_VSC_INPUT_TILES"/>
+</enum>
+
+<enum name="a7xx_ccu_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CCU_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_CCU_STALL_CYCLES_RB_DEPTH_RETURN"/>
+ <value value="2" name="A7XX_PERF_CCU_STALL_CYCLES_RB_COLOR_RETURN"/>
+ <value value="3" name="A7XX_PERF_CCU_DEPTH_BLOCKS"/>
+ <value value="4" name="A7XX_PERF_CCU_COLOR_BLOCKS"/>
+ <value value="5" name="A7XX_PERF_CCU_DEPTH_BLOCK_HIT"/>
+ <value value="6" name="A7XX_PERF_CCU_COLOR_BLOCK_HIT"/>
+ <value value="7" name="A7XX_PERF_CCU_PARTIAL_BLOCK_READ"/>
+ <value value="8" name="A7XX_PERF_CCU_GMEM_READ"/>
+ <value value="9" name="A7XX_PERF_CCU_GMEM_WRITE"/>
+ <value value="10" name="A7XX_PERF_CCU_2D_RD_REQ"/>
+ <value value="11" name="A7XX_PERF_CCU_2D_WR_REQ"/>
+ <value value="12" name="A7XX_PERF_CCU_UBWC_COLOR_BLOCKS_CONCURRENT"/>
+ <value value="13" name="A7XX_PERF_CCU_UBWC_DEPTH_BLOCKS_CONCURRENT"/>
+ <value value="14" name="A7XX_PERF_CCU_COLOR_RESOLVE_DROPPED"/>
+ <value value="15" name="A7XX_PERF_CCU_DEPTH_RESOLVE_DROPPED"/>
+ <value value="16" name="A7XX_PERF_CCU_COLOR_RENDER_CONCURRENT"/>
+ <value value="17" name="A7XX_PERF_CCU_DEPTH_RENDER_CONCURRENT"/>
+ <value value="18" name="A7XX_PERF_CCU_COLOR_RESOLVE_AFTER_RENDER"/>
+ <value value="19" name="A7XX_PERF_CCU_DEPTH_RESOLVE_AFTER_RENDER"/>
+ <value value="20" name="A7XX_PERF_CCU_GMEM_EXTRA_DEPTH_READ"/>
+ <value value="21" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA"/>
+ <value value="22" name="A7XX_PERF_CCU_GMEM_COLOR_READ_4AA_FULL"/>
+</enum>
+
+<enum name="a7xx_lrz_perfcounter_select">
+ <value value="0" name="A7XX_PERF_LRZ_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_LRZ_STARVE_CYCLES_RAS"/>
+ <value value="2" name="A7XX_PERF_LRZ_STALL_CYCLES_RB"/>
+ <value value="3" name="A7XX_PERF_LRZ_STALL_CYCLES_VSC"/>
+ <value value="4" name="A7XX_PERF_LRZ_STALL_CYCLES_VPC"/>
+ <value value="5" name="A7XX_PERF_LRZ_STALL_CYCLES_FLAG_PREFETCH"/>
+ <value value="6" name="A7XX_PERF_LRZ_STALL_CYCLES_UCHE"/>
+ <value value="7" name="A7XX_PERF_LRZ_LRZ_READ"/>
+ <value value="8" name="A7XX_PERF_LRZ_LRZ_WRITE"/>
+ <value value="9" name="A7XX_PERF_LRZ_READ_LATENCY"/>
+ <value value="10" name="A7XX_PERF_LRZ_MERGE_CACHE_UPDATING"/>
+ <value value="11" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_MASKGEN"/>
+ <value value="12" name="A7XX_PERF_LRZ_PRIM_KILLED_BY_LRZ"/>
+ <value value="13" name="A7XX_PERF_LRZ_VISIBLE_PRIM_AFTER_LRZ"/>
+ <value value="14" name="A7XX_PERF_LRZ_FULL_8X8_TILES"/>
+ <value value="15" name="A7XX_PERF_LRZ_PARTIAL_8X8_TILES"/>
+ <value value="16" name="A7XX_PERF_LRZ_TILE_KILLED"/>
+ <value value="17" name="A7XX_PERF_LRZ_TOTAL_PIXEL"/>
+ <value value="18" name="A7XX_PERF_LRZ_VISIBLE_PIXEL_AFTER_LRZ"/>
+ <value value="19" name="A7XX_PERF_LRZ_FEEDBACK_ACCEPT"/>
+ <value value="20" name="A7XX_PERF_LRZ_FEEDBACK_DISCARD"/>
+ <value value="21" name="A7XX_PERF_LRZ_FEEDBACK_STALL"/>
+ <value value="22" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_ZPLANE"/>
+ <value value="23" name="A7XX_PERF_LRZ_STALL_CYCLES_RB_BPLANE"/>
+ <value value="24" name="A7XX_PERF_LRZ_RAS_MASK_TRANS"/>
+ <value value="25" name="A7XX_PERF_LRZ_STALL_CYCLES_MVC"/>
+ <value value="26" name="A7XX_PERF_LRZ_TILE_KILLED_BY_IMAGE_VRS"/>
+ <value value="27" name="A7XX_PERF_LRZ_TILE_KILLED_BY_Z"/>
+</enum>
+
+<enum name="a7xx_cmp_perfcounter_select">
+ <value value="0" name="A7XX_PERF_CMPDECMP_STALL_CYCLES_ARB"/>
+ <value value="1" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_CYCLES"/>
+ <value value="2" name="A7XX_PERF_CMPDECMP_VBIF_LATENCY_SAMPLES"/>
+ <value value="3" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_CCU"/>
+ <value value="4" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_CCU"/>
+ <value value="5" name="A7XX_PERF_CMPDECMP_VBIF_READ_REQUEST"/>
+ <value value="6" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_REQUEST"/>
+ <value value="7" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA"/>
+ <value value="8" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA"/>
+ <value value="9" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG1_COUNT"/>
+ <value value="10" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG2_COUNT"/>
+ <value value="11" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG3_COUNT"/>
+ <value value="12" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG4_COUNT"/>
+ <value value="13" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG5_COUNT"/>
+ <value value="14" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG6_COUNT"/>
+ <value value="15" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG8_COUNT"/>
+ <value value="16" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG1_COUNT"/>
+ <value value="17" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG2_COUNT"/>
+ <value value="18" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG3_COUNT"/>
+ <value value="19" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG4_COUNT"/>
+ <value value="20" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG5_COUNT"/>
+ <value value="21" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG6_COUNT"/>
+ <value value="22" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG8_COUNT"/>
+ <value value="23" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH0"/>
+ <value value="24" name="A7XX_PERF_CMPDECMP_VBIF_READ_DATA_UCHE_CH1"/>
+ <value value="25" name="A7XX_PERF_CMPDECMP_VBIF_WRITE_DATA_UCHE"/>
+ <value value="26" name="A7XX_PERF_CMPDECMP_DEPTH_WRITE_FLAG0_COUNT"/>
+ <value value="27" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAG0_COUNT"/>
+ <value value="28" name="A7XX_PERF_CMPDECMP_COLOR_WRITE_FLAGALPHA_COUNT"/>
+ <value value="29" name="A7XX_PERF_CMPDECMP_RESOLVE_EVENTS"/>
+ <value value="30" name="A7XX_PERF_CMPDECMP_CONCURRENT_RESOLVE_EVENTS"/>
+ <value value="31" name="A7XX_PERF_CMPDECMP_DROPPED_CLEAR_EVENTS"/>
+ <value value="32" name="A7XX_PERF_CMPDECMP_ST_BLOCKS_CONCURRENT"/>
+ <value value="33" name="A7XX_PERF_CMPDECMP_LRZ_ST_BLOCKS_CONCURRENT"/>
+ <value value="34" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG0_COUNT"/>
+ <value value="35" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG1_COUNT"/>
+ <value value="36" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG2_COUNT"/>
+ <value value="37" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG3_COUNT"/>
+ <value value="38" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG4_COUNT"/>
+ <value value="39" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG5_COUNT"/>
+ <value value="40" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG6_COUNT"/>
+ <value value="41" name="A7XX_PERF_CMPDECMP_DEPTH_READ_FLAG8_COUNT"/>
+ <value value="42" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG0_COUNT"/>
+ <value value="43" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG1_COUNT"/>
+ <value value="44" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG2_COUNT"/>
+ <value value="45" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG3_COUNT"/>
+ <value value="46" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG4_COUNT"/>
+ <value value="47" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG5_COUNT"/>
+ <value value="48" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG6_COUNT"/>
+ <value value="49" name="A7XX_PERF_CMPDECMP_COLOR_READ_FLAG8_COUNT"/>
+</enum>
+
+<enum name="a7xx_gbif_perfcounter_select">
+ <value value="0" name="A7XX_PERF_GBIF_RESERVED_0"/>
+ <value value="1" name="A7XX_PERF_GBIF_RESERVED_1"/>
+ <value value="2" name="A7XX_PERF_GBIF_RESERVED_2"/>
+ <value value="3" name="A7XX_PERF_GBIF_RESERVED_3"/>
+ <value value="4" name="A7XX_PERF_GBIF_RESERVED_4"/>
+ <value value="5" name="A7XX_PERF_GBIF_RESERVED_5"/>
+ <value value="6" name="A7XX_PERF_GBIF_RESERVED_6"/>
+ <value value="7" name="A7XX_PERF_GBIF_RESERVED_7"/>
+ <value value="8" name="A7XX_PERF_GBIF_RESERVED_8"/>
+ <value value="9" name="A7XX_PERF_GBIF_RESERVED_9"/>
+ <value value="10" name="A7XX_PERF_GBIF_AXI0_READ_REQUESTS_TOTAL"/>
+ <value value="11" name="A7XX_PERF_GBIF_AXI1_READ_REQUESTS_TOTAL"/>
+ <value value="12" name="A7XX_PERF_GBIF_RESERVED_12"/>
+ <value value="13" name="A7XX_PERF_GBIF_RESERVED_13"/>
+ <value value="14" name="A7XX_PERF_GBIF_RESERVED_14"/>
+ <value value="15" name="A7XX_PERF_GBIF_RESERVED_15"/>
+ <value value="16" name="A7XX_PERF_GBIF_RESERVED_16"/>
+ <value value="17" name="A7XX_PERF_GBIF_RESERVED_17"/>
+ <value value="18" name="A7XX_PERF_GBIF_RESERVED_18"/>
+ <value value="19" name="A7XX_PERF_GBIF_RESERVED_19"/>
+ <value value="20" name="A7XX_PERF_GBIF_RESERVED_20"/>
+ <value value="21" name="A7XX_PERF_GBIF_RESERVED_21"/>
+ <value value="22" name="A7XX_PERF_GBIF_AXI0_WRITE_REQUESTS_TOTAL"/>
+ <value value="23" name="A7XX_PERF_GBIF_AXI1_WRITE_REQUESTS_TOTAL"/>
+ <value value="24" name="A7XX_PERF_GBIF_RESERVED_24"/>
+ <value value="25" name="A7XX_PERF_GBIF_RESERVED_25"/>
+ <value value="26" name="A7XX_PERF_GBIF_RESERVED_26"/>
+ <value value="27" name="A7XX_PERF_GBIF_RESERVED_27"/>
+ <value value="28" name="A7XX_PERF_GBIF_RESERVED_28"/>
+ <value value="29" name="A7XX_PERF_GBIF_RESERVED_29"/>
+ <value value="30" name="A7XX_PERF_GBIF_RESERVED_30"/>
+ <value value="31" name="A7XX_PERF_GBIF_RESERVED_31"/>
+ <value value="32" name="A7XX_PERF_GBIF_RESERVED_32"/>
+ <value value="33" name="A7XX_PERF_GBIF_RESERVED_33"/>
+ <value value="34" name="A7XX_PERF_GBIF_AXI0_READ_DATA_BEATS_TOTAL"/>
+ <value value="35" name="A7XX_PERF_GBIF_AXI1_READ_DATA_BEATS_TOTAL"/>
+ <value value="36" name="A7XX_PERF_GBIF_RESERVED_36"/>
+ <value value="37" name="A7XX_PERF_GBIF_RESERVED_37"/>
+ <value value="38" name="A7XX_PERF_GBIF_RESERVED_38"/>
+ <value value="39" name="A7XX_PERF_GBIF_RESERVED_39"/>
+ <value value="40" name="A7XX_PERF_GBIF_RESERVED_40"/>
+ <value value="41" name="A7XX_PERF_GBIF_RESERVED_41"/>
+ <value value="42" name="A7XX_PERF_GBIF_RESERVED_42"/>
+ <value value="43" name="A7XX_PERF_GBIF_RESERVED_43"/>
+ <value value="44" name="A7XX_PERF_GBIF_RESERVED_44"/>
+ <value value="45" name="A7XX_PERF_GBIF_RESERVED_45"/>
+ <value value="46" name="A7XX_PERF_GBIF_AXI0_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="47" name="A7XX_PERF_GBIF_AXI1_WRITE_DATA_BEATS_TOTAL"/>
+ <value value="48" name="A7XX_PERF_GBIF_RESERVED_48"/>
+ <value value="49" name="A7XX_PERF_GBIF_RESERVED_49"/>
+ <value value="50" name="A7XX_PERF_GBIF_RESERVED_50"/>
+ <value value="51" name="A7XX_PERF_GBIF_RESERVED_51"/>
+ <value value="52" name="A7XX_PERF_GBIF_RESERVED_52"/>
+ <value value="53" name="A7XX_PERF_GBIF_RESERVED_53"/>
+ <value value="54" name="A7XX_PERF_GBIF_RESERVED_54"/>
+ <value value="55" name="A7XX_PERF_GBIF_RESERVED_55"/>
+ <value value="56" name="A7XX_PERF_GBIF_RESERVED_56"/>
+ <value value="57" name="A7XX_PERF_GBIF_RESERVED_57"/>
+ <value value="58" name="A7XX_PERF_GBIF_RESERVED_58"/>
+ <value value="59" name="A7XX_PERF_GBIF_RESERVED_59"/>
+ <value value="60" name="A7XX_PERF_GBIF_RESERVED_60"/>
+ <value value="61" name="A7XX_PERF_GBIF_RESERVED_61"/>
+ <value value="62" name="A7XX_PERF_GBIF_RESERVED_62"/>
+ <value value="63" name="A7XX_PERF_GBIF_RESERVED_63"/>
+ <value value="64" name="A7XX_PERF_GBIF_RESERVED_64"/>
+ <value value="65" name="A7XX_PERF_GBIF_RESERVED_65"/>
+ <value value="66" name="A7XX_PERF_GBIF_RESERVED_66"/>
+ <value value="67" name="A7XX_PERF_GBIF_RESERVED_67"/>
+ <value value="68" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_RD_ALL"/>
+ <value value="69" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_RD_ALL"/>
+ <value value="70" name="A7XX_PERF_GBIF_CYCLES_CH0_HELD_OFF_WR_ALL"/>
+ <value value="71" name="A7XX_PERF_GBIF_CYCLES_CH1_HELD_OFF_WR_ALL"/>
+ <value value="72" name="A7XX_PERF_GBIF_AXI_CH0_REQUEST_HELD_OFF"/>
+ <value value="73" name="A7XX_PERF_GBIF_AXI_CH1_REQUEST_HELD_OFF"/>
+ <value value="74" name="A7XX_PERF_GBIF_AXI_REQUEST_HELD_OFF"/>
+ <value value="75" name="A7XX_PERF_GBIF_AXI_CH0_WRITE_DATA_HELD_OFF"/>
+ <value value="76" name="A7XX_PERF_GBIF_AXI_CH1_WRITE_DATA_HELD_OFF"/>
+ <value value="77" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_DATA_HELD_OFF"/>
+ <value value="78" name="A7XX_PERF_GBIF_AXI_ALL_READ_BEATS"/>
+ <value value="79" name="A7XX_PERF_GBIF_AXI_ALL_WRITE_BEATS"/>
+ <value value="80" name="A7XX_PERF_GBIF_AXI_ALL_BEATS"/>
+</enum>
+
+<enum name="a7xx_ufc_perfcounter_select">
+ <value value="0" name="A7XX_PERF_UFC_BUSY_CYCLES"/>
+ <value value="1" name="A7XX_PERF_UFC_READ_DATA_VBIF"/>
+ <value value="2" name="A7XX_PERF_UFC_WRITE_DATA_VBIF"/>
+ <value value="3" name="A7XX_PERF_UFC_READ_REQUEST_VBIF"/>
+ <value value="4" name="A7XX_PERF_UFC_WRITE_REQUEST_VBIF"/>
+ <value value="5" name="A7XX_PERF_UFC_LRZ_FILTER_HIT"/>
+ <value value="6" name="A7XX_PERF_UFC_LRZ_FILTER_MISS"/>
+ <value value="7" name="A7XX_PERF_UFC_CRE_FILTER_HIT"/>
+ <value value="8" name="A7XX_PERF_UFC_CRE_FILTER_MISS"/>
+ <value value="9" name="A7XX_PERF_UFC_SP_FILTER_HIT"/>
+ <value value="10" name="A7XX_PERF_UFC_SP_FILTER_MISS"/>
+ <value value="11" name="A7XX_PERF_UFC_SP_REQUESTS"/>
+ <value value="12" name="A7XX_PERF_UFC_TP_FILTER_HIT"/>
+ <value value="13" name="A7XX_PERF_UFC_TP_FILTER_MISS"/>
+ <value value="14" name="A7XX_PERF_UFC_TP_REQUESTS"/>
+ <value value="15" name="A7XX_PERF_UFC_MAIN_HIT_LRZ_PREFETCH"/>
+ <value value="16" name="A7XX_PERF_UFC_MAIN_HIT_CRE_PREFETCH"/>
+ <value value="17" name="A7XX_PERF_UFC_MAIN_HIT_SP_PREFETCH"/>
+ <value value="18" name="A7XX_PERF_UFC_MAIN_HIT_TP_PREFETCH"/>
+ <value value="19" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_READ"/>
+ <value value="20" name="A7XX_PERF_UFC_MAIN_HIT_UBWC_WRITE"/>
+ <value value="21" name="A7XX_PERF_UFC_MAIN_MISS_LRZ_PREFETCH"/>
+ <value value="22" name="A7XX_PERF_UFC_MAIN_MISS_CRE_PREFETCH"/>
+ <value value="23" name="A7XX_PERF_UFC_MAIN_MISS_SP_PREFETCH"/>
+ <value value="24" name="A7XX_PERF_UFC_MAIN_MISS_TP_PREFETCH"/>
+ <value value="25" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_READ"/>
+ <value value="26" name="A7XX_PERF_UFC_MAIN_MISS_UBWC_WRITE"/>
+ <value value="27" name="A7XX_PERF_UFC_UBWC_READ_UFC_TRANS"/>
+ <value value="28" name="A7XX_PERF_UFC_UBWC_WRITE_UFC_TRANS"/>
+ <value value="29" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_CMD"/>
+ <value value="30" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_RDATA"/>
+ <value value="31" name="A7XX_PERF_UFC_STALL_CYCLES_GBIF_WDATA"/>
+ <value value="32" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_WR_FLAG"/>
+ <value value="33" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_FLAG_RTN"/>
+ <value value="34" name="A7XX_PERF_UFC_STALL_CYCLES_UBWC_EVENT"/>
+ <value value="35" name="A7XX_PERF_UFC_LRZ_PREFETCH_STALLED_CYCLES"/>
+ <value value="36" name="A7XX_PERF_UFC_CRE_PREFETCH_STALLED_CYCLES"/>
+ <value value="37" name="A7XX_PERF_UFC_SPTP_PREFETCH_STALLED_CYCLES"/>
+ <value value="38" name="A7XX_PERF_UFC_UBWC_RD_STALLED_CYCLES"/>
+ <value value="39" name="A7XX_PERF_UFC_UBWC_WR_STALLED_CYCLES"/>
+ <value value="40" name="A7XX_PERF_UFC_PREFETCH_STALLED_CYCLES"/>
+ <value value="41" name="A7XX_PERF_UFC_EVICTION_STALLED_CYCLES"/>
+ <value value="42" name="A7XX_PERF_UFC_LOCK_STALLED_CYCLES"/>
+ <value value="43" name="A7XX_PERF_UFC_MISS_LATENCY_CYCLES"/>
+ <value value="44" name="A7XX_PERF_UFC_MISS_LATENCY_SAMPLES"/>
+ <value value="45" name="A7XX_PERF_UFC_UBWC_REQ_STALLED_CYCLES"/>
+ <value value="46" name="A7XX_PERF_UFC_TP_HINT_TAG_MISS"/>
+ <value value="47" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_RDY"/>
+ <value value="48" name="A7XX_PERF_UFC_TP_HINT_TAG_HIT_NRDY"/>
+ <value value="49" name="A7XX_PERF_UFC_TP_HINT_IS_FCLEAR"/>
+ <value value="50" name="A7XX_PERF_UFC_TP_HINT_IS_ALPHA0"/>
+ <value value="51" name="A7XX_PERF_UFC_SP_L1_FILTER_HIT"/>
+ <value value="52" name="A7XX_PERF_UFC_SP_L1_FILTER_MISS"/>
+ <value value="53" name="A7XX_PERF_UFC_SP_L1_FILTER_REQUESTS"/>
+ <value value="54" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_RDY"/>
+ <value value="55" name="A7XX_PERF_UFC_TP_L1_TAG_HIT_NRDY"/>
+ <value value="56" name="A7XX_PERF_UFC_TP_L1_TAG_MISS"/>
+ <value value="57" name="A7XX_PERF_UFC_TP_L1_FILTER_REQUESTS"/>
+</enum>
+
<domain name="A6XX" width="32" prefix="variant" varset="chip">
<bitset name="A6XX_RBBM_INT_0_MASK" inline="no" varset="chip">
<bitfield name="RBBM_GPU_IDLE" pos="0" type="boolean"/>
@@ -1584,7 +2605,7 @@ to upconvert to 32b float internally?
<reg32 offset="0x050e" name="RBBM_PERFCTR_SRAM_INIT_CMD"/>
<reg32 offset="0x050f" name="RBBM_PERFCTR_SRAM_INIT_STATUS"/>
<reg32 offset="0x0533" name="RBBM_ISDB_CNT"/>
- <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL" variants="A7XX-"/>
+ <reg32 offset="0x0534" name="RBBM_NC_MODE_CNTL"/>
<reg32 offset="0x0535" name="RBBM_SNAPSHOT_STATUS" variants="A7XX-"/>
<!---
@@ -2184,13 +3205,28 @@ to upconvert to 32b float internally?
<value value="3" name="BUFFERS_IN_SYSMEM"/>
</enum>
+ <enum name="a6xx_lrz_feedback_mask">
+ <value value="0x0" name="LRZ_FEEDBACK_NONE"/>
+ <value value="0x1" name="LRZ_FEEDBACK_EARLY_Z"/>
+ <value value="0x2" name="LRZ_FEEDBACK_EARLY_LRZ_LATE_Z"/>
+		<!-- We don't have a flag type, and this combination of flags (0x1 | 0x2) is often used -->
+ <value value="0x3" name="LRZ_FEEDBACK_EARLY_Z_OR_EARLY_LRZ_LATE_Z"/>
+ <value value="0x4" name="LRZ_FEEDBACK_LATE_Z"/>
+ </enum>
+
<reg32 offset="0x80a1" name="GRAS_BIN_CONTROL" usage="rp_blit">
<bitfield name="BINW" low="0" high="5" shr="5" type="uint"/>
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
+ <doc>Disable LRZ feedback writes</doc>
<bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
<bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location" variants="A6XX"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ <doc>
+			Allows draws that don't have GRAS_LRZ_CNTL.LRZ_WRITE but do have
+			GRAS_LRZ_CNTL.ENABLE to contribute to LRZ during the RENDERING pass.
+			In sysmem mode, GRAS_LRZ_CNTL.LRZ_WRITE is not considered.
+ </doc>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
<bitfield name="UNK27" pos="27"/>
</reg32>
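+	<!--
+	 Worked example (assuming the usual rnndb "shr" convention, where the
+	 stored field is the real value shifted right by "shr" bits): a BINW
+	 field value of 4 encodes a bin width of 4 << 5 = 128 pixels, and a
+	 BINH field value of 8 encodes a bin height of 8 << 4 = 128 pixels.
+	-->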
@@ -2270,7 +3306,7 @@ to upconvert to 32b float internally?
- 0.0 if GREATER
- 1.0 if LESS
</doc>
- <bitfield name="FC_ENABLE" pos="3" type="boolean"/>
+ <bitfield name="FC_ENABLE" pos="3" type="boolean" variants="A6XX"/>
<!-- set when depth-test + depth-write enabled -->
<bitfield name="Z_TEST_ENABLE" pos="4" type="boolean"/>
<bitfield name="Z_BOUNDS_ENABLE" pos="5" type="boolean"/>
@@ -2284,7 +3320,7 @@ to upconvert to 32b float internally?
Disable LRZ based on previous direction and the current one.
If DIR_WRITE is not enabled - there is no write to direction buffer.
</doc>
- <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean"/>
+ <bitfield name="DISABLE_ON_WRONG_DIR" pos="9" type="boolean" variants="A6XX"/>
<bitfield name="Z_FUNC" low="11" high="13" type="adreno_compare_func" variants="A7XX-"/>
</reg32>
@@ -2357,7 +3393,10 @@ to upconvert to 32b float internally?
<bitfield name="BASE_MIP_LEVEL" low="28" high="31" type="uint"/>
</reg32>
- <reg32 offset="0x810b" name="GRAS_UNKNOWN_810B" variants="A7XX-" usage="cmd"/>
+ <reg32 offset="0x810b" name="GRAS_LRZ_CNTL2" variants="A7XX-" usage="rp_blit">
+ <bitfield name="DISABLE_ON_WRONG_DIR" pos="0" type="boolean"/>
+ <bitfield name="FC_ENABLE" pos="1" type="boolean"/>
+ </reg32>
<!-- 0x810c-0x810f invalid -->
@@ -2366,7 +3405,10 @@ to upconvert to 32b float internally?
<!-- A bit tentative but it's a color and it is followed by LRZ_CLEAR -->
<reg32 offset="0x8111" name="GRAS_LRZ_CLEAR_DEPTH_F32" type="float" variants="A7XX-"/>
- <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0x8113" name="GRAS_LRZ_DEPTH_BUFFER_INFO" variants="A7XX-" usage="rp_blit">
+ <bitfield name="DEPTH_FORMAT" low="0" high="2" type="a6xx_depth_format"/>
+ <bitfield name="UNK3" pos="3"/>
+ </reg32>
<!-- Always written together and always equal 09510840 00000a62 -->
<reg32 offset="0x8120" name="GRAS_UNKNOWN_8120" variants="A7XX-" usage="cmd"/>
@@ -2440,7 +3482,7 @@ to upconvert to 32b float internally?
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
<bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
<bitfield name="BUFFERS_LOCATION" low="22" high="23" type="a6xx_buffers_location"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
</reg32>
<reg32 offset="0x8800" name="RB_BIN_CONTROL" variants="A7XX-" usage="rp_blit">
@@ -2448,7 +3490,7 @@ to upconvert to 32b float internally?
<bitfield name="BINH" low="8" high="14" shr="4" type="uint"/>
<bitfield name="RENDER_MODE" low="18" high="20" type="a6xx_render_mode"/>
<bitfield name="FORCE_LRZ_WRITE_DIS" pos="21" type="boolean"/>
- <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26"/>
+ <bitfield name="LRZ_FEEDBACK_ZMODE_MASK" low="24" high="26" type="a6xx_lrz_feedback_mask"/>
</reg32>
<reg32 offset="0x8801" name="RB_RENDER_CNTL" variants="A6XX" usage="rp_blit">
@@ -2605,6 +3647,7 @@ to upconvert to 32b float internally?
<bitfield name="UNK10" pos="10"/>
<bitfield name="LOSSLESSCOMPEN" pos="11" type="boolean"/>
<bitfield name="COLOR_SWAP" low="13" high="14" type="a3xx_color_swap"/>
+ <bitfield name="MUTABLEEN" pos="16" type="boolean" variants="A7XX-"/>
</reg32>
<!--
at least in gmem, things seem to be aligned to pitch of 64..
@@ -2770,6 +3813,7 @@ to upconvert to 32b float internally?
<bitfield name="COLOR_SWAP" low="5" high="6" type="a3xx_color_swap"/>
<bitfield name="COLOR_FORMAT" low="7" high="14" type="a6xx_format"/>
<bitfield name="UNK15" pos="15" type="boolean"/>
+ <bitfield name="MUTABLEEN" pos="16" type="boolean" variants="A7XX-"/>
</reg32>
<reg64 offset="0x88d8" name="RB_BLIT_DST" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x88da" name="RB_BLIT_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
@@ -2886,13 +3930,12 @@ to upconvert to 32b float internally?
<reg32 offset="0x8c00" name="RB_2D_BLIT_CNTL" type="a6xx_2d_blit_cntl" usage="rp_blit"/>
<reg32 offset="0x8c01" name="RB_2D_UNKNOWN_8C01" low="0" high="31" usage="rp_blit"/>
- <bitset name="a6xx_2d_surf_info" inline="yes">
+ <bitset name="a6xx_2d_src_surf_info" inline="yes">
<bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
<bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
<bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
<bitfield name="FLAGS" pos="12" type="boolean"/>
<bitfield name="SRGB" pos="13" type="boolean"/>
- <!-- the rest is only for src -->
<bitfield name="SAMPLES" low="14" high="15" type="a3xx_msaa_samples"/>
<bitfield name="FILTER" pos="16" type="boolean"/>
<bitfield name="UNK17" pos="17" type="boolean"/>
@@ -2903,11 +3946,21 @@ to upconvert to 32b float internally?
<bitfield name="UNK22" pos="22" type="boolean"/>
<bitfield name="UNK23" low="23" high="26"/>
<bitfield name="UNK28" pos="28" type="boolean"/>
+ <bitfield name="MUTABLEEN" pos="29" type="boolean" variants="A7XX-"/>
+ </bitset>
+
+ <bitset name="a6xx_2d_dst_surf_info" inline="yes">
+ <bitfield name="COLOR_FORMAT" low="0" high="7" type="a6xx_format"/>
+ <bitfield name="TILE_MODE" low="8" high="9" type="a6xx_tile_mode"/>
+ <bitfield name="COLOR_SWAP" low="10" high="11" type="a3xx_color_swap"/>
+ <bitfield name="FLAGS" pos="12" type="boolean"/>
+ <bitfield name="SRGB" pos="13" type="boolean"/>
+ <bitfield name="SAMPLES" low="14" high="15" type="a3xx_msaa_samples"/>
+ <bitfield name="MUTABLEEN" pos="17" type="boolean" variants="A7XX-"/>
</bitset>
<!-- 0x8c02-0x8c16 invalid -->
- <!-- TODO: RB_2D_DST_INFO has 17 valid bits (doesn't match a6xx_2d_surf_info) -->
- <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_surf_info" usage="rp_blit"/>
+ <reg32 offset="0x8c17" name="RB_2D_DST_INFO" type="a6xx_2d_dst_surf_info" usage="rp_blit"/>
<reg64 offset="0x8c18" name="RB_2D_DST" type="waddress" align="64" usage="rp_blit"/>
<reg32 offset="0x8c1a" name="RB_2D_DST_PITCH" low="0" high="15" shr="6" type="uint" usage="rp_blit"/>
<!-- this is a guess but seems likely (for NV12/IYUV): -->
@@ -2927,7 +3980,10 @@ to upconvert to 32b float internally?
<reg32 offset="0x8c2d" name="RB_2D_SRC_SOLID_C1" usage="rp_blit"/>
<reg32 offset="0x8c2e" name="RB_2D_SRC_SOLID_C2" usage="rp_blit"/>
<reg32 offset="0x8c2f" name="RB_2D_SRC_SOLID_C3" usage="rp_blit"/>
- <!-- 0x8c34-0x8dff invalid -->
+
+ <reg32 offset="0x8c34" name="RB_UNKNOWN_8C34" variants="A7XX-" usage="cmd"/>
+
+ <!-- 0x8c35-0x8dff invalid -->
<!-- always 0x1 ? either doesn't exist for a650 or write-only: -->
<reg32 offset="0x8e01" name="RB_UNKNOWN_8E01" usage="cmd"/>
@@ -4275,7 +5331,7 @@ to upconvert to 32b float internally?
badly named or the functionality moved in a6xx. But downstream kernel
calls this "a6xx_sp_ps_tp_2d_cluster"
-->
- <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_surf_info" variants="A6XX" usage="rp_blit"/>
+ <reg32 offset="0xb4c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A6XX" usage="rp_blit"/>
<reg32 offset="0xb4c1" name="SP_PS_2D_SRC_SIZE" variants="A6XX" usage="rp_blit">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
@@ -4286,7 +5342,7 @@ to upconvert to 32b float internally?
<bitfield name="PITCH" low="9" high="23" shr="6" type="uint"/>
</reg32>
- <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_surf_info" variants="A7XX-" usage="rp_blit"/>
+ <reg32 offset="0xb2c0" name="SP_PS_2D_SRC_INFO" type="a6xx_2d_src_surf_info" variants="A7XX-" usage="rp_blit"/>
<reg32 offset="0xb2c1" name="SP_PS_2D_SRC_SIZE" variants="A7XX">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
@@ -4329,7 +5385,12 @@ to upconvert to 32b float internally?
<!-- always 0x100000 or 0x1000000? -->
<reg32 offset="0xb600" name="TPL1_DBG_ECO_CNTL" low="0" high="25" usage="cmd"/>
<reg32 offset="0xb601" name="TPL1_ADDR_MODE_CNTL" type="a5xx_address_mode"/>
- <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="cmd"/>
+ <reg32 offset="0xb602" name="TPL1_DBG_ECO_CNTL1" usage="cmd">
+ <!-- Affects UBWC in some way: if BLIT_OP_SCALE is done with this bit set
+ and another blit is done without it, the UBWC image may be copied incorrectly.
+ -->
+ <bitfield name="TP_UBWC_FLAG_HINT" pos="18" type="boolean"/>
+ </reg32>
<reg32 offset="0xb604" name="TPL1_NC_MODE_CNTL">
<bitfield name="MODE" pos="0" type="boolean"/>
<bitfield name="LOWER_BIT" low="1" high="2" type="uint"/>
@@ -4351,7 +5412,8 @@ to upconvert to 32b float internally?
<reg32 offset="0xb60b" name="TPL1_BICUBIC_WEIGHTS_TABLE_3" low="0" high="29" variants="A7XX" usage="cmd"/>
<reg32 offset="0xb60c" name="TPL1_BICUBIC_WEIGHTS_TABLE_4" low="0" high="29" variants="A7XX" usage="cmd"/>
- <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12"/>
+ <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="12" variants="A6XX"/>
+ <array offset="0xb610" name="TPL1_PERFCTR_TP_SEL" stride="1" length="18" variants="A7XX"/>
<!-- TODO: 4 more perfcntr sel at 0xb620 ? -->
@@ -4582,15 +5644,15 @@ to upconvert to 32b float internally?
<bitfield name="UNK6" pos="6" type="boolean"/>
</reg32>
- <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD">
+ <reg32 offset="0xbb00" name="HLSQ_DRAW_CMD" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD">
+ <reg32 offset="0xbb01" name="HLSQ_DISPATCH_CMD" variants="A6XX">
<bitfield name="STATE_ID" low="0" high="7"/>
</reg32>
- <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD">
+ <reg32 offset="0xbb02" name="HLSQ_EVENT_CMD" variants="A6XX">
<!-- I think only the low bit is actually used? -->
<bitfield name="STATE_ID" low="16" high="23"/>
<bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
@@ -4623,6 +5685,19 @@ to upconvert to 32b float internally?
<bitfield name="GFX_BINDLESS" low="14" high="18" type="hex"/>
</reg32>
+ <reg32 offset="0xab1c" name="HLSQ_DRAW_CMD" variants="A7XX-">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0xab1d" name="HLSQ_DISPATCH_CMD" variants="A7XX-">
+ <bitfield name="STATE_ID" low="0" high="7"/>
+ </reg32>
+
+ <reg32 offset="0xab1e" name="HLSQ_EVENT_CMD" variants="A7XX-">
+ <bitfield name="STATE_ID" low="16" high="23"/>
+ <bitfield name="EVENT" low="0" high="6" type="vgt_event_type"/>
+ </reg32>
+
<reg32 offset="0xab1f" name="HLSQ_INVALIDATE_CMD" variants="A7XX-" usage="cmd">
<doc>
This register clears pending loads queued up by
@@ -4791,7 +5866,7 @@ to upconvert to 32b float internally?
<reg32 offset="3" name="3"/>
</domain>
-<domain name="A6XX_TEX_CONST" width="32">
+<domain name="A6XX_TEX_CONST" width="32" varset="chip">
<doc>Texture constant dwords</doc>
<enum name="a6xx_tex_swiz"> <!-- same as a4xx? -->
<value name="A6XX_TEX_X" value="0"/>
@@ -4831,6 +5906,7 @@ to upconvert to 32b float internally?
<reg32 offset="1" name="1">
<bitfield name="WIDTH" low="0" high="14" type="uint"/>
<bitfield name="HEIGHT" low="15" high="29" type="uint"/>
+ <bitfield name="MUTABLEEN" pos="31" type="boolean" variants="A7XX-"/>
</reg32>
<reg32 offset="2" name="2">
<!--
diff --git a/drivers/gpu/drm/msm/registers/display/hdmi.xml b/drivers/gpu/drm/msm/registers/display/hdmi.xml
index 6c81581016c7..1cf1b14fbd91 100644
--- a/drivers/gpu/drm/msm/registers/display/hdmi.xml
+++ b/drivers/gpu/drm/msm/registers/display/hdmi.xml
@@ -1012,4 +1012,93 @@ xsi:schemaLocation="https://gitlab.freedesktop.org/freedreno/ rules-fd.xsd">
<reg32 offset="0x00110" name="TX_ALOG_INTF_OBSV"/>
</domain>
+<domain name="HDMI_8998_PHY" width="32">
+ <reg32 offset="0x00000" name="CFG"/>
+ <reg32 offset="0x00004" name="PD_CTL"/>
+ <reg32 offset="0x00010" name="MODE"/>
+ <reg32 offset="0x0005C" name="CLOCK"/>
+ <reg32 offset="0x00068" name="CMN_CTRL"/>
+ <reg32 offset="0x000B4" name="STATUS"/>
+</domain>
+
+<domain name="HDMI_8998_PHY_QSERDES_COM" width="32">
+ <reg32 offset="0x0000" name="ATB_SEL1"/>
+ <reg32 offset="0x0004" name="ATB_SEL2"/>
+ <reg32 offset="0x0008" name="FREQ_UPDATE"/>
+ <reg32 offset="0x000C" name="BG_TIMER"/>
+ <reg32 offset="0x0010" name="SSC_EN_CENTER"/>
+ <reg32 offset="0x0014" name="SSC_ADJ_PER1"/>
+ <reg32 offset="0x0018" name="SSC_ADJ_PER2"/>
+ <reg32 offset="0x001C" name="SSC_PER1"/>
+ <reg32 offset="0x0020" name="SSC_PER2"/>
+ <reg32 offset="0x0024" name="SSC_STEP_SIZE1"/>
+ <reg32 offset="0x0028" name="SSC_STEP_SIZE2"/>
+ <reg32 offset="0x002C" name="POST_DIV"/>
+ <reg32 offset="0x0030" name="POST_DIV_MUX"/>
+ <reg32 offset="0x0034" name="BIAS_EN_CLKBUFLR_EN"/>
+ <reg32 offset="0x0038" name="CLK_ENABLE1"/>
+ <reg32 offset="0x003C" name="SYS_CLK_CTRL"/>
+ <reg32 offset="0x0040" name="SYSCLK_BUF_ENABLE"/>
+ <reg32 offset="0x0044" name="PLL_EN"/>
+ <reg32 offset="0x0048" name="PLL_IVCO"/>
+ <reg32 offset="0x004C" name="CMN_IETRIM"/>
+ <reg32 offset="0x0050" name="CMN_IPTRIM"/>
+ <reg32 offset="0x0060" name="CP_CTRL_MODE0"/>
+ <reg32 offset="0x0064" name="CP_CTRL_MODE1"/>
+ <reg32 offset="0x0068" name="PLL_RCTRL_MODE0"/>
+ <reg32 offset="0x006C" name="PLL_RCTRL_MODE1"/>
+ <reg32 offset="0x0070" name="PLL_CCTRL_MODE0"/>
+ <reg32 offset="0x0074" name="PLL_CCTRL_MODE1"/>
+ <reg32 offset="0x0078" name="PLL_CNTRL"/>
+ <reg32 offset="0x007C" name="BIAS_EN_CTRL_BY_PSM"/>
+ <reg32 offset="0x0080" name="SYSCLK_EN_SEL"/>
+ <reg32 offset="0x0084" name="CML_SYSCLK_SEL"/>
+ <reg32 offset="0x0088" name="RESETSM_CNTRL"/>
+ <reg32 offset="0x008C" name="RESETSM_CNTRL2"/>
+ <reg32 offset="0x0090" name="LOCK_CMP_EN"/>
+ <reg32 offset="0x0094" name="LOCK_CMP_CFG"/>
+ <reg32 offset="0x0098" name="LOCK_CMP1_MODE0"/>
+ <reg32 offset="0x009C" name="LOCK_CMP2_MODE0"/>
+ <reg32 offset="0x00A0" name="LOCK_CMP3_MODE0"/>
+ <reg32 offset="0x00B0" name="DEC_START_MODE0"/>
+ <reg32 offset="0x00B4" name="DEC_START_MODE1"/>
+ <reg32 offset="0x00B8" name="DIV_FRAC_START1_MODE0"/>
+ <reg32 offset="0x00BC" name="DIV_FRAC_START2_MODE0"/>
+ <reg32 offset="0x00C0" name="DIV_FRAC_START3_MODE0"/>
+ <reg32 offset="0x00C4" name="DIV_FRAC_START1_MODE1"/>
+ <reg32 offset="0x00C8" name="DIV_FRAC_START2_MODE1"/>
+ <reg32 offset="0x00CC" name="DIV_FRAC_START3_MODE1"/>
+ <reg32 offset="0x00D0" name="INTEGLOOP_INITVAL"/>
+ <reg32 offset="0x00D4" name="INTEGLOOP_EN"/>
+ <reg32 offset="0x00D8" name="INTEGLOOP_GAIN0_MODE0"/>
+ <reg32 offset="0x00DC" name="INTEGLOOP_GAIN1_MODE0"/>
+ <reg32 offset="0x00E0" name="INTEGLOOP_GAIN0_MODE1"/>
+ <reg32 offset="0x00E4" name="INTEGLOOP_GAIN1_MODE1"/>
+ <reg32 offset="0x00E8" name="VCOCAL_DEADMAN_CTRL"/>
+ <reg32 offset="0x00EC" name="VCO_TUNE_CTRL"/>
+ <reg32 offset="0x00F0" name="VCO_TUNE_MAP"/>
+ <reg32 offset="0x0124" name="CMN_STATUS"/>
+ <reg32 offset="0x0128" name="RESET_SM_STATUS"/>
+ <reg32 offset="0x0138" name="CLK_SEL"/>
+ <reg32 offset="0x013C" name="HSCLK_SEL"/>
+ <reg32 offset="0x0148" name="CORECLK_DIV_MODE0"/>
+ <reg32 offset="0x0150" name="SW_RESET"/>
+ <reg32 offset="0x0154" name="CORE_CLK_EN"/>
+ <reg32 offset="0x0158" name="C_READY_STATUS"/>
+ <reg32 offset="0x015C" name="CMN_CONFIG"/>
+ <reg32 offset="0x0164" name="SVS_MODE_CLK_SEL"/>
+</domain>
+
+<domain name="HDMI_8998_PHY_TXn" width="32">
+ <reg32 offset="0x0000" name="EMP_POST1_LVL"/>
+ <reg32 offset="0x0008" name="INTERFACE_SELECT_TX_BAND"/>
+ <reg32 offset="0x000C" name="CLKBUF_TERM_ENABLE"/>
+ <reg32 offset="0x0014" name="DRV_LVL_RES_CODE_OFFSET"/>
+ <reg32 offset="0x0018" name="DRV_LVL"/>
+ <reg32 offset="0x001C" name="LANE_CONFIG"/>
+ <reg32 offset="0x0024" name="PRE_DRIVER_1"/>
+ <reg32 offset="0x0028" name="PRE_DRIVER_2"/>
+ <reg32 offset="0x002C" name="LANE_MODE"/>
+</domain>
+
</database>
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
index 330d72b1a4af..52412965fac1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/gsp/fwsec.c
@@ -324,7 +324,7 @@ nvkm_gsp_fwsec_sb(struct nvkm_gsp *gsp)
return ret;
/* Verify. */
- err = nvkm_rd32(device, 0x001400 + (0xf * 4)) & 0x0000ffff;
+ err = nvkm_rd32(device, 0x001400 + (0x15 * 4)) & 0x0000ffff;
if (err) {
nvkm_error(subdev, "fwsec-sb: 0x%04x\n", err);
return -EIO;
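
The arithmetic behind the one-line fix: the status word moves from scratch slot 0xf, at 0x001400 + (0xf * 4) = 0x143c, to slot 0x15, at 0x001400 + (0x15 * 4) = 0x1454; only the low 16 bits carry the FWSEC-SB error code, hence the 0x0000ffff mask.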
diff --git a/drivers/gpu/drm/omapdrm/Kconfig b/drivers/gpu/drm/omapdrm/Kconfig
index 64e440a2649b..fbd9af758581 100644
--- a/drivers/gpu/drm/omapdrm/Kconfig
+++ b/drivers/gpu/drm/omapdrm/Kconfig
@@ -5,6 +5,8 @@ config DRM_OMAP
depends on DRM && OF
depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB)
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select FB_DMAMEM_HELPERS_DEFERRED if DRM_FBDEV_EMULATION
select VIDEOMODE_HELPERS
select HDMI
diff --git a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
index c5d3ead38555..d3baccfe6286 100644
--- a/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
+++ b/drivers/gpu/drm/panel/panel-newvision-nv3052c.c
@@ -925,7 +925,7 @@ MODULE_DEVICE_TABLE(spi, nv3052c_ids);
static const struct of_device_id nv3052c_of_match[] = {
{ .compatible = "leadtek,ltk035c5444t", .data = &ltk035c5444t_panel_info },
{ .compatible = "fascontek,fs035vg158", .data = &fs035vg158_panel_info },
- { .compatible = "wl-355608-a8", .data = &wl_355608_a8_panel_info },
+ { .compatible = "anbernic,rg35xx-plus-panel", .data = &wl_355608_a8_panel_info },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, nv3052c_of_match);
diff --git a/drivers/gpu/drm/panthor/panthor_drv.c b/drivers/gpu/drm/panthor/panthor_drv.c
index 444e3bb1cfb5..0caf9e9a8c45 100644
--- a/drivers/gpu/drm/panthor/panthor_drv.c
+++ b/drivers/gpu/drm/panthor/panthor_drv.c
@@ -14,6 +14,7 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <drm/drm_auth.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
@@ -1036,6 +1037,24 @@ static int panthor_ioctl_group_destroy(struct drm_device *ddev, void *data,
return panthor_group_destroy(pfile, args->group_handle);
}
+static int group_priority_permit(struct drm_file *file,
+ u8 priority)
+{
+ /* Ensure that priority is valid */
+ if (priority > PANTHOR_GROUP_PRIORITY_HIGH)
+ return -EINVAL;
+
+ /* Medium priority and below are always allowed */
+ if (priority <= PANTHOR_GROUP_PRIORITY_MEDIUM)
+ return 0;
+
+ /* Higher priorities require CAP_SYS_NICE or DRM_MASTER */
+ if (capable(CAP_SYS_NICE) || drm_is_current_master(file))
+ return 0;
+
+ return -EACCES;
+}
+
static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
struct drm_file *file)
{
@@ -1051,6 +1070,10 @@ static int panthor_ioctl_group_create(struct drm_device *ddev, void *data,
if (ret)
return ret;
+ ret = group_priority_permit(file, args->priority);
+ if (ret)
+ return ret;
+
ret = panthor_group_create(pfile, args, queue_args);
if (ret >= 0) {
args->group_handle = ret;
diff --git a/drivers/gpu/drm/panthor/panthor_fw.c b/drivers/gpu/drm/panthor/panthor_fw.c
index 857f3f11258a..ef232c0c2049 100644
--- a/drivers/gpu/drm/panthor/panthor_fw.c
+++ b/drivers/gpu/drm/panthor/panthor_fw.c
@@ -1089,6 +1089,12 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
panthor_fw_stop(ptdev);
ptdev->fw->fast_reset = false;
drm_err(&ptdev->base, "FW fast reset failed, trying a slow reset");
+
+ ret = panthor_vm_flush_all(ptdev->fw->vm);
+ if (ret) {
+ drm_err(&ptdev->base, "FW slow reset failed (couldn't flush FW's AS l2cache)");
+ return ret;
+ }
}
/* Reload all sections, including RO ones. We're not supposed
@@ -1099,7 +1105,7 @@ int panthor_fw_post_reset(struct panthor_device *ptdev)
ret = panthor_fw_start(ptdev);
if (ret) {
- drm_err(&ptdev->base, "FW slow reset failed");
+ drm_err(&ptdev->base, "FW slow reset failed (couldn't start the FW )");
return ret;
}
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c
index 7d0a90559182..37f1885c54c7 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.c
+++ b/drivers/gpu/drm/panthor/panthor_mmu.c
@@ -576,6 +576,12 @@ static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr,
if (as_nr < 0)
return 0;
+ /*
+ * If the AS number is non-negative, we can be sure the device
+ * is up and running, so we don't need to explicitly power it up.
+ */
+
if (op != AS_COMMAND_UNLOCK)
lock_region(ptdev, as_nr, iova, size);
@@ -874,14 +880,23 @@ static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
if (!drm_dev_enter(&ptdev->base, &cookie))
return 0;
- /* Flush the PTs only if we're already awake */
- if (pm_runtime_active(ptdev->base.dev))
- ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
+ ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);
drm_dev_exit(cookie);
return ret;
}
+/**
+ * panthor_vm_flush_all() - Flush L2 caches for the entirety of a VM's AS
+ * @vm: VM whose cache to flush
+ *
+ * Return: 0 on success, a negative error code if flush failed.
+ */
+int panthor_vm_flush_all(struct panthor_vm *vm)
+{
+ return panthor_vm_flush_range(vm, vm->base.mm_start, vm->base.mm_range);
+}
+
static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
struct panthor_device *ptdev = vm->ptdev;
diff --git a/drivers/gpu/drm/panthor/panthor_mmu.h b/drivers/gpu/drm/panthor/panthor_mmu.h
index f3c1ed19f973..6788771071e3 100644
--- a/drivers/gpu/drm/panthor/panthor_mmu.h
+++ b/drivers/gpu/drm/panthor/panthor_mmu.h
@@ -31,6 +31,7 @@ panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset);
int panthor_vm_active(struct panthor_vm *vm);
void panthor_vm_idle(struct panthor_vm *vm);
int panthor_vm_as(struct panthor_vm *vm);
+int panthor_vm_flush_all(struct panthor_vm *vm);
struct panthor_heap_pool *
panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create);
diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c
index d3246f7d9591..42afdf0ddb7e 100644
--- a/drivers/gpu/drm/panthor/panthor_sched.c
+++ b/drivers/gpu/drm/panthor/panthor_sched.c
@@ -3092,7 +3092,7 @@ int panthor_group_create(struct panthor_file *pfile,
if (group_args->pad)
return -EINVAL;
- if (group_args->priority > PANTHOR_CSG_PRIORITY_HIGH)
+ if (group_args->priority >= PANTHOR_CSG_PRIORITY_COUNT)
return -EINVAL;
if ((group_args->compute_core_mask & ~ptdev->gpu_info.shader_present) ||
diff --git a/drivers/gpu/drm/renesas/rcar-du/Kconfig b/drivers/gpu/drm/renesas/rcar-du/Kconfig
index be86ecb9f559..e1f41468a9a6 100644
--- a/drivers/gpu/drm/renesas/rcar-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rcar-du/Kconfig
@@ -5,6 +5,8 @@ config DRM_RCAR_DU
depends on ARM || ARM64 || COMPILE_TEST
depends on ARCH_RENESAS || COMPILE_TEST
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/renesas/rz-du/Kconfig b/drivers/gpu/drm/renesas/rz-du/Kconfig
index 8ec14271ebba..89bdb598e0ae 100644
--- a/drivers/gpu/drm/renesas/rz-du/Kconfig
+++ b/drivers/gpu/drm/renesas/rz-du/Kconfig
@@ -6,6 +6,8 @@ config DRM_RZG2L_DU
depends on VIDEO_RENESAS_VSP1
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select VIDEOMODE_HELPERS
help
Choose this option if you have an RZ/G2L family chipset.
diff --git a/drivers/gpu/drm/renesas/shmobile/Kconfig b/drivers/gpu/drm/renesas/shmobile/Kconfig
index 027220b8fe1c..c329ab8a7a8b 100644
--- a/drivers/gpu/drm/renesas/shmobile/Kconfig
+++ b/drivers/gpu/drm/renesas/shmobile/Kconfig
@@ -5,6 +5,8 @@ config DRM_SHMOBILE
depends on ARCH_RENESAS || ARCH_SHMOBILE || COMPILE_TEST
select BACKLIGHT_CLASS_DEVICE
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select VIDEOMODE_HELPERS
help
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 7df875e38517..23c49e91f1cc 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -86,6 +86,8 @@ config ROCKCHIP_LVDS
bool "Rockchip LVDS support"
depends on DRM_ROCKCHIP
depends on PINCTRL && OF
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
help
Choose this option to enable support for Rockchip LVDS controllers.
The Rockchip RK3288 SoC has an LVDS TX controller that can be used, and it
@@ -96,6 +98,8 @@ config ROCKCHIP_RGB
bool "Rockchip RGB support"
depends on DRM_ROCKCHIP
depends on PINCTRL
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
help
Choose this option to enable support for Rockchip RGB output.
Some Rockchip CRTCs, like rv1108, can directly output parallel
diff --git a/drivers/gpu/drm/tegra/Kconfig b/drivers/gpu/drm/tegra/Kconfig
index 782f51d3044a..e688d8104652 100644
--- a/drivers/gpu/drm/tegra/Kconfig
+++ b/drivers/gpu/drm/tegra/Kconfig
@@ -8,6 +8,7 @@ config DRM_TEGRA
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HDMI_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_DISPLAY_DP_AUX_BUS
select DRM_KMS_HELPER
select DRM_MIPI_DSI
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
index 378600806167..2385c56493b9 100644
--- a/drivers/gpu/drm/tidss/Kconfig
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -3,6 +3,8 @@ config DRM_TIDSS
depends on DRM && OF
depends on ARM || ARM64 || COMPILE_TEST
select DRM_KMS_HELPER
+ select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
help
The TI Keystone family SoCs introduced a new generation of
diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c
index 090bbaebb496..f58c38d7de29 100644
--- a/drivers/gpu/drm/v3d/v3d_sched.c
+++ b/drivers/gpu/drm/v3d/v3d_sched.c
@@ -136,6 +136,8 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
+ preempt_disable();
+
write_seqcount_begin(&local_stats->lock);
local_stats->start_ns = now;
write_seqcount_end(&local_stats->lock);
@@ -143,6 +145,8 @@ v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue)
write_seqcount_begin(&global_stats->lock);
global_stats->start_ns = now;
write_seqcount_end(&global_stats->lock);
+
+ preempt_enable();
}
static void
@@ -164,8 +168,10 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue)
struct v3d_stats *local_stats = &file->stats[queue];
u64 now = local_clock();
+ preempt_disable();
v3d_stats_update(local_stats, now);
v3d_stats_update(global_stats, now);
+ preempt_enable();
}
static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job)
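
The bracketing matters because of the seqcount write-side contract: a writer preempted between begin and end leaves the sequence odd, and a reader on the same CPU can spin on it indefinitely. A minimal sketch of the pattern being enforced (hypothetical stats struct; the seqcount and preempt APIs are the real kernel ones):

    #include <linux/preempt.h>
    #include <linux/seqlock.h>

    struct example_stats {
    	seqcount_t lock;
    	u64 start_ns;
    };

    static void example_stats_start(struct example_stats *s, u64 now)
    {
    	preempt_disable();              /* writer must not be preempted */
    	write_seqcount_begin(&s->lock); /* sequence goes odd: readers retry */
    	s->start_ns = now;
    	write_seqcount_end(&s->lock);   /* sequence even again */
    	preempt_enable();
    }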
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index 717d624e9a05..890a66a2361f 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -27,6 +27,8 @@
**************************************************************************/
#include "vmwgfx_drv.h"
+
+#include "vmwgfx_bo.h"
#include <linux/highmem.h>
/*
@@ -420,13 +422,105 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
return 0;
}
+static void *map_external(struct vmw_bo *bo, struct iosys_map *map)
+{
+ struct vmw_private *vmw =
+ container_of(bo->tbo.bdev, struct vmw_private, bdev);
+ void *ptr = NULL;
+ int ret;
+
+ if (bo->tbo.base.import_attach) {
+ ret = dma_buf_vmap(bo->tbo.base.dma_buf, map);
+ if (ret) {
+ drm_dbg_driver(&vmw->drm,
+ "Wasn't able to map external bo!\n");
+ goto out;
+ }
+ ptr = map->vaddr;
+ } else {
+ ptr = vmw_bo_map_and_cache(bo);
+ }
+
+out:
+ return ptr;
+}
+
+static void unmap_external(struct vmw_bo *bo, struct iosys_map *map)
+{
+ if (bo->tbo.base.import_attach)
+ dma_buf_vunmap(bo->tbo.base.dma_buf, map);
+ else
+ vmw_bo_unmap(bo);
+}
+
+static int vmw_external_bo_copy(struct vmw_bo *dst, u32 dst_offset,
+ u32 dst_stride, struct vmw_bo *src,
+ u32 src_offset, u32 src_stride,
+ u32 width_in_bytes, u32 height,
+ struct vmw_diff_cpy *diff)
+{
+ struct vmw_private *vmw =
+ container_of(dst->tbo.bdev, struct vmw_private, bdev);
+ size_t dst_size = dst->tbo.resource->size;
+ size_t src_size = src->tbo.resource->size;
+ struct iosys_map dst_map = {0};
+ struct iosys_map src_map = {0};
+ int ret, i;
+ int x_in_bytes;
+ u8 *vsrc;
+ u8 *vdst;
+
+ vsrc = map_external(src, &src_map);
+ if (!vsrc) {
+ drm_dbg_driver(&vmw->drm, "Wasn't able to map src\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vdst = map_external(dst, &dst_map);
+ if (!vdst) {
+ drm_dbg_driver(&vmw->drm, "Wasn't able to map dst\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ vsrc += src_offset;
+ vdst += dst_offset;
+ if (src_stride == dst_stride) {
+ dst_size -= dst_offset;
+ src_size -= src_offset;
+ memcpy(vdst, vsrc,
+ min(dst_stride * height, min(dst_size, src_size)));
+ } else {
+ WARN_ON(dst_stride < width_in_bytes);
+ for (i = 0; i < height; ++i) {
+ memcpy(vdst, vsrc, width_in_bytes);
+ vsrc += src_stride;
+ vdst += dst_stride;
+ }
+ }
+
+ x_in_bytes = (dst_offset % dst_stride);
+ diff->rect.x1 = x_in_bytes / diff->cpp;
+ diff->rect.y1 = ((dst_offset - x_in_bytes) / dst_stride);
+ diff->rect.x2 = diff->rect.x1 + width_in_bytes / diff->cpp;
+ diff->rect.y2 = diff->rect.y1 + height;
+
+ ret = 0;
+out:
+ unmap_external(src, &src_map);
+ unmap_external(dst, &dst_map);
+
+ return ret;
+}
+
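
A short usage sketch of the pair above (assuming, as vmw_bo_cpu_blit() already requires, that the BO is pinned or reserved): imported BOs round-trip through the exporter's vmap and must be unmapped with the same iosys_map cookie, while native BOs reuse TTM's cached kernel map.

    struct iosys_map map = {0};
    void *ptr = map_external(bo, &map);   /* dma_buf_vmap() or TTM kmap */

    if (ptr) {
    	/* ... CPU access through ptr ... */
    	unmap_external(bo, &map);     /* dma_buf_vunmap() or vmw_bo_unmap() */
    }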
/**
* vmw_bo_cpu_blit - in-kernel cpu blit.
*
- * @dst: Destination buffer object.
+ * @vmw_dst: Destination buffer object.
* @dst_offset: Destination offset of blit start in bytes.
* @dst_stride: Destination stride in bytes.
- * @src: Source buffer object.
+ * @vmw_src: Source buffer object.
* @src_offset: Source offset of blit start in bytes.
* @src_stride: Source stride in bytes.
* @w: Width of blit.
@@ -444,13 +538,15 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
* Neither of the buffer objects may be placed in PCI memory
* (Fixed memory in TTM terminology) when using this function.
*/
-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *vmw_dst,
u32 dst_offset, u32 dst_stride,
- struct ttm_buffer_object *src,
+ struct vmw_bo *vmw_src,
u32 src_offset, u32 src_stride,
u32 w, u32 h,
struct vmw_diff_cpy *diff)
{
+ struct ttm_buffer_object *src = &vmw_src->tbo;
+ struct ttm_buffer_object *dst = &vmw_dst->tbo;
struct ttm_operation_ctx ctx = {
.interruptible = false,
.no_wait_gpu = false
@@ -460,6 +556,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
int ret = 0;
struct page **dst_pages = NULL;
struct page **src_pages = NULL;
+ bool src_external = (src->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+ bool dst_external = (dst->ttm->page_flags & TTM_TT_FLAG_EXTERNAL) != 0;
+
+ if (WARN_ON(dst == src))
+ return -EINVAL;
/* Buffer objects need to be either pinned or reserved: */
if (!(dst->pin_count))
@@ -479,6 +580,11 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
return ret;
}
+ if (src_external || dst_external)
+ return vmw_external_bo_copy(vmw_dst, dst_offset, dst_stride,
+ vmw_src, src_offset, src_stride,
+ w, h, diff);
+
if (!src->ttm->pages && src->ttm->sg) {
src_pages = kvmalloc_array(src->ttm->num_pages,
sizeof(struct page *), GFP_KERNEL);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
index f42ebc4a7c22..a0e433fbcba6 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c
@@ -360,6 +360,8 @@ void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
void *virtual;
int ret;
+ atomic_inc(&vbo->map_count);
+
virtual = ttm_kmap_obj_virtual(&vbo->map, &not_used);
if (virtual)
return virtual;
@@ -383,11 +385,17 @@ void *vmw_bo_map_and_cache_size(struct vmw_bo *vbo, size_t size)
*/
void vmw_bo_unmap(struct vmw_bo *vbo)
{
+ int map_count;
+
if (vbo->map.bo == NULL)
return;
- ttm_bo_kunmap(&vbo->map);
- vbo->map.bo = NULL;
+ map_count = atomic_dec_return(&vbo->map_count);
+
+ if (!map_count) {
+ ttm_bo_kunmap(&vbo->map);
+ vbo->map.bo = NULL;
+ }
}
@@ -421,6 +429,7 @@ static int vmw_bo_init(struct vmw_private *dev_priv,
vmw_bo->tbo.priority = 3;
vmw_bo->res_tree = RB_ROOT;
xa_init(&vmw_bo->detached_resources);
+ atomic_set(&vmw_bo->map_count, 0);
params->size = ALIGN(params->size, PAGE_SIZE);
drm_gem_private_object_init(vdev, &vmw_bo->tbo.base, params->size);
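
The net effect is that kernel mappings now nest: only the final unmap tears down the kmap. A sketch of the pairing through the existing entry points:

    void *a = vmw_bo_map_and_cache(vbo); /* map_count 0 -> 1, creates the kmap */
    void *b = vmw_bo_map_and_cache(vbo); /* map_count 1 -> 2, returns cached map */

    vmw_bo_unmap(vbo);                   /* map_count 2 -> 1, mapping preserved */
    vmw_bo_unmap(vbo);                   /* map_count 1 -> 0, ttm_bo_kunmap() runs */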
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
index 62b4342d5f7c..43b5439ec9f7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.h
@@ -71,6 +71,8 @@ struct vmw_bo_params {
* @map: Kmap object for semi-persistent mappings
* @res_tree: RB tree of resources using this buffer object as a backing MOB
* @res_prios: Eviction priority counts for attached resources
+ * @map_count: The number of currently active maps. Will differ from the
+ * cpu_writers because it includes kernel maps.
* @cpu_writers: Number of synccpu write grabs. Protected by reservation when
* increased. May be decreased without reservation.
* @dx_query_ctx: DX context if this buffer object is used as a DX query MOB
@@ -90,6 +92,7 @@ struct vmw_bo {
u32 res_prios[TTM_MAX_BO_PRIORITY];
struct xarray detached_resources;
+ atomic_t map_count;
atomic_t cpu_writers;
/* Not ref-counted. Protected by binding_mutex */
struct vmw_resource *dx_query_ctx;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 32f50e595809..3f4719b3c268 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -1353,9 +1353,9 @@ void vmw_diff_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src,
void vmw_memcpy(struct vmw_diff_cpy *diff, u8 *dest, const u8 *src, size_t n);
-int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
+int vmw_bo_cpu_blit(struct vmw_bo *dst,
u32 dst_offset, u32 dst_stride,
- struct ttm_buffer_object *src,
+ struct vmw_bo *src,
u32 src_offset, u32 src_stride,
u32 w, u32 h,
struct vmw_diff_cpy *diff);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 5453f7cf0e2d..fab155a68054 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -502,7 +502,7 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
container_of(dirty->unit, typeof(*stdu), base);
s32 width, height;
s32 src_pitch, dst_pitch;
- struct ttm_buffer_object *src_bo, *dst_bo;
+ struct vmw_bo *src_bo, *dst_bo;
u32 src_offset, dst_offset;
struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(stdu->cpp);
@@ -517,11 +517,11 @@ static void vmw_stdu_bo_cpu_commit(struct vmw_kms_dirty *dirty)
/* Assume we are blitting from Guest (bo) to Host (display_srf) */
src_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
- src_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+ src_bo = stdu->display_srf->res.guest_memory_bo;
src_offset = ddirty->top * src_pitch + ddirty->left * stdu->cpp;
dst_pitch = ddirty->pitch;
- dst_bo = &ddirty->buf->tbo;
+ dst_bo = ddirty->buf;
dst_offset = ddirty->fb_top * dst_pitch + ddirty->fb_left * stdu->cpp;
(void) vmw_bo_cpu_blit(dst_bo, dst_offset, dst_pitch,
@@ -1170,7 +1170,7 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
struct vmw_diff_cpy diff = VMW_CPU_BLIT_DIFF_INITIALIZER(0);
struct vmw_stdu_update_gb_image *cmd_img = cmd;
struct vmw_stdu_update *cmd_update;
- struct ttm_buffer_object *src_bo, *dst_bo;
+ struct vmw_bo *src_bo, *dst_bo;
u32 src_offset, dst_offset;
s32 src_pitch, dst_pitch;
s32 width, height;
@@ -1184,11 +1184,11 @@ vmw_stdu_bo_populate_update_cpu(struct vmw_du_update_plane *update, void *cmd,
diff.cpp = stdu->cpp;
- dst_bo = &stdu->display_srf->res.guest_memory_bo->tbo;
+ dst_bo = stdu->display_srf->res.guest_memory_bo;
dst_pitch = stdu->display_srf->metadata.base_size.width * stdu->cpp;
dst_offset = bb->y1 * dst_pitch + bb->x1 * stdu->cpp;
- src_bo = &vfbbo->buffer->tbo;
+ src_bo = vfbbo->buffer;
src_pitch = update->vfb->base.pitches[0];
src_offset = bo_update->fb_top * src_pitch + bo_update->fb_left *
stdu->cpp;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 8ae6a761c900..1625b30d9970 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -2283,9 +2283,11 @@ int vmw_dumb_create(struct drm_file *file_priv,
/*
* Without mob support we're just going to use raw memory buffer
* because we wouldn't be able to support full surface coherency
- * without mobs
+ * without mobs. There is also no reason to support surface coherency
+ * without 3D (i.e. GPU usage on the host) because then all the
+ * content is going to be rendered guest-side.
*/
- if (!dev_priv->has_mob) {
+ if (!dev_priv->has_mob || !vmw_supports_3d(dev_priv)) {
int cpp = DIV_ROUND_UP(args->bpp, 8);
switch (cpp) {
diff --git a/drivers/gpu/drm/xe/Makefile b/drivers/gpu/drm/xe/Makefile
index b9670ae09a9e..edfd812e0f41 100644
--- a/drivers/gpu/drm/xe/Makefile
+++ b/drivers/gpu/drm/xe/Makefile
@@ -40,6 +40,7 @@ xe-y += xe_bb.o \
xe_ggtt.o \
xe_gpu_scheduler.o \
xe_gsc.o \
+ xe_gsc_debugfs.o \
xe_gsc_proxy.o \
xe_gsc_submit.o \
xe_gt.o \
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
index b7b12b20e390..f27a2c75b56d 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h
@@ -21,11 +21,6 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
return container_of(dev, struct drm_i915_private, drm);
}
-static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
-{
- return dev_get_drvdata(kdev);
-}
-
#define IS_PLATFORM(xe, x) ((xe)->info.platform == x)
#define INTEL_INFO(dev_priv) (&((dev_priv)->info))
#define IS_I830(dev_priv) (dev_priv && 0)
@@ -80,9 +75,9 @@ static inline struct drm_i915_private *kdev_to_i915(struct device *kdev)
#define IS_MOBILE(xe) (xe && 0)
-#define IS_LP(xe) (0)
-#define IS_GEN9_LP(xe) (0)
-#define IS_GEN9_BC(xe) (0)
+#define IS_LP(xe) ((xe) && 0)
+#define IS_GEN9_LP(xe) ((xe) && 0)
+#define IS_GEN9_BC(xe) ((xe) && 0)
#define IS_TIGERLAKE_UY(xe) (xe && 0)
#define IS_COMETLAKE_ULX(xe) (xe && 0)
@@ -110,8 +105,6 @@ struct i915_sched_attr {
};
#define i915_gem_fence_wait_priority(fence, attr) do { (void) attr; } while (0)
-#define pdev_to_i915 pdev_to_xe_device
-
#define FORCEWAKE_ALL XE_FORCEWAKE_ALL
#ifdef CONFIG_ARM64
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
index 0c47661bdc6a..a473aa6697d0 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_pcode.h
@@ -13,7 +13,7 @@ static inline int
snb_pcode_write_timeout(struct intel_uncore *uncore, u32 mbox, u32 val,
int fast_timeout_us, int slow_timeout_ms)
{
- return xe_pcode_write_timeout(__compat_uncore_to_gt(uncore), mbox, val,
+ return xe_pcode_write_timeout(__compat_uncore_to_tile(uncore), mbox, val,
slow_timeout_ms ?: 1);
}
@@ -21,13 +21,13 @@ static inline int
snb_pcode_write(struct intel_uncore *uncore, u32 mbox, u32 val)
{
- return xe_pcode_write(__compat_uncore_to_gt(uncore), mbox, val);
+ return xe_pcode_write(__compat_uncore_to_tile(uncore), mbox, val);
}
static inline int
snb_pcode_read(struct intel_uncore *uncore, u32 mbox, u32 *val, u32 *val1)
{
- return xe_pcode_read(__compat_uncore_to_gt(uncore), mbox, val, val1);
+ return xe_pcode_read(__compat_uncore_to_tile(uncore), mbox, val, val1);
}
static inline int
@@ -35,7 +35,7 @@ skl_pcode_request(struct intel_uncore *uncore, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
int timeout_base_ms)
{
- return xe_pcode_request(__compat_uncore_to_gt(uncore), mbox, request, reply_mask, reply,
+ return xe_pcode_request(__compat_uncore_to_tile(uncore), mbox, request, reply_mask, reply,
timeout_base_ms);
}
diff --git a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
index 083c4da2ea41..eb5b5f0e4bd9 100644
--- a/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
+++ b/drivers/gpu/drm/xe/compat-i915-headers/intel_uncore.h
@@ -17,6 +17,13 @@ static inline struct xe_gt *__compat_uncore_to_gt(struct intel_uncore *uncore)
return xe_root_mmio_gt(xe);
}
+static inline struct xe_tile *__compat_uncore_to_tile(struct intel_uncore *uncore)
+{
+ struct xe_device *xe = container_of(uncore, struct xe_device, uncore);
+
+ return xe_device_get_root_tile(xe);
+}
+
static inline u32 intel_uncore_read(struct intel_uncore *uncore,
i915_reg_t i915_reg)
{
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index cd8948c08661..99499d6c0256 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -8,7 +8,6 @@
#include "intel_display_types.h"
#include "intel_fbdev_fb.h"
#include "xe_bo.h"
-#include "xe_gt.h"
#include "xe_ttm_stolen_mgr.h"
#include "xe_wa.h"
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 78a884ddd499..75736faf2a80 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -10,7 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "soc/intel_dram.h"
#include "i915_drv.h" /* FIXME: HAS_DISPLAY() depends on this */
@@ -341,16 +341,14 @@ void xe_display_pm_suspend(struct xe_device *xe, bool runtime)
xe_display_flush_cleanup_work(xe);
- xe_display_flush_cleanup_work(xe);
-
intel_dp_mst_suspend(xe);
intel_hpd_cancel_work(xe);
- if (!runtime && has_display(xe))
+ if (!runtime && has_display(xe)) {
intel_display_driver_suspend_access(xe);
-
- intel_encoder_suspend_all(&xe->display);
+ intel_encoder_suspend_all(&xe->display);
+ }
intel_opregion_suspend(display, s2idle ? PCI_D1 : PCI_D3cold);
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index ccd0d87d438a..f99d901a3214 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -9,7 +9,6 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_device_types.h"
-#include "xe_gt.h"
u32 intel_dsb_buffer_ggtt_offset(struct intel_dsb_buffer *dsb_buf)
{
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index d650c5ac41a4..b58fc4ba2aac 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -12,7 +12,6 @@
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_ggtt.h"
-#include "xe_gt.h"
#include "xe_pm.h"
static void
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 0af667ebebf9..6619a40aed15 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -16,7 +16,6 @@
#include "xe_force_wake.h"
#include "xe_gsc_proxy.h"
#include "xe_gsc_submit.h"
-#include "xe_gt.h"
#include "xe_map.h"
#include "xe_pm.h"
#include "xe_uc_fw.h"
diff --git a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
index e2a925be137c..7702364b65f1 100644
--- a/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_gsc_regs.h
@@ -32,8 +32,12 @@
#define HECI1_FWSTS1_CURRENT_STATE_RESET 0
#define HECI1_FWSTS1_PROXY_STATE_NORMAL 5
#define HECI1_FWSTS1_INIT_COMPLETE REG_BIT(9)
+#define HECI_FWSTS2(base) XE_REG((base) + 0xc48)
+#define HECI_FWSTS3(base) XE_REG((base) + 0xc60)
+#define HECI_FWSTS4(base) XE_REG((base) + 0xc64)
#define HECI_FWSTS5(base) XE_REG((base) + 0xc68)
#define HECI1_FWSTS5_HUC_AUTH_DONE REG_BIT(19)
+#define HECI_FWSTS6(base) XE_REG((base) + 0xc6c)
#define HECI_H_GS1(base) XE_REG((base) + 0xc4c)
#define HECI_H_GS1_ER_PREP REG_BIT(0)
diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
index 13db6c0530b3..cedd3e88a6fb 100644
--- a/drivers/gpu/drm/xe/tests/xe_dma_buf.c
+++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c
@@ -3,7 +3,7 @@
* Copyright © 2022 Intel Corporation
*/
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <kunit/test.h>
#include <kunit/visibility.h>
diff --git a/drivers/gpu/drm/xe/xe_assert.h b/drivers/gpu/drm/xe/xe_assert.h
index 8b0cc1bc9327..e22bbf57fca7 100644
--- a/drivers/gpu/drm/xe/xe_assert.h
+++ b/drivers/gpu/drm/xe/xe_assert.h
@@ -81,7 +81,7 @@
#if IS_ENABLED(CONFIG_DRM_XE_DEBUG)
#define __xe_assert_msg(xe, condition, msg, arg...) ({ \
- (void)drm_WARN(&(xe)->drm, !(condition), "[" DRM_NAME "] Assertion `%s` failed!\n" msg, \
+ (void)drm_WARN(&(xe)->drm, !(condition), "Assertion `%s` failed!\n" msg, \
__stringify(condition), ## arg); \
})
#else
diff --git a/drivers/gpu/drm/xe/xe_bo.c b/drivers/gpu/drm/xe/xe_bo.c
index 25d0c939ba31..06911e9a3bf5 100644
--- a/drivers/gpu/drm/xe/xe_bo.c
+++ b/drivers/gpu/drm/xe/xe_bo.c
@@ -13,7 +13,7 @@
#include <drm/ttm/ttm_device.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_dma_buf.h"
@@ -758,7 +758,16 @@ static int xe_bo_move(struct ttm_buffer_object *ttm_bo, bool evict,
xe_assert(xe, migrate);
trace_xe_bo_move(bo, new_mem->mem_type, old_mem_type, move_lacks_source);
- xe_pm_runtime_get_noresume(xe);
+ if (xe_rpm_reclaim_safe(xe)) {
+ /*
+ * We might be called through swapout in the validation path of
+ * another TTM device, so unconditionally acquire rpm here.
+ */
+ xe_pm_runtime_get(xe);
+ } else {
+ drm_WARN_ON(&xe->drm, handle_system_ccs);
+ xe_pm_runtime_get_noresume(xe);
+ }
if (xe_bo_is_pinned(bo) && !xe_bo_is_user(bo)) {
/*
diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c
index b6db7e082d88..1a0d7fdd094b 100644
--- a/drivers/gpu/drm/xe/xe_device.c
+++ b/drivers/gpu/drm/xe/xe_device.c
@@ -15,7 +15,7 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "display/xe_display.h"
#include "instructions/xe_gpu_commands.h"
diff --git a/drivers/gpu/drm/xe/xe_device.h b/drivers/gpu/drm/xe/xe_device.h
index f052c06a2d2f..894f04770454 100644
--- a/drivers/gpu/drm/xe/xe_device.h
+++ b/drivers/gpu/drm/xe/xe_device.h
@@ -17,12 +17,16 @@ static inline struct xe_device *to_xe_device(const struct drm_device *dev)
static inline struct xe_device *kdev_to_xe_device(struct device *kdev)
{
- return dev_get_drvdata(kdev);
+ struct drm_device *drm = dev_get_drvdata(kdev);
+
+ return drm ? to_xe_device(drm) : NULL;
}
static inline struct xe_device *pdev_to_xe_device(struct pci_dev *pdev)
{
- return pci_get_drvdata(pdev);
+ struct drm_device *drm = pci_get_drvdata(pdev);
+
+ return drm ? to_xe_device(drm) : NULL;
}
static inline struct xe_device *xe_device_const_cast(const struct xe_device *xe)
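
The NULL checks assume the drvdata convention where probe stores the DRM device pointer rather than the xe device itself. A hedged sketch of that convention (the probe-side line is illustrative, not the driver's actual probe code):

    /* Probe side: drvdata holds the drm_device ... */
    pci_set_drvdata(pdev, &xe->drm);

    /* ... so consumers translate back and must tolerate NULL before
     * probe completes or after unbind: */
    struct xe_device *xe = pdev_to_xe_device(pdev);
    if (!xe)
    	return; /* device not (or no longer) bound */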
diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h
index e73fb0c23932..ec7eb7811126 100644
--- a/drivers/gpu/drm/xe/xe_device_types.h
+++ b/drivers/gpu/drm/xe/xe_device_types.h
@@ -208,6 +208,12 @@ struct xe_tile {
} vf;
} sriov;
+ /** @pcode: tile's PCODE */
+ struct {
+ /** @pcode.lock: protecting tile's PCODE mailbox data */
+ struct mutex lock;
+ } pcode;
+
/** @migrate: Migration helper for vram blits and clearing */
struct xe_migrate *migrate;
diff --git a/drivers/gpu/drm/xe/xe_drm_client.c b/drivers/gpu/drm/xe/xe_drm_client.c
index 7ddd59908334..e64f4b645e2e 100644
--- a/drivers/gpu/drm/xe/xe_drm_client.c
+++ b/drivers/gpu/drm/xe/xe_drm_client.c
@@ -5,7 +5,7 @@
#include "xe_drm_client.h"
#include <drm/drm_print.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/types.h>
diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c
index 484acfbe0e61..7b38485817dc 100644
--- a/drivers/gpu/drm/xe/xe_exec.c
+++ b/drivers/gpu/drm/xe/xe_exec.c
@@ -8,7 +8,7 @@
#include <drm/drm_device.h>
#include <drm/drm_exec.h>
#include <drm/drm_file.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/delay.h>
#include "xe_bo.h"
diff --git a/drivers/gpu/drm/xe/xe_exec_queue.c b/drivers/gpu/drm/xe/xe_exec_queue.c
index e53937fafd14..5a9cbc97f0be 100644
--- a/drivers/gpu/drm/xe/xe_exec_queue.c
+++ b/drivers/gpu/drm/xe/xe_exec_queue.c
@@ -9,7 +9,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_gt.h"
diff --git a/drivers/gpu/drm/xe/xe_execlist.c b/drivers/gpu/drm/xe/xe_execlist.c
index 7502e3486eaf..6a59165b9569 100644
--- a/drivers/gpu/drm/xe/xe_execlist.c
+++ b/drivers/gpu/drm/xe/xe_execlist.c
@@ -123,8 +123,8 @@ static void __xe_execlist_port_idle(struct xe_execlist_port *port)
if (!port->running_exl)
return;
- xe_lrc_write_ring(port->hwe->kernel_lrc, noop, sizeof(noop));
- __start_lrc(port->hwe, port->hwe->kernel_lrc, 0);
+ xe_lrc_write_ring(port->lrc, noop, sizeof(noop));
+ __start_lrc(port->hwe, port->lrc, 0);
port->running_exl = NULL;
}
@@ -254,14 +254,22 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
{
struct drm_device *drm = &xe->drm;
struct xe_execlist_port *port;
- int i;
+ int i, err;
port = drmm_kzalloc(drm, sizeof(*port), GFP_KERNEL);
- if (!port)
- return ERR_PTR(-ENOMEM);
+ if (!port) {
+ err = -ENOMEM;
+ goto err;
+ }
port->hwe = hwe;
+ port->lrc = xe_lrc_create(hwe, NULL, SZ_16K);
+ if (IS_ERR(port->lrc)) {
+ err = PTR_ERR(port->lrc);
+ goto err;
+ }
+
spin_lock_init(&port->lock);
for (i = 0; i < ARRAY_SIZE(port->active); i++)
INIT_LIST_HEAD(&port->active[i]);
@@ -277,6 +285,9 @@ struct xe_execlist_port *xe_execlist_port_create(struct xe_device *xe,
add_timer(&port->irq_fail);
return port;
+
+err:
+ return ERR_PTR(err);
}
void xe_execlist_port_destroy(struct xe_execlist_port *port)
@@ -287,6 +298,8 @@ void xe_execlist_port_destroy(struct xe_execlist_port *port)
spin_lock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
port->hwe->irq_handler = NULL;
spin_unlock_irq(&gt_to_xe(port->hwe->gt)->irq.lock);
+
+ xe_lrc_put(port->lrc);
}
static struct dma_fence *
diff --git a/drivers/gpu/drm/xe/xe_execlist_types.h b/drivers/gpu/drm/xe/xe_execlist_types.h
index f94bbf4c53e4..415140936f11 100644
--- a/drivers/gpu/drm/xe/xe_execlist_types.h
+++ b/drivers/gpu/drm/xe/xe_execlist_types.h
@@ -27,6 +27,8 @@ struct xe_execlist_port {
struct xe_execlist_exec_queue *running_exl;
struct timer_list irq_fail;
+
+ struct xe_lrc *lrc;
};
struct xe_execlist_exec_queue {
diff --git a/drivers/gpu/drm/xe/xe_gsc.c b/drivers/gpu/drm/xe/xe_gsc.c
index 8a137cb83318..6fbea70d3d36 100644
--- a/drivers/gpu/drm/xe/xe_gsc.c
+++ b/drivers/gpu/drm/xe/xe_gsc.c
@@ -8,6 +8,7 @@
#include <linux/delay.h>
#include <drm/drm_managed.h>
+#include <drm/drm_print.h>
#include <generated/xe_wa_oob.h>
@@ -165,10 +166,11 @@ static int query_compatibility_version(struct xe_gsc *gsc)
return err;
}
- compat->major = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
- compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
+ compat->major = version_query_rd(xe, &bo->vmap, rd_offset, proj_major);
+ compat->minor = version_query_rd(xe, &bo->vmap, rd_offset, compat_major);
+ compat->patch = version_query_rd(xe, &bo->vmap, rd_offset, compat_minor);
- xe_gt_info(gt, "found GSC cv%u.%u\n", compat->major, compat->minor);
+ xe_gt_info(gt, "found GSC cv%u.%u.%u\n", compat->major, compat->minor, compat->patch);
out_bo:
xe_bo_unpin_map_no_vm(bo);
@@ -333,9 +335,11 @@ static int gsc_er_complete(struct xe_gt *gt)
if (er_status == GSCI_TIMER_STATUS_TIMER_EXPIRED) {
/*
* XXX: we should trigger an FLR here, but we don't have support
- * for that yet.
+ * for that yet. Since we can't recover from the error, we
+ * declare the device as wedged.
*/
xe_gt_err(gt, "GSC ER timed out!\n");
+ xe_device_declare_wedged(gt_to_xe(gt));
return -EIO;
}
@@ -513,13 +517,28 @@ out_bo:
void xe_gsc_load_start(struct xe_gsc *gsc)
{
struct xe_gt *gt = gsc_to_gt(gsc);
+ struct xe_device *xe = gt_to_xe(gt);
if (!xe_uc_fw_is_loadable(&gsc->fw) || !gsc->q)
return;
+ /*
+ * The GSC HW is only reset by driver FLR or D3cold entry. We don't
+ * support the former at runtime, while the latter is only supported on
+ * DGFX, for which we don't support GSC. Therefore, if the GSC failed to
+ * load previously, there is no need to try again because the HW is
+ * stuck in the error state.
+ */
+ xe_assert(xe, !IS_DGFX(xe));
+ if (xe_uc_fw_is_in_error_state(&gsc->fw))
+ return;
+
/* GSC FW survives GT reset and D3Hot */
if (gsc_fw_is_loaded(gt)) {
- xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
+ if (xe_gsc_proxy_init_done(gsc))
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_RUNNING);
+ else
+ xe_uc_fw_change_status(&gsc->fw, XE_UC_FIRMWARE_TRANSFERRED);
return;
}
@@ -571,3 +590,35 @@ void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep)
msleep(200);
}
}
+
+/**
+ * xe_gsc_print_info - print info about GSC FW status
+ * @gsc: the GSC structure
+ * @p: the printer to be used to print the info
+ */
+void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p)
+{
+ struct xe_gt *gt = gsc_to_gt(gsc);
+ int err;
+
+ xe_uc_fw_print(&gsc->fw, p);
+
+ drm_printf(p, "\tfound security version %u\n", gsc->security_version);
+
+ if (!xe_uc_fw_is_enabled(&gsc->fw))
+ return;
+
+ err = xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC);
+ if (err)
+ return;
+
+ drm_printf(p, "\nHECI1 FWSTS: 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x\n",
+ xe_mmio_read32(gt, HECI_FWSTS1(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS2(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS3(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS4(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS5(MTL_GSC_HECI1_BASE)),
+ xe_mmio_read32(gt, HECI_FWSTS6(MTL_GSC_HECI1_BASE)));
+
+ xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+}
diff --git a/drivers/gpu/drm/xe/xe_gsc.h b/drivers/gpu/drm/xe/xe_gsc.h
index 1c7a623faf11..e282b9ef6ec4 100644
--- a/drivers/gpu/drm/xe/xe_gsc.h
+++ b/drivers/gpu/drm/xe/xe_gsc.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
+struct drm_printer;
struct xe_gsc;
struct xe_gt;
struct xe_hw_engine;
@@ -21,4 +22,6 @@ void xe_gsc_hwe_irq_handler(struct xe_hw_engine *hwe, u16 intr_vec);
void xe_gsc_wa_14015076503(struct xe_gt *gt, bool prep);
+void xe_gsc_print_info(struct xe_gsc *gsc, struct drm_printer *p);
+
#endif
diff --git a/drivers/gpu/drm/xe/xe_gsc_debugfs.c b/drivers/gpu/drm/xe/xe_gsc_debugfs.c
new file mode 100644
index 000000000000..461d7e99c2b3
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_debugfs.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#include "xe_gsc_debugfs.h"
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_managed.h>
+
+#include "xe_device.h"
+#include "xe_gt.h"
+#include "xe_gsc.h"
+#include "xe_macros.h"
+#include "xe_pm.h"
+
+static struct xe_gt *
+gsc_to_gt(struct xe_gsc *gsc)
+{
+ return container_of(gsc, struct xe_gt, uc.gsc);
+}
+
+static struct xe_device *
+gsc_to_xe(struct xe_gsc *gsc)
+{
+ return gt_to_xe(gsc_to_gt(gsc));
+}
+
+static struct xe_gsc *node_to_gsc(struct drm_info_node *node)
+{
+ return node->info_ent->data;
+}
+
+static int gsc_info(struct seq_file *m, void *data)
+{
+ struct xe_gsc *gsc = node_to_gsc(m->private);
+ struct xe_device *xe = gsc_to_xe(gsc);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ xe_pm_runtime_get(xe);
+ xe_gsc_print_info(gsc, &p);
+ xe_pm_runtime_put(xe);
+
+ return 0;
+}
+
+static const struct drm_info_list debugfs_list[] = {
+ {"gsc_info", gsc_info, 0},
+};
+
+void xe_gsc_debugfs_register(struct xe_gsc *gsc, struct dentry *parent)
+{
+ struct drm_minor *minor = gsc_to_xe(gsc)->drm.primary;
+ struct drm_info_list *local;
+ int i;
+
+#define DEBUGFS_SIZE (ARRAY_SIZE(debugfs_list) * sizeof(struct drm_info_list))
+ local = drmm_kmalloc(&gsc_to_xe(gsc)->drm, DEBUGFS_SIZE, GFP_KERNEL);
+ if (!local)
+ return;
+
+ memcpy(local, debugfs_list, DEBUGFS_SIZE);
+#undef DEBUGFS_SIZE
+
+ for (i = 0; i < ARRAY_SIZE(debugfs_list); ++i)
+ local[i].data = gsc;
+
+ drm_debugfs_create_files(local,
+ ARRAY_SIZE(debugfs_list),
+ parent, minor);
+}
diff --git a/drivers/gpu/drm/xe/xe_gsc_debugfs.h b/drivers/gpu/drm/xe/xe_gsc_debugfs.h
new file mode 100644
index 000000000000..c2e2645dc705
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gsc_debugfs.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GSC_DEBUGFS_H_
+#define _XE_GSC_DEBUGFS_H_
+
+struct dentry;
+struct xe_gsc;
+
+void xe_gsc_debugfs_register(struct xe_gsc *gsc, struct dentry *parent);
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt.c b/drivers/gpu/drm/xe/xe_gt.c
index 08a004d698d4..dd96dec95b19 100644
--- a/drivers/gpu/drm/xe/xe_gt.c
+++ b/drivers/gpu/drm/xe/xe_gt.c
@@ -8,7 +8,7 @@
#include <linux/minmax.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
@@ -48,7 +48,6 @@
#include "xe_migrate.h"
#include "xe_mmio.h"
#include "xe_pat.h"
-#include "xe_pcode.h"
#include "xe_pm.h"
#include "xe_mocs.h"
#include "xe_reg_sr.h"
@@ -388,7 +387,6 @@ int xe_gt_init_early(struct xe_gt *gt)
xe_tuning_process_gt(gt);
xe_force_wake_init_gt(gt, gt_to_fw(gt));
- xe_pcode_init(gt);
spin_lock_init(&gt->global_invl_lock);
return 0;
@@ -756,12 +754,13 @@ static int gt_reset(struct xe_gt *gt)
xe_gt_info(gt, "reset started\n");
+ xe_pm_runtime_get(gt_to_xe(gt));
+
if (xe_fault_inject_gt_reset()) {
err = -ECANCELED;
goto err_fail;
}
- xe_pm_runtime_get(gt_to_xe(gt));
xe_gt_sanitize(gt);
err = xe_force_wake_get(gt_to_fw(gt), XE_FORCEWAKE_ALL);
@@ -796,11 +795,11 @@ err_out:
XE_WARN_ON(xe_force_wake_put(gt_to_fw(gt), XE_FORCEWAKE_ALL));
err_msg:
XE_WARN_ON(xe_uc_start(&gt->uc));
- xe_pm_runtime_put(gt_to_xe(gt));
err_fail:
xe_gt_err(gt, "reset failed (%pe)\n", ERR_PTR(err));
xe_device_declare_wedged(gt_to_xe(gt));
+ xe_pm_runtime_put(gt_to_xe(gt));
return err;
}
diff --git a/drivers/gpu/drm/xe/xe_gt_pagefault.c b/drivers/gpu/drm/xe/xe_gt_pagefault.c
index 0be4687bfc20..730eec07795e 100644
--- a/drivers/gpu/drm/xe/xe_gt_pagefault.c
+++ b/drivers/gpu/drm/xe/xe_gt_pagefault.c
@@ -388,20 +388,17 @@ static void pagefault_fini(void *arg)
{
struct xe_gt *gt = arg;
struct xe_device *xe = gt_to_xe(gt);
- int i;
if (!xe->info.has_usm)
return;
destroy_workqueue(gt->usm.acc_wq);
destroy_workqueue(gt->usm.pf_wq);
-
- for (i = 0; i < NUM_PF_QUEUE; ++i)
- kfree(gt->usm.pf_queue[i].data);
}
static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
{
+ struct xe_device *xe = gt_to_xe(gt);
xe_dss_mask_t all_dss;
int num_dss, num_eus;
@@ -417,7 +414,8 @@ static int xe_alloc_pf_queue(struct xe_gt *gt, struct pf_queue *pf_queue)
(num_eus + XE_NUM_HW_ENGINES) * PF_MSG_LEN_DW;
pf_queue->gt = gt;
- pf_queue->data = kcalloc(pf_queue->num_dw, sizeof(u32), GFP_KERNEL);
+ pf_queue->data = devm_kcalloc(xe->drm.dev, pf_queue->num_dw,
+ sizeof(u32), GFP_KERNEL);
if (!pf_queue->data)
return -ENOMEM;
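
devm_kcalloc() ties the buffer's lifetime to the struct device, which is what lets the kfree() loop disappear from pagefault_fini() above. The general idiom:

    /* Device-managed allocation: devres frees it when the device is
     * released, so no matching kfree() is needed. */
    u32 *data = devm_kcalloc(xe->drm.dev, num_dw, sizeof(u32), GFP_KERNEL);
    if (!data)
    	return -ENOMEM;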
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
index ef239440963c..905f409db74b 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf.c
@@ -9,6 +9,7 @@
#include "xe_gt_sriov_pf.h"
#include "xe_gt_sriov_pf_config.h"
+#include "xe_gt_sriov_pf_control.h"
#include "xe_gt_sriov_pf_helpers.h"
#include "xe_gt_sriov_pf_service.h"
#include "xe_mmio.h"
@@ -57,6 +58,10 @@ int xe_gt_sriov_pf_init_early(struct xe_gt *gt)
if (err)
return err;
+ err = xe_gt_sriov_pf_control_init(gt);
+ if (err)
+ return err;
+
return 0;
}
@@ -93,4 +98,5 @@ void xe_gt_sriov_pf_init_hw(struct xe_gt *gt)
void xe_gt_sriov_pf_restart(struct xe_gt *gt)
{
xe_gt_sriov_pf_config_restart(gt);
+ xe_gt_sriov_pf_control_restart(gt);
}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
index 41ed07b153b5..a95e546b7744 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.c
@@ -29,6 +29,7 @@
#include "xe_guc_submit.h"
#include "xe_lmtt.h"
#include "xe_map.h"
+#include "xe_migrate.h"
#include "xe_sriov.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_wopcm.h"
@@ -276,6 +277,14 @@ static u32 encode_config(u32 *cfg, const struct xe_gt_sriov_config *config)
cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_PREEMPT_TIMEOUT);
cfg[n++] = config->preempt_timeout;
+#define encode_threshold_config(TAG, ...) ({ \
+ cfg[n++] = PREP_GUC_KLV_TAG(VF_CFG_THRESHOLD_##TAG); \
+ cfg[n++] = config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)]; \
+});
+
+ MAKE_XE_GUC_KLV_THRESHOLDS_SET(encode_threshold_config);
+#undef encode_threshold_config
+
return n;
}
@@ -1833,6 +1842,18 @@ u32 xe_gt_sriov_pf_config_get_threshold(struct xe_gt *gt, unsigned int vfid,
return value;
}
+static void pf_reset_config_thresholds(struct xe_gt *gt, struct xe_gt_sriov_config *config)
+{
+ lockdep_assert_held(xe_gt_sriov_pf_master_mutex(gt));
+
+#define reset_threshold_config(TAG, ...) ({ \
+ config->thresholds[MAKE_XE_GUC_KLV_THRESHOLD_INDEX(TAG)] = 0; \
+});
+
+ MAKE_XE_GUC_KLV_THRESHOLDS_SET(reset_threshold_config);
+#undef reset_threshold_config
+}
+
static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
{
struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
@@ -1848,6 +1869,7 @@ static void pf_release_vf_config(struct xe_gt *gt, unsigned int vfid)
pf_release_config_ctxs(gt, config);
pf_release_config_dbs(gt, config);
pf_reset_config_sched(gt, config);
+ pf_reset_config_thresholds(gt, config);
}
/**
@@ -1881,6 +1903,87 @@ int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool forc
return force ? 0 : err;
}
+static void pf_sanitize_ggtt(struct xe_ggtt_node *ggtt_region, unsigned int vfid)
+{
+ if (xe_ggtt_node_allocated(ggtt_region))
+ xe_ggtt_assign(ggtt_region, vfid);
+}
+
+static int pf_sanitize_lmem(struct xe_tile *tile, struct xe_bo *bo, long timeout)
+{
+ struct xe_migrate *m = tile->migrate;
+ struct dma_fence *fence;
+ int err;
+
+ if (!bo)
+ return 0;
+
+ xe_bo_lock(bo, false);
+ fence = xe_migrate_clear(m, bo, bo->ttm.resource, XE_MIGRATE_CLEAR_FLAG_FULL);
+ if (IS_ERR(fence)) {
+ err = PTR_ERR(fence);
+ } else if (!fence) {
+ err = -ENOMEM;
+ } else {
+ long ret = dma_fence_wait_timeout(fence, false, timeout);
+
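+		/* dma_fence_wait_timeout(): >0 jiffies left, 0 timed out, <0 error */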
+ err = ret > 0 ? 0 : ret < 0 ? ret : -ETIMEDOUT;
+ dma_fence_put(fence);
+ if (!err)
+			xe_gt_sriov_dbg_verbose(tile->primary_gt, "LMEM cleared in %ums\n",
+ jiffies_to_msecs(timeout - ret));
+ }
+ xe_bo_unlock(bo);
+
+ return err;
+}
+
+static int pf_sanitize_vf_resources(struct xe_gt *gt, u32 vfid, long timeout)
+{
+ struct xe_gt_sriov_config *config = pf_pick_vf_config(gt, vfid);
+ struct xe_tile *tile = gt_to_tile(gt);
+ struct xe_device *xe = gt_to_xe(gt);
+ int err = 0;
+
+ /*
+	 * Only GGTT and LMEM require clearing by the PF.
+ * GuC doorbell IDs and context IDs do not need any clearing.
+ */
+ if (!xe_gt_is_media_type(gt)) {
+ pf_sanitize_ggtt(config->ggtt_region, vfid);
+ if (IS_DGFX(xe))
+ err = pf_sanitize_lmem(tile, config->lmem_obj, timeout);
+ }
+
+ return err;
+}
+
+/**
+ * xe_gt_sriov_pf_config_sanitize() - Sanitize VF's resources.
+ * @gt: the &xe_gt
+ * @vfid: the VF identifier (can't be PF)
+ * @timeout: maximum time, in jiffies, to wait for completion
+ *
+ * This function can only be called on PF.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout)
+{
+ int err;
+
+ xe_gt_assert(gt, vfid != PFID);
+
+ mutex_lock(xe_gt_sriov_pf_master_mutex(gt));
+ err = pf_sanitize_vf_resources(gt, vfid, timeout);
+ mutex_unlock(xe_gt_sriov_pf_master_mutex(gt));
+
+ if (unlikely(err))
+ xe_gt_sriov_notice(gt, "VF%u resource sanitizing failed (%pe)\n",
+ vfid, ERR_PTR(err));
+ return err;
+}
+
/**
* xe_gt_sriov_pf_config_push - Reprovision VF's configuration.
* @gt: the &xe_gt
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
index c0e6e4743dc2..42e64769f666 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_config.h
@@ -50,6 +50,7 @@ int xe_gt_sriov_pf_config_set_threshold(struct xe_gt *gt, unsigned int vfid,
enum xe_guc_klv_threshold_index index, u32 value);
int xe_gt_sriov_pf_config_set_fair(struct xe_gt *gt, unsigned int vfid, unsigned int num_vfs);
+int xe_gt_sriov_pf_config_sanitize(struct xe_gt *gt, unsigned int vfid, long timeout);
int xe_gt_sriov_pf_config_release(struct xe_gt *gt, unsigned int vfid, bool force);
int xe_gt_sriov_pf_config_push(struct xe_gt *gt, unsigned int vfid, bool refresh);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
index ebf06e037750..02f7328bd6ce 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.c
@@ -3,11 +3,17 @@
* Copyright © 2023-2024 Intel Corporation
*/
+#include <drm/drm_managed.h>
+
#include "abi/guc_actions_sriov_abi.h"
#include "xe_device.h"
#include "xe_gt.h"
+#include "xe_gt_sriov_pf_config.h"
#include "xe_gt_sriov_pf_control.h"
+#include "xe_gt_sriov_pf_helpers.h"
+#include "xe_gt_sriov_pf_monitor.h"
+#include "xe_gt_sriov_pf_service.h"
#include "xe_gt_sriov_printk.h"
#include "xe_guc_ct.h"
#include "xe_sriov.h"
@@ -41,10 +47,6 @@ static int guc_action_vf_control_cmd(struct xe_guc *guc, u32 vfid, u32 cmd)
};
int ret;
- /* XXX those two commands are now sent from the G2H handler */
- if (cmd == GUC_PF_TRIGGER_VF_FLR_START || cmd == GUC_PF_TRIGGER_VF_FLR_FINISH)
- return xe_guc_ct_send_g2h_handler(&guc->ct, request, ARRAY_SIZE(request));
-
ret = xe_guc_ct_send_block(&guc->ct, request, ARRAY_SIZE(request));
return ret > 0 ? -EPROTO : ret;
}
@@ -54,6 +56,8 @@ static int pf_send_vf_control_cmd(struct xe_gt *gt, unsigned int vfid, u32 cmd)
int err;
xe_gt_assert(gt, vfid != PFID);
+ xe_gt_sriov_dbg_verbose(gt, "sending VF%u control command %s\n",
+ vfid, control_cmd_to_string(cmd));
err = guc_action_vf_control_cmd(&gt->uc.guc, vfid, cmd);
if (unlikely(err))
@@ -88,6 +92,456 @@ static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
}
/**
+ * DOC: The VF state machine
+ *
+ * The simplified VF state machine could be presented as::
+ *
+ * pause--------------------------o
+ * / |
+ * / v
+ * (READY)<------------------resume-----(PAUSED)
+ * ^ \ / /
+ * | \ / /
+ * | stop---->(STOPPED)<----stop /
+ * | / /
+ * | / /
+ * o--------<-----flr /
+ * \ /
+ * o------<--------------------flr
+ *
+ * Where:
+ *
+ * * READY - represents a state in which VF is fully operable
+ * * PAUSED - represents a state in which VF activity is temporarily suspended
+ * * STOPPED - represents a state in which VF activity is permanently halted
+ * * pause - represents a request to temporarily suspend VF activity
+ * * resume - represents a request to resume VF activity
+ * * stop - represents a request to permanently halt VF activity
+ * * flr - represents a request to perform VF FLR to restore VF activity
+ *
+ * However, each state transition requires additional steps that involve
+ * communication with the GuC, which might fail or be interrupted by other
+ * requests::
+ *
+ * .................................WIP....
+ * : :
+ * pause--------------------->PAUSE_WIP----------------------------o
+ * / : / \ : |
+ * / : o----<---stop flr--o : |
+ * / : | \ / | : V
+ * (READY,RESUMED)<--------+------------RESUME_WIP<----+--<-----resume--(PAUSED)
+ * ^ \ \ : | | : / /
+ * | \ \ : | | : / /
+ * | \ \ : | | : / /
+ * | \ \ : o----<----------------------+--<-------stop /
+ * | \ \ : | | : /
+ * | \ \ : V | : /
+ * | \ stop----->STOP_WIP---------flr--->-----o : /
+ * | \ : | | : /
+ * | \ : | V : /
+ * | flr--------+----->----------------->FLR_WIP<-----flr
+ * | : | / ^ :
+ * | : | / | :
+ * o--------<-------:----+-----<----------------o | :
+ * : | | :
+ * :....|...........................|.....:
+ * | |
+ * V |
+ * (STOPPED)--------------------flr
+ *
+ * For details about each internal WIP state machine see:
+ *
+ * * `The VF PAUSE state machine`_
+ * * `The VF RESUME state machine`_
+ * * `The VF STOP state machine`_
+ * * `The VF FLR state machine`_
+ */
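
A minimal sketch (not part of the patch) of driving these transitions via the
public entry points added below; the VF number is hypothetical and the error
values follow the implementations in this file:

	static int demo_pause_resume(struct xe_gt *gt)
	{
		unsigned int vfid = 1;	/* hypothetical VF */
		int err;

		err = xe_gt_sriov_pf_control_pause_vf(gt, vfid);
		if (err == -ESTALE)	/* VF was already PAUSED */
			err = 0;
		if (err)		/* -EPERM, -EALREADY, -EIO, -ECANCELED, ... */
			return err;

		return xe_gt_sriov_pf_control_resume_vf(gt, vfid);
	}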
+
+#ifdef CONFIG_DRM_XE_DEBUG_SRIOV
+static const char *control_bit_to_string(enum xe_gt_sriov_control_bits bit)
+{
+ switch (bit) {
+#define CASE2STR(_X) \
+ case XE_GT_SRIOV_STATE_##_X: return #_X
+ CASE2STR(WIP);
+ CASE2STR(FLR_WIP);
+ CASE2STR(FLR_SEND_START);
+ CASE2STR(FLR_WAIT_GUC);
+ CASE2STR(FLR_GUC_DONE);
+ CASE2STR(FLR_RESET_CONFIG);
+ CASE2STR(FLR_RESET_DATA);
+ CASE2STR(FLR_RESET_MMIO);
+ CASE2STR(FLR_SEND_FINISH);
+ CASE2STR(FLR_FAILED);
+ CASE2STR(PAUSE_WIP);
+ CASE2STR(PAUSE_SEND_PAUSE);
+ CASE2STR(PAUSE_WAIT_GUC);
+ CASE2STR(PAUSE_GUC_DONE);
+ CASE2STR(PAUSE_FAILED);
+ CASE2STR(PAUSED);
+ CASE2STR(RESUME_WIP);
+ CASE2STR(RESUME_SEND_RESUME);
+ CASE2STR(RESUME_FAILED);
+ CASE2STR(RESUMED);
+ CASE2STR(STOP_WIP);
+ CASE2STR(STOP_SEND_STOP);
+ CASE2STR(STOP_FAILED);
+ CASE2STR(STOPPED);
+ CASE2STR(MISMATCH);
+#undef CASE2STR
+ default: return "?";
+ }
+}
+#endif
+
+static unsigned long pf_get_default_timeout(enum xe_gt_sriov_control_bits bit)
+{
+ switch (bit) {
+ case XE_GT_SRIOV_STATE_FLR_WAIT_GUC:
+ case XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC:
+ return HZ / 2;
+ case XE_GT_SRIOV_STATE_FLR_WIP:
+ case XE_GT_SRIOV_STATE_FLR_RESET_CONFIG:
+ return 5 * HZ;
+ default:
+ return HZ;
+ }
+}
+
+static struct xe_gt_sriov_control_state *pf_pick_vf_control(struct xe_gt *gt, unsigned int vfid)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt));
+
+ return &gt->sriov.pf.vfs[vfid].control;
+}
+
+static unsigned long *pf_peek_vf_state(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ return &cs->state;
+}
+
+static bool pf_check_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ return test_bit(bit, pf_peek_vf_state(gt, vfid));
+}
+
+static void pf_dump_vf_state(struct xe_gt *gt, unsigned int vfid)
+{
+ unsigned long state = *pf_peek_vf_state(gt, vfid);
+ enum xe_gt_sriov_control_bits bit;
+
+ if (state) {
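+		/* %*pbl prints the state bitmap as a list of set bit ranges */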
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %#lx%s%*pbl\n",
+ vfid, state, state ? " bits " : "",
+ (int)BITS_PER_LONG, &state);
+ for_each_set_bit(bit, &state, BITS_PER_LONG)
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d)\n",
+ vfid, control_bit_to_string(bit), bit);
+ } else {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state READY\n", vfid);
+ }
+}
+
+static bool pf_expect_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ bool result = pf_check_vf_state(gt, vfid, bit);
+
+ if (unlikely(!result))
+ pf_dump_vf_state(gt, vfid);
+
+ return result;
+}
+
+static bool pf_expect_vf_not_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ bool result = !pf_check_vf_state(gt, vfid, bit);
+
+ if (unlikely(!result))
+ pf_dump_vf_state(gt, vfid);
+
+ return result;
+}
+
+static bool pf_enter_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ if (!test_and_set_bit(bit, pf_peek_vf_state(gt, vfid))) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) enter\n",
+ vfid, control_bit_to_string(bit), bit);
+ return true;
+ }
+ return false;
+}
+
+static bool pf_exit_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ if (test_and_clear_bit(bit, pf_peek_vf_state(gt, vfid))) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) exit\n",
+ vfid, control_bit_to_string(bit), bit);
+ return true;
+ }
+ return false;
+}
+
+static void pf_escape_vf_state(struct xe_gt *gt, unsigned int vfid,
+ enum xe_gt_sriov_control_bits bit)
+{
+ if (pf_exit_vf_state(gt, vfid, bit))
+ xe_gt_sriov_dbg_verbose(gt, "VF%u state %s(%d) escaped by %ps\n",
+ vfid, control_bit_to_string(bit), bit,
+ __builtin_return_address(0));
+}
+
+static void pf_enter_vf_mismatch(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_MISMATCH)) {
+ xe_gt_sriov_dbg(gt, "VF%u state mismatch detected by %ps\n",
+ vfid, __builtin_return_address(0));
+ pf_dump_vf_state(gt, vfid);
+ }
+}
+
+static void pf_exit_vf_mismatch(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_MISMATCH))
+ xe_gt_sriov_dbg(gt, "VF%u state mismatch cleared by %ps\n",
+ vfid, __builtin_return_address(0));
+
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED);
+}
+
+#define pf_enter_vf_state_machine_bug(gt, vfid) ({ \
+ pf_enter_vf_mismatch((gt), (vfid)); \
+})
+
+static void pf_queue_control_worker(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ queue_work(xe->sriov.wq, &gt->sriov.pf.control.worker);
+}
+
+static void pf_queue_vf(struct xe_gt *gt, unsigned int vfid)
+{
+ struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+
+ spin_lock(&pfc->lock);
+ list_move_tail(&gt->sriov.pf.vfs[vfid].control.link, &pfc->list);
+ spin_unlock(&pfc->lock);
+
+ pf_queue_control_worker(gt);
+}
+
+static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid);
+static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid);
+
+static bool pf_enter_vf_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_WIP)) {
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ reinit_completion(&cs->done);
+ return true;
+ }
+ return false;
+}
+
+static void pf_exit_vf_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_WIP)) {
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ pf_exit_vf_flr_wip(gt, vfid);
+ pf_exit_vf_stop_wip(gt, vfid);
+ pf_exit_vf_pause_wip(gt, vfid);
+ pf_exit_vf_resume_wip(gt, vfid);
+
+ complete_all(&cs->done);
+ }
+}
+
+static int pf_wait_vf_wip_done(struct xe_gt *gt, unsigned int vfid, unsigned long timeout)
+{
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, vfid);
+
+ return wait_for_completion_timeout(&cs->done, timeout) ? 0 : -ETIMEDOUT;
+}
+
+static void pf_enter_vf_ready(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+/**
+ * DOC: The VF PAUSE state machine
+ *
+ * The VF PAUSE state machine looks like::
+ *
+ * (READY,RESUMED)<-------------<---------------------o---------o
+ * | \ \
+ * pause \ \
+ * | \ \
+ * ....V...........................PAUSE_WIP........ \ \
+ * : \ : o \
+ * : \ o------<-----busy : | \
+ * : \ / / : | |
+ * : PAUSE_SEND_PAUSE ---failed--->----------o--->(PAUSE_FAILED) |
+ * : | \ : | |
+ * : acked rejected---->----------o--->(MISMATCH) /
+ * : | : /
+ * : v : /
+ * : PAUSE_WAIT_GUC : /
+ * : | : /
+ * : done : /
+ * : | : /
+ * : v : /
+ * : PAUSE_GUC_DONE o-----restart
+ * : / :
+ * : / :
+ * :....o..............o...............o...........:
+ * | | |
+ * completed flr stop
+ * | | |
+ * V .....V..... ......V.....
+ * (PAUSED) : FLR_WIP : : STOP_WIP :
+ * :.........: :..........:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) {
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE);
+ }
+}
+
+static void pf_enter_vf_paused(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_pause_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_paused(gt, vfid);
+}
+
+static void pf_enter_vf_pause_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_pause_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_pause_failed(gt, vfid);
+}
+
+static bool pf_exit_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
+ return false;
+
+ pf_enter_vf_pause_completed(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_pause_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_GUC_DONE))
+ pf_queue_vf(gt, vfid);
+}
+
+static void pf_enter_pause_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+}
+
+static bool pf_exit_pause_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC);
+}
+
+static void pf_enter_vf_pause_send_pause(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_pause_send_pause(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE))
+ return false;
+
+ /* GuC may actually send a PAUSE_DONE before we get a RESPONSE */
+ pf_enter_pause_wait_guc(gt, vfid);
+
+ err = pf_send_vf_pause(gt, vfid);
+ if (err) {
+ /* send failed, so we shouldn't expect PAUSE_DONE from GuC */
+ pf_exit_pause_wait_guc(gt, vfid);
+
+ if (err == -EBUSY)
+ pf_enter_vf_pause_send_pause(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_pause_rejected(gt, vfid);
+ else
+ pf_enter_vf_pause_failed(gt, vfid);
+ } else {
+ /*
+		 * We may have already moved to WAIT_GUC, or even to GUC_DONE,
+		 * but since the GuC didn't complain we can clear MISMATCH.
+ */
+ pf_exit_vf_mismatch(gt, vfid);
+ }
+
+ return true;
+}
+
+static bool pf_enter_vf_pause_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_pause_send_pause(gt, vfid);
+ return true;
+ }
+
+ return false;
+}
+
+/**
* xe_gt_sriov_pf_control_pause_vf - Pause a VF.
* @gt: the &xe_gt
* @vfid: the VF identifier
@@ -98,7 +552,140 @@ static int pf_send_vf_flr_finish(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
{
- return pf_send_vf_pause(gt, vfid);
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_PAUSE_WIP);
+ int err;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is stopped!\n", vfid);
+ return -EPERM;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_dbg(gt, "VF%u was already paused!\n", vfid);
+ return -ESTALE;
+ }
+
+ if (!pf_enter_vf_pause_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u pause already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err) {
+ xe_gt_sriov_dbg(gt, "VF%u pause didn't finish in %u ms (%pe)\n",
+ vfid, jiffies_to_msecs(timeout), ERR_PTR(err));
+ return err;
+ }
+
+ if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_info(gt, "VF%u paused!\n", vfid);
+ return 0;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_FAILED)) {
+ xe_gt_sriov_dbg(gt, "VF%u pause failed!\n", vfid);
+ return -EIO;
+ }
+
+ xe_gt_sriov_dbg(gt, "VF%u pause was canceled!\n", vfid);
+ return -ECANCELED;
+}
+
+/**
+ * DOC: The VF RESUME state machine
+ *
+ * The VF RESUME state machine looks like::
+ *
+ * (PAUSED)<-----------------<------------------------o
+ * | \
+ * resume \
+ * | \
+ * ....V............................RESUME_WIP...... \
+ * : \ : o
+ * : \ o-------<-----busy : |
+ * : \ / / : |
+ * : RESUME_SEND_RESUME ---failed--->--------o--->(RESUME_FAILED)
+ * : / \ : |
+ * : acked rejected---->---------o--->(MISMATCH)
+ * : / :
+ * :....o..............o...............o.....o.....:
+ * | | | \
+ * completed flr stop restart-->(READY)
+ * | | |
+ * V .....V..... ......V.....
+ * (RESUMED) : FLR_WIP : : STOP_WIP :
+ * :.........: :..........:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_resume_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_WIP))
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME);
+}
+
+static void pf_enter_vf_resumed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_resume_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_resumed(gt, vfid);
+}
+
+static void pf_enter_vf_resume_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_resume_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_resume_failed(gt, vfid);
+}
+
+static void pf_enter_vf_resume_send_resume(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_resume_send_resume(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_SEND_RESUME))
+ return false;
+
+ err = pf_send_vf_resume(gt, vfid);
+ if (err == -EBUSY)
+ pf_enter_vf_resume_send_resume(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_resume_rejected(gt, vfid);
+ else if (err)
+ pf_enter_vf_resume_failed(gt, vfid);
+ else
+ pf_enter_vf_resume_completed(gt, vfid);
+ return true;
+}
+
+static bool pf_enter_vf_resume_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_resume_send_resume(gt, vfid);
+ return true;
+ }
+
+ return false;
}
/**
@@ -112,7 +699,134 @@ int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
{
- return pf_send_vf_resume(gt, vfid);
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_RESUME_WIP);
+ int err;
+
+ if (!pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED)) {
+ xe_gt_sriov_dbg(gt, "VF%u is not paused!\n", vfid);
+ return -EPERM;
+ }
+
+ if (!pf_enter_vf_resume_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u resume already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err)
+ return err;
+
+ if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED)) {
+ xe_gt_sriov_info(gt, "VF%u resumed!\n", vfid);
+ return 0;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUME_FAILED)) {
+ xe_gt_sriov_dbg(gt, "VF%u resume failed!\n", vfid);
+ return -EIO;
+ }
+
+ xe_gt_sriov_dbg(gt, "VF%u resume was canceled!\n", vfid);
+ return -ECANCELED;
+}
+
+/**
+ * DOC: The VF STOP state machine
+ *
+ * The VF STOP state machine looks like::
+ *
+ * (READY,PAUSED,RESUMED)<-------<--------------------o
+ * | \
+ * stop \
+ * | \
+ * ....V..............................STOP_WIP...... \
+ * : \ : o
+ * : \ o----<----busy : |
+ * : \ / / : |
+ * : STOP_SEND_STOP--------failed--->--------o--->(STOP_FAILED)
+ * : / \ : |
+ * : acked rejected-------->--------o--->(MISMATCH)
+ * : / :
+ * :....o..............o...............o...........:
+ * | | |
+ * completed flr restart
+ * | | |
+ * V .....V..... V
+ * (STOPPED) : FLR_WIP : (READY)
+ * :.........:
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
+
+static void pf_exit_vf_stop_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_WIP))
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP);
+}
+
+static void pf_enter_vf_stopped(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_RESUMED);
+ pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSED);
+ pf_exit_vf_mismatch(gt, vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_stop_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_stopped(gt, vfid);
+}
+
+static void pf_enter_vf_stop_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_stop_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_stop_failed(gt, vfid);
+}
+
+static void pf_enter_vf_stop_send_stop(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_stop_send_stop(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_SEND_STOP))
+ return false;
+
+ err = pf_send_vf_stop(gt, vfid);
+ if (err == -EBUSY)
+ pf_enter_vf_stop_send_stop(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_stop_rejected(gt, vfid);
+ else if (err)
+ pf_enter_vf_stop_failed(gt, vfid);
+ else
+ pf_enter_vf_stop_completed(gt, vfid);
+ return true;
+}
+
+static bool pf_enter_vf_stop_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_WIP)) {
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_stop_send_stop(gt, vfid);
+ return true;
+ }
+ return false;
}
/**
@@ -126,7 +840,280 @@ int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
{
- return pf_send_vf_stop(gt, vfid);
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_STOP_WIP);
+ int err;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_dbg(gt, "VF%u was already stopped!\n", vfid);
+ return -ESTALE;
+ }
+
+ if (!pf_enter_vf_stop_wip(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "VF%u stop already in progress!\n", vfid);
+ return -EALREADY;
+ }
+
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err)
+ return err;
+
+ if (pf_expect_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOPPED)) {
+ xe_gt_sriov_info(gt, "VF%u stopped!\n", vfid);
+ return 0;
+ }
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_STOP_FAILED)) {
+ xe_gt_sriov_dbg(gt, "VF%u stop failed!\n", vfid);
+ return -EIO;
+ }
+
+ xe_gt_sriov_dbg(gt, "VF%u stop was canceled!\n", vfid);
+ return -ECANCELED;
+}
+
+/**
+ * DOC: The VF FLR state machine
+ *
+ * The VF FLR state machine looks like::
+ *
+ * (READY,PAUSED,STOPPED)<------------<--------------o
+ * | \
+ * flr \
+ * | \
+ * ....V..........................FLR_WIP........... \
+ * : \ : \
+ * : \ o----<----busy : |
+ * : \ / / : |
+ * : FLR_SEND_START---failed----->-----------o--->(FLR_FAILED)<---o
+ * : | \ : | |
+ * : acked rejected----->-----------o--->(MISMATCH) |
+ * : | : ^ |
+ * : v : | |
+ * : FLR_WAIT_GUC : | |
+ * : | : | |
+ * : done : | |
+ * : | : | |
+ * : v : | |
+ * : FLR_GUC_DONE : | |
+ * : | : | |
+ * : FLR_RESET_CONFIG---failed--->-----------o--------+-----------o
+ * : | : | |
+ * : FLR_RESET_DATA : | |
+ * : | : | |
+ * : FLR_RESET_MMIO : | |
+ * : | : | |
+ * : | o----<----busy : | |
+ * : |/ / : | |
+ * : FLR_SEND_FINISH----failed--->-----------o--------+-----------o
+ * : / \ : |
+ * : acked rejected----->-----------o--------o
+ * : / :
+ * :....o..............................o...........:
+ * | |
+ * completed restart
+ * | /
+ * V /
+ * (READY)<----------<------------o
+ *
+ * For the full state machine view, see `The VF state machine`_.
+ */
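
A minimal sketch (not part of the patch) of requesting a VF FLR through the
entry point below; the VF number is hypothetical:

	static void demo_flr(struct xe_gt *gt)
	{
		unsigned int vfid = 1;	/* hypothetical VF */
		int err;

		/* blocks for up to 5 * HZ, the FLR_WIP timeout chosen above */
		err = xe_gt_sriov_pf_control_trigger_flr(gt, vfid);
		if (err)	/* -ETIMEDOUT if still WIP, -EIO if FLR_FAILED */
			xe_gt_sriov_dbg(gt, "VF%u FLR failed (%pe)\n",
					vfid, ERR_PTR(err));
	}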
+
+static void pf_enter_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static void pf_enter_vf_flr_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) {
+ xe_gt_sriov_dbg(gt, "VF%u FLR is already in progress\n", vfid);
+ return;
+ }
+
+ pf_enter_vf_wip(gt, vfid);
+ pf_enter_vf_flr_send_start(gt, vfid);
+}
+
+static void pf_exit_vf_flr_wip(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WIP)) {
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC);
+ pf_escape_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START);
+ }
+}
+
+static void pf_enter_vf_flr_completed(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_ready(gt, vfid);
+}
+
+static void pf_enter_vf_flr_failed(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ xe_gt_sriov_notice(gt, "VF%u FLR failed!\n", vfid);
+ pf_exit_vf_wip(gt, vfid);
+}
+
+static void pf_enter_vf_flr_rejected(struct xe_gt *gt, unsigned int vfid)
+{
+ pf_enter_vf_mismatch(gt, vfid);
+ pf_enter_vf_flr_failed(gt, vfid);
+}
+
+static void pf_enter_vf_flr_send_finish(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_send_finish(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_FINISH))
+ return false;
+
+ err = pf_send_vf_flr_finish(gt, vfid);
+ if (err == -EBUSY)
+ pf_enter_vf_flr_send_finish(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_flr_rejected(gt, vfid);
+ else if (err)
+ pf_enter_vf_flr_failed(gt, vfid);
+ else
+ pf_enter_vf_flr_completed(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_reset_mmio(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_MMIO))
+ return false;
+
+ /* XXX: placeholder */
+
+ pf_enter_vf_flr_send_finish(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_reset_data(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_DATA))
+ return false;
+
+ xe_gt_sriov_pf_service_reset(gt, vfid);
+ xe_gt_sriov_pf_monitor_flr(gt, vfid);
+
+ pf_enter_vf_flr_reset_mmio(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_reset_config(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+
+ pf_queue_vf(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_reset_config(struct xe_gt *gt, unsigned int vfid)
+{
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_RESET_CONFIG);
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_RESET_CONFIG))
+ return false;
+
+ err = xe_gt_sriov_pf_config_sanitize(gt, vfid, timeout);
+ if (err)
+ pf_enter_vf_flr_failed(gt, vfid);
+ else
+ pf_enter_vf_flr_reset_data(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC))
+ pf_enter_vf_state_machine_bug(gt, vfid);
+}
+
+static bool pf_exit_vf_flr_wait_guc(struct xe_gt *gt, unsigned int vfid)
+{
+ return pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC);
+}
+
+static bool pf_exit_vf_flr_send_start(struct xe_gt *gt, unsigned int vfid)
+{
+ int err;
+
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_SEND_START))
+ return false;
+
+ /* GuC may actually send a FLR_DONE before we get a RESPONSE */
+ pf_enter_vf_flr_wait_guc(gt, vfid);
+
+ err = pf_send_vf_flr_start(gt, vfid);
+ if (err) {
+ /* send failed, so we shouldn't expect FLR_DONE from GuC */
+ pf_exit_vf_flr_wait_guc(gt, vfid);
+
+ if (err == -EBUSY)
+ pf_enter_vf_flr_send_start(gt, vfid);
+ else if (err == -EIO)
+ pf_enter_vf_flr_rejected(gt, vfid);
+ else
+ pf_enter_vf_flr_failed(gt, vfid);
+ } else {
+ /*
+		 * We may have already moved to WAIT_GUC, or even to GUC_DONE,
+		 * but since the GuC didn't complain we can clear MISMATCH.
+ */
+ pf_exit_vf_mismatch(gt, vfid);
+ }
+
+ return true;
+}
+
+static bool pf_exit_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (!pf_exit_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE))
+ return false;
+
+ pf_enter_vf_flr_reset_config(gt, vfid);
+ return true;
+}
+
+static void pf_enter_vf_flr_guc_done(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_enter_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_GUC_DONE))
+ pf_queue_vf(gt, vfid);
}
/**
@@ -140,46 +1127,56 @@ int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid)
*/
int xe_gt_sriov_pf_control_trigger_flr(struct xe_gt *gt, unsigned int vfid)
{
+ unsigned long timeout = pf_get_default_timeout(XE_GT_SRIOV_STATE_FLR_WIP);
int err;
- /* XXX pf_send_vf_flr_start() expects ct->lock */
- mutex_lock(&gt->uc.guc.ct.lock);
- err = pf_send_vf_flr_start(gt, vfid);
- mutex_unlock(&gt->uc.guc.ct.lock);
+ pf_enter_vf_flr_wip(gt, vfid);
- return err;
+ err = pf_wait_vf_wip_done(gt, vfid, timeout);
+ if (err) {
+ xe_gt_sriov_notice(gt, "VF%u FLR didn't finish in %u ms (%pe)\n",
+ vfid, jiffies_to_msecs(timeout), ERR_PTR(err));
+ return err;
+ }
+
+ if (!pf_expect_vf_not_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_FAILED))
+ return -EIO;
+
+ return 0;
}
/**
* DOC: The VF FLR Flow with GuC
*
- * PF GUC PCI
- * ========================================================
- * | | |
- * (1) | [ ] <----- FLR --|
- * | [ ] :
- * (2) [ ] <-------- NOTIFY FLR --[ ]
- * [ ] |
- * (3) [ ] |
- * [ ] |
- * [ ]-- START FLR ---------> [ ]
- * | [ ]
- * (4) | [ ]
- * | [ ]
- * [ ] <--------- FLR DONE -- [ ]
- * [ ] |
- * (5) [ ] |
- * [ ] |
- * [ ]-- FINISH FLR --------> [ ]
- * | |
- *
- * Step 1: PCI HW generates interrupt to the GuC about VF FLR
- * Step 2: GuC FW sends G2H notification to the PF about VF FLR
- * Step 2a: on some platforms G2H is only received from root GuC
- * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
- * Step 3a: on some platforms PF must send H2G to all other GuCs
- * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
- * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
+ * The VF FLR flow includes several steps::
+ *
+ * PF GUC PCI
+ * ========================================================
+ * | | |
+ * (1) | [ ] <----- FLR --|
+ * | [ ] :
+ * (2) [ ] <-------- NOTIFY FLR --[ ]
+ * [ ] |
+ * (3) [ ] |
+ * [ ] |
+ * [ ]-- START FLR ---------> [ ]
+ * | [ ]
+ * (4) | [ ]
+ * | [ ]
+ * [ ] <--------- FLR DONE -- [ ]
+ * [ ] |
+ * (5) [ ] |
+ * [ ] |
+ * [ ]-- FINISH FLR --------> [ ]
+ * | |
+ *
+ * * Step 1: PCI HW generates interrupt to the GuC about VF FLR
+ * * Step 2: GuC FW sends G2H notification to the PF about VF FLR
+ * * Step 2a: on some platforms G2H is only received from root GuC
+ * * Step 3: PF sends H2G request to the GuC to start VF FLR sequence
+ * * Step 3a: on some platforms PF must send H2G to all other GuCs
+ * * Step 4: GuC FW performs VF FLR cleanups and notifies the PF when done
+ * * Step 5: PF performs VF FLR cleanups and notifies the GuC FW when finished
*/
static bool needs_dispatch_flr(struct xe_device *xe)
@@ -197,19 +1194,41 @@ static void pf_handle_vf_flr(struct xe_gt *gt, u32 vfid)
if (needs_dispatch_flr(xe)) {
for_each_gt(gtit, xe, gtid)
- pf_send_vf_flr_start(gtit, vfid);
+ pf_enter_vf_flr_wip(gtit, vfid);
} else {
- pf_send_vf_flr_start(gt, vfid);
+ pf_enter_vf_flr_wip(gt, vfid);
}
}
static void pf_handle_vf_flr_done(struct xe_gt *gt, u32 vfid)
{
- pf_send_vf_flr_finish(gt, vfid);
+ if (!pf_exit_vf_flr_wait_guc(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "Received out of order 'VF%u FLR done'\n", vfid);
+ pf_enter_vf_mismatch(gt, vfid);
+ return;
+ }
+
+ pf_enter_vf_flr_guc_done(gt, vfid);
+}
+
+static void pf_handle_vf_pause_done(struct xe_gt *gt, u32 vfid)
+{
+ if (!pf_exit_pause_wait_guc(gt, vfid)) {
+ xe_gt_sriov_dbg(gt, "Received out of order 'VF%u PAUSE done'\n", vfid);
+ pf_enter_vf_mismatch(gt, vfid);
+ return;
+ }
+
+ pf_enter_vf_pause_guc_done(gt, vfid);
}
static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
{
+ xe_gt_sriov_dbg_verbose(gt, "received VF%u event %#x\n", vfid, eventid);
+
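+	/* events for VFs beyond the configured range indicate a protocol error */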
+ if (vfid > xe_gt_sriov_pf_get_totalvfs(gt))
+ return -EPROTO;
+
switch (eventid) {
case GUC_PF_NOTIFY_VF_FLR:
pf_handle_vf_flr(gt, vfid);
@@ -218,6 +1237,7 @@ static int pf_handle_vf_event(struct xe_gt *gt, u32 vfid, u32 eventid)
pf_handle_vf_flr_done(gt, vfid);
break;
case GUC_PF_NOTIFY_VF_PAUSE_DONE:
+ pf_handle_vf_pause_done(gt, vfid);
break;
case GUC_PF_NOTIFY_VF_FIXUP_DONE:
break;
@@ -276,3 +1296,159 @@ int xe_gt_sriov_pf_control_process_guc2pf(struct xe_gt *gt, const u32 *msg, u32
return vfid ? pf_handle_vf_event(gt, vfid, eventid) : pf_handle_pf_event(gt, eventid);
}
+
+static bool pf_process_vf_state_machine(struct xe_gt *gt, unsigned int vfid)
+{
+ if (pf_exit_vf_flr_send_start(gt, vfid))
+ return true;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_FLR_WAIT_GUC)) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
+ control_bit_to_string(XE_GT_SRIOV_STATE_FLR_WAIT_GUC));
+ return false;
+ }
+
+ if (pf_exit_vf_flr_guc_done(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_reset_config(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_reset_data(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_reset_mmio(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_flr_send_finish(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_stop_send_stop(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_pause_send_pause(gt, vfid))
+ return true;
+
+ if (pf_check_vf_state(gt, vfid, XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC)) {
+ xe_gt_sriov_dbg_verbose(gt, "VF%u in %s\n", vfid,
+ control_bit_to_string(XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC));
+ return true;
+ }
+
+ if (pf_exit_vf_pause_guc_done(gt, vfid))
+ return true;
+
+ if (pf_exit_vf_resume_send_resume(gt, vfid))
+ return true;
+
+ return false;
+}
+
+static unsigned int pf_control_state_index(struct xe_gt *gt,
+ struct xe_gt_sriov_control_state *cs)
+{
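+	/* recover the VF index from the address of the embedded control struct */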
+ return container_of(cs, struct xe_gt_sriov_metadata, control) - gt->sriov.pf.vfs;
+}
+
+static void pf_worker_find_work(struct xe_gt *gt)
+{
+ struct xe_gt_sriov_pf_control *pfc = &gt->sriov.pf.control;
+ struct xe_gt_sriov_control_state *cs;
+ unsigned int vfid;
+ bool empty;
+ bool more;
+
+ spin_lock(&pfc->lock);
+ cs = list_first_entry_or_null(&pfc->list, struct xe_gt_sriov_control_state, link);
+ if (cs)
+ list_del_init(&cs->link);
+ empty = list_empty(&pfc->list);
+ spin_unlock(&pfc->lock);
+
+ if (!cs)
+ return;
+
+ /* VF metadata structures are indexed by the VFID */
+ vfid = pf_control_state_index(gt, cs);
+ xe_gt_assert(gt, vfid <= xe_gt_sriov_pf_get_totalvfs(gt));
+
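+	/* run one state-machine step per pass; requeueing avoids starving other VFs */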
+ more = pf_process_vf_state_machine(gt, vfid);
+ if (more)
+ pf_queue_vf(gt, vfid);
+ else if (!empty)
+ pf_queue_control_worker(gt);
+}
+
+static void control_worker_func(struct work_struct *w)
+{
+ struct xe_gt *gt = container_of(w, struct xe_gt, sriov.pf.control.worker);
+
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ pf_worker_find_work(gt);
+}
+
+static void pf_stop_worker(struct xe_gt *gt)
+{
+ xe_gt_assert(gt, IS_SRIOV_PF(gt_to_xe(gt)));
+ cancel_work_sync(&gt->sriov.pf.control.worker);
+}
+
+static void control_fini_action(struct drm_device *dev, void *data)
+{
+ struct xe_gt *gt = data;
+
+ pf_stop_worker(gt);
+}
+
+/**
+ * xe_gt_sriov_pf_control_init() - Initialize PF's control data.
+ * @gt: the &xe_gt
+ *
+ * This function is for PF only.
+ *
+ * Return: 0 on success or a negative error code on failure.
+ */
+int xe_gt_sriov_pf_control_init(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, totalvfs;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ totalvfs = xe_sriov_pf_get_totalvfs(xe);
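+	/* entry 0 tracks the PF itself; VF entries span 1..totalvfs */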
+ for (n = 0; n <= totalvfs; n++) {
+ struct xe_gt_sriov_control_state *cs = pf_pick_vf_control(gt, n);
+
+ init_completion(&cs->done);
+ INIT_LIST_HEAD(&cs->link);
+ }
+
+ spin_lock_init(&gt->sriov.pf.control.lock);
+ INIT_LIST_HEAD(&gt->sriov.pf.control.list);
+ INIT_WORK(&gt->sriov.pf.control.worker, control_worker_func);
+
+ return drmm_add_action_or_reset(&xe->drm, control_fini_action, gt);
+}
+
+/**
+ * xe_gt_sriov_pf_control_restart() - Restart SR-IOV control data after a GT reset.
+ * @gt: the &xe_gt
+ *
+ * Any per-VF status maintained by the PF or any ongoing VF control activity
+ * performed by the PF must be reset or cancelled when the GT is reset.
+ *
+ * This function is for PF only.
+ */
+void xe_gt_sriov_pf_control_restart(struct xe_gt *gt)
+{
+ struct xe_device *xe = gt_to_xe(gt);
+ unsigned int n, totalvfs;
+
+ xe_gt_assert(gt, IS_SRIOV_PF(xe));
+
+ pf_stop_worker(gt);
+
+ totalvfs = xe_sriov_pf_get_totalvfs(xe);
+ for (n = 1; n <= totalvfs; n++)
+ pf_enter_vf_ready(gt, n);
+}
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
index 405d1586f991..c85e64f099cc 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control.h
@@ -11,6 +11,9 @@
struct xe_gt;
+int xe_gt_sriov_pf_control_init(struct xe_gt *gt);
+void xe_gt_sriov_pf_control_restart(struct xe_gt *gt);
+
int xe_gt_sriov_pf_control_pause_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_resume_vf(struct xe_gt *gt, unsigned int vfid);
int xe_gt_sriov_pf_control_stop_vf(struct xe_gt *gt, unsigned int vfid);
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
new file mode 100644
index 000000000000..11830aafea45
--- /dev/null
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_control_types.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2024 Intel Corporation
+ */
+
+#ifndef _XE_GT_SRIOV_PF_CONTROL_TYPES_H_
+#define _XE_GT_SRIOV_PF_CONTROL_TYPES_H_
+
+#include <linux/completion.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue_types.h>
+
+/**
+ * enum xe_gt_sriov_control_bits - Various bits used by the PF to represent a VF state
+ *
+ * @XE_GT_SRIOV_STATE_WIP: indicates that some operations are in progress.
+ * @XE_GT_SRIOV_STATE_FLR_WIP: indicates that a VF FLR is in progress.
+ * @XE_GT_SRIOV_STATE_FLR_SEND_START: indicates that the PF wants to send a FLR START command.
+ * @XE_GT_SRIOV_STATE_FLR_WAIT_GUC: indicates that the PF awaits a response from the GuC.
+ * @XE_GT_SRIOV_STATE_FLR_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_FLR_RESET_CONFIG: indicates that the PF needs to clear VF's resources.
+ * @XE_GT_SRIOV_STATE_FLR_RESET_DATA: indicates that the PF needs to clear VF's data.
+ * @XE_GT_SRIOV_STATE_FLR_RESET_MMIO: indicates that the PF needs to reset VF's registers.
+ * @XE_GT_SRIOV_STATE_FLR_SEND_FINISH: indicates that the PF wants to send a FLR FINISH message.
+ * @XE_GT_SRIOV_STATE_FLR_FAILED: indicates that VF FLR sequence failed.
+ * @XE_GT_SRIOV_STATE_PAUSE_WIP: indicates that a VF pause operation is in progress.
+ * @XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE: indicates that the PF is about to send a PAUSE command.
+ * @XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC: indicates that the PF awaits a response from the GuC.
+ * @XE_GT_SRIOV_STATE_PAUSE_GUC_DONE: indicates that the PF has received a response from the GuC.
+ * @XE_GT_SRIOV_STATE_PAUSE_FAILED: indicates that a VF pause operation has failed.
+ * @XE_GT_SRIOV_STATE_PAUSED: indicates that the VF is paused.
+ * @XE_GT_SRIOV_STATE_RESUME_WIP: indicates that a VF resume operation is in progress.
+ * @XE_GT_SRIOV_STATE_RESUME_SEND_RESUME: indicates that the PF is about to send a RESUME command.
+ * @XE_GT_SRIOV_STATE_RESUME_FAILED: indicates that a VF resume operation has failed.
+ * @XE_GT_SRIOV_STATE_RESUMED: indicates that the VF was resumed.
+ * @XE_GT_SRIOV_STATE_STOP_WIP: indicates that a VF stop operation is in progress.
+ * @XE_GT_SRIOV_STATE_STOP_SEND_STOP: indicates that the PF wants to send a STOP command.
+ * @XE_GT_SRIOV_STATE_STOP_FAILED: indicates that a VF stop operation has failed.
+ * @XE_GT_SRIOV_STATE_STOPPED: indicates that the VF was stopped.
+ * @XE_GT_SRIOV_STATE_MISMATCH: indicates that the PF has detected a VF state mismatch.
+ */
+enum xe_gt_sriov_control_bits {
+ XE_GT_SRIOV_STATE_WIP = 1,
+
+ XE_GT_SRIOV_STATE_FLR_WIP,
+ XE_GT_SRIOV_STATE_FLR_SEND_START,
+ XE_GT_SRIOV_STATE_FLR_WAIT_GUC,
+ XE_GT_SRIOV_STATE_FLR_GUC_DONE,
+ XE_GT_SRIOV_STATE_FLR_RESET_CONFIG,
+ XE_GT_SRIOV_STATE_FLR_RESET_DATA,
+ XE_GT_SRIOV_STATE_FLR_RESET_MMIO,
+ XE_GT_SRIOV_STATE_FLR_SEND_FINISH,
+ XE_GT_SRIOV_STATE_FLR_FAILED,
+
+ XE_GT_SRIOV_STATE_PAUSE_WIP,
+ XE_GT_SRIOV_STATE_PAUSE_SEND_PAUSE,
+ XE_GT_SRIOV_STATE_PAUSE_WAIT_GUC,
+ XE_GT_SRIOV_STATE_PAUSE_GUC_DONE,
+ XE_GT_SRIOV_STATE_PAUSE_FAILED,
+ XE_GT_SRIOV_STATE_PAUSED,
+
+ XE_GT_SRIOV_STATE_RESUME_WIP,
+ XE_GT_SRIOV_STATE_RESUME_SEND_RESUME,
+ XE_GT_SRIOV_STATE_RESUME_FAILED,
+ XE_GT_SRIOV_STATE_RESUMED,
+
+ XE_GT_SRIOV_STATE_STOP_WIP,
+ XE_GT_SRIOV_STATE_STOP_SEND_STOP,
+ XE_GT_SRIOV_STATE_STOP_FAILED,
+ XE_GT_SRIOV_STATE_STOPPED,
+
+ XE_GT_SRIOV_STATE_MISMATCH = BITS_PER_LONG - 1,
+};
+
+/**
+ * struct xe_gt_sriov_control_state - GT-level per-VF control state.
+ *
+ * Used by the PF driver to maintain per-VF control data.
+ */
+struct xe_gt_sriov_control_state {
+ /** @state: VF state bits */
+ unsigned long state;
+
+ /** @done: completion of async operations */
+ struct completion done;
+
+ /** @link: link into worker list */
+ struct list_head link;
+};
+
+/**
+ * struct xe_gt_sriov_pf_control - GT-level control data.
+ *
+ * Used by the PF driver to maintain its global control data.
+ */
+struct xe_gt_sriov_pf_control {
+	/** @worker: worker that executes VF operations */
+ struct work_struct worker;
+
+	/** @list: list of VF entries that have pending work */
+ struct list_head list;
+
+ /** @lock: protects VF pending list */
+ spinlock_t lock;
+};
+
+#endif
diff --git a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
index 40cbaea3ef44..28e1b130bf87 100644
--- a/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_sriov_pf_types.h
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include "xe_gt_sriov_pf_config_types.h"
+#include "xe_gt_sriov_pf_control_types.h"
#include "xe_gt_sriov_pf_monitor_types.h"
#include "xe_gt_sriov_pf_policy_types.h"
#include "xe_gt_sriov_pf_service_types.h"
@@ -23,6 +24,9 @@ struct xe_gt_sriov_metadata {
/** @monitor: per-VF monitoring data. */
struct xe_gt_sriov_monitor monitor;
+ /** @control: per-VF control data. */
+ struct xe_gt_sriov_control_state control;
+
/** @version: negotiated VF/PF ABI version */
struct xe_gt_sriov_pf_service_version version;
};
@@ -30,12 +34,14 @@ struct xe_gt_sriov_metadata {
/**
* struct xe_gt_sriov_pf - GT level PF virtualization data.
* @service: service data.
+ * @control: control data.
* @policy: policy data.
* @spare: PF-only provisioning configuration.
* @vfs: metadata for all VFs.
*/
struct xe_gt_sriov_pf {
struct xe_gt_sriov_pf_service service;
+ struct xe_gt_sriov_pf_control control;
struct xe_gt_sriov_pf_policy policy;
struct xe_gt_sriov_spare_config spare;
struct xe_gt_sriov_metadata *vfs;
diff --git a/drivers/gpu/drm/xe/xe_gt_types.h b/drivers/gpu/drm/xe/xe_gt_types.h
index 31946d7fe701..3d1c51de0268 100644
--- a/drivers/gpu/drm/xe/xe_gt_types.h
+++ b/drivers/gpu/drm/xe/xe_gt_types.h
@@ -329,12 +329,6 @@ struct xe_gt {
/** @eclass: per hardware engine class interface on the GT */
struct xe_hw_engine_class_intf eclass[XE_ENGINE_CLASS_MAX];
- /** @pcode: GT's PCODE */
- struct {
- /** @pcode.lock: protecting GT's PCODE mailbox data */
- struct mutex lock;
- } pcode;
-
/** @sysfs: sysfs' kobj used by xe_gt_sysfs */
struct kobject *sysfs;
diff --git a/drivers/gpu/drm/xe/xe_guc_pc.c b/drivers/gpu/drm/xe/xe_guc_pc.c
index def503abeed5..034b29984d5e 100644
--- a/drivers/gpu/drm/xe/xe_guc_pc.c
+++ b/drivers/gpu/drm/xe/xe_guc_pc.c
@@ -915,7 +915,7 @@ static void pc_init_pcode_freq(struct xe_guc_pc *pc)
u32 min = DIV_ROUND_CLOSEST(pc->rpn_freq, GT_FREQUENCY_MULTIPLIER);
u32 max = DIV_ROUND_CLOSEST(pc->rp0_freq, GT_FREQUENCY_MULTIPLIER);
- XE_WARN_ON(xe_pcode_init_min_freq_table(pc_to_gt(pc), min, max));
+ XE_WARN_ON(xe_pcode_init_min_freq_table(gt_to_tile(pc_to_gt(pc)), min, max));
}
static int pc_init_freqs(struct xe_guc_pc *pc)
diff --git a/drivers/gpu/drm/xe/xe_hw_engine.c b/drivers/gpu/drm/xe/xe_hw_engine.c
index 18980238a2ea..c9c3beb3ce8d 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine.c
+++ b/drivers/gpu/drm/xe/xe_hw_engine.c
@@ -8,7 +8,7 @@
#include <linux/nospec.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
@@ -273,7 +273,6 @@ static void hw_engine_fini(void *arg)
if (hwe->exl_port)
xe_execlist_port_destroy(hwe->exl_port);
- xe_lrc_put(hwe->kernel_lrc);
hwe->gt = NULL;
}
@@ -558,21 +557,13 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
goto err_name;
}
- hwe->kernel_lrc = xe_lrc_create(hwe, NULL, SZ_16K);
- if (IS_ERR(hwe->kernel_lrc)) {
- err = PTR_ERR(hwe->kernel_lrc);
- goto err_hwsp;
- }
-
if (!xe_device_uc_enabled(xe)) {
hwe->exl_port = xe_execlist_port_create(xe, hwe);
if (IS_ERR(hwe->exl_port)) {
err = PTR_ERR(hwe->exl_port);
- goto err_kernel_lrc;
+ goto err_hwsp;
}
- }
-
- if (xe_device_uc_enabled(xe)) {
+ } else {
/* GSCCS has a special interrupt for reset */
if (hwe->class == XE_ENGINE_CLASS_OTHER)
hwe->irq_handler = xe_gsc_hwe_irq_handler;
@@ -587,8 +578,6 @@ static int hw_engine_init(struct xe_gt *gt, struct xe_hw_engine *hwe,
return devm_add_action_or_reset(xe->drm.dev, hw_engine_fini, hwe);
-err_kernel_lrc:
- xe_lrc_put(hwe->kernel_lrc);
err_hwsp:
xe_bo_unpin_map_no_vm(hwe->hwsp);
err_name:
diff --git a/drivers/gpu/drm/xe/xe_hw_engine_types.h b/drivers/gpu/drm/xe/xe_hw_engine_types.h
index 39f24012d0f4..8be6d420ece4 100644
--- a/drivers/gpu/drm/xe/xe_hw_engine_types.h
+++ b/drivers/gpu/drm/xe/xe_hw_engine_types.h
@@ -136,8 +136,6 @@ struct xe_hw_engine {
enum xe_force_wake_domains domain;
/** @hwsp: hardware status page buffer object */
struct xe_bo *hwsp;
- /** @kernel_lrc: Kernel LRC (should be replaced /w an xe_engine) */
- struct xe_lrc *kernel_lrc;
/** @exl_port: execlists port */
struct xe_execlist_port *exl_port;
/** @fence_irq: fence IRQ to run when a hw engine IRQ is received */
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 832ea81faeee..aa11728e7e79 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -12,7 +12,6 @@
#include "regs/xe_mchbar_regs.h"
#include "regs/xe_pcode_regs.h"
#include "xe_device.h"
-#include "xe_gt.h"
#include "xe_hwmon.h"
#include "xe_mmio.h"
#include "xe_pcode.h"
@@ -65,8 +64,8 @@ struct xe_hwmon_energy_info {
struct xe_hwmon {
/** @hwmon_dev: hwmon device for xe */
struct device *hwmon_dev;
- /** @gt: primary gt */
- struct xe_gt *gt;
+ /** @xe: Xe device */
+ struct xe_device *xe;
/** @hwmon_lock: lock for rw attributes*/
struct mutex hwmon_lock;
/** @scl_shift_power: pkg power unit */
@@ -82,7 +81,7 @@ struct xe_hwmon {
static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg hwmon_reg,
int channel)
{
- struct xe_device *xe = gt_to_xe(hwmon->gt);
+ struct xe_device *xe = hwmon->xe;
switch (hwmon_reg) {
case REG_PKG_RAPL_LIMIT:
@@ -148,8 +147,9 @@ static struct xe_reg xe_hwmon_get_reg(struct xe_hwmon *hwmon, enum xe_hwmon_reg
static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
u64 reg_val, min, max;
- struct xe_device *xe = gt_to_xe(hwmon->gt);
+ struct xe_device *xe = hwmon->xe;
struct xe_reg rapl_limit, pkg_power_sku;
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
rapl_limit = xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, channel);
pkg_power_sku = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
@@ -166,7 +166,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v
mutex_lock(&hwmon->hwmon_lock);
- reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
/* Check if PL1 limit is disabled */
if (!(reg_val & PKG_PWR_LIM_1_EN)) {
*value = PL1_DISABLE;
@@ -176,7 +176,7 @@ static void xe_hwmon_power_max_read(struct xe_hwmon *hwmon, int channel, long *v
reg_val = REG_FIELD_GET(PKG_PWR_LIM_1, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
- reg_val = xe_mmio_read64_2x32(hwmon->gt, pkg_power_sku);
+ reg_val = xe_mmio_read64_2x32(mmio, pkg_power_sku);
min = REG_FIELD_GET(PKG_MIN_PWR, reg_val);
min = mul_u64_u32_shr(min, SF_POWER, hwmon->scl_shift_power);
max = REG_FIELD_GET(PKG_MAX_PWR, reg_val);
@@ -190,6 +190,7 @@ unlock:
static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
int ret = 0;
u64 reg_val;
struct xe_reg rapl_limit;
@@ -200,10 +201,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Disable PL1 limit and verify, as limit cannot be disabled on all platforms */
if (value == PL1_DISABLE) {
- reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0);
- reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN, 0);
+ reg_val = xe_mmio_read32(mmio, rapl_limit);
if (reg_val & PKG_PWR_LIM_1_EN) {
- drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n");
+ drm_warn(&hwmon->xe->drm, "PL1 disable is not supported!\n");
ret = -EOPNOTSUPP;
}
goto unlock;
@@ -212,7 +213,7 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
/* Computation in 64-bits to avoid overflow. Round to nearest. */
reg_val = DIV_ROUND_CLOSEST_ULL((u64)value << hwmon->scl_shift_power, SF_POWER);
reg_val = PKG_PWR_LIM_1_EN | REG_FIELD_PREP(PKG_PWR_LIM_1, reg_val);
- reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
+ reg_val = xe_mmio_rmw32(mmio, rapl_limit, PKG_PWR_LIM_1_EN | PKG_PWR_LIM_1, reg_val);
unlock:
mutex_unlock(&hwmon->hwmon_lock);
@@ -221,6 +222,7 @@ unlock:
static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, long *value)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
struct xe_reg reg = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU, channel);
u64 reg_val;
@@ -229,7 +231,7 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
* for this register can be skipped.
* See xe_hwmon_power_is_visible.
*/
- reg_val = xe_mmio_read32(hwmon->gt, reg);
+ reg_val = xe_mmio_read32(mmio, reg);
reg_val = REG_FIELD_GET(PKG_TDP, reg_val);
*value = mul_u64_u32_shr(reg_val, SF_POWER, hwmon->scl_shift_power);
}
@@ -257,11 +259,12 @@ static void xe_hwmon_power_rated_max_read(struct xe_hwmon *hwmon, int channel, l
static void
xe_hwmon_energy_get(struct xe_hwmon *hwmon, int channel, long *energy)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
struct xe_hwmon_energy_info *ei = &hwmon->ei[channel];
u64 reg_val;
- reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
- channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_ENERGY_STATUS,
+ channel));
if (reg_val >= ei->reg_val_prev)
ei->accum_energy += reg_val - ei->reg_val_prev;
@@ -279,19 +282,20 @@ xe_hwmon_power_max_interval_show(struct device *dev, struct device_attribute *at
char *buf)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u32 x, y, x_w = 2; /* 2 bits */
u64 r, tau4, out;
int sensor_index = to_sensor_dev_attr(attr)->index;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
+ r = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index));
mutex_unlock(&hwmon->hwmon_lock);
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
x = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_X, r);
y = REG_FIELD_GET(PKG_PWR_LIM_1_TIME_Y, r);
@@ -319,6 +323,7 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
const char *buf, size_t count)
{
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u32 x, y, rxy, x_w = 2; /* 2 bits */
u64 tau4, r, max_win;
unsigned long val;
@@ -371,16 +376,16 @@ xe_hwmon_power_max_interval_store(struct device *dev, struct device_attribute *a
rxy = REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_X, x) | REG_FIELD_PREP(PKG_PWR_LIM_1_TIME_Y, y);
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
mutex_lock(&hwmon->hwmon_lock);
- r = xe_mmio_rmw32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
+ r = xe_mmio_rmw32(mmio, xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, sensor_index),
PKG_PWR_LIM_1_TIME, rxy);
mutex_unlock(&hwmon->hwmon_lock);
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return count;
}
@@ -406,11 +411,11 @@ static umode_t xe_hwmon_attributes_visible(struct kobject *kobj,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret = 0;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
ret = xe_reg_is_valid(xe_hwmon_get_reg(hwmon, REG_PKG_RAPL_LIMIT, index)) ? attr->mode : 0;
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -435,22 +440,26 @@ static const struct hwmon_channel_info * const hwmon_info[] = {
};
/* I1 is exposed as power_crit or as curr_crit depending on bit 31 */
-static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
+static int xe_hwmon_pcode_read_i1(const struct xe_hwmon *hwmon, u32 *uval)
{
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
/* Avoid Illegal Subcommand error */
- if (gt_to_xe(gt)->info.platform == XE_DG2)
+ if (hwmon->xe->info.platform == XE_DG2)
return -ENXIO;
- return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
+ return xe_pcode_read(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_READ_I1, 0),
uval, NULL);
}
-static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
+static int xe_hwmon_pcode_write_i1(const struct xe_hwmon *hwmon, u32 uval)
{
- return xe_pcode_write(gt, PCODE_MBOX(PCODE_POWER_SETUP,
+ struct xe_tile *root_tile = xe_device_get_root_tile(hwmon->xe);
+
+ return xe_pcode_write(root_tile, PCODE_MBOX(PCODE_POWER_SETUP,
POWER_SETUP_SUBCOMMAND_WRITE_I1, 0),
- uval);
+ (uval & POWER_SETUP_I1_DATA_MASK));
}
static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
@@ -461,7 +470,7 @@ static int xe_hwmon_power_curr_crit_read(struct xe_hwmon *hwmon, int channel,
mutex_lock(&hwmon->hwmon_lock);
- ret = xe_hwmon_pcode_read_i1(hwmon->gt, &uval);
+ ret = xe_hwmon_pcode_read_i1(hwmon, &uval);
if (ret)
goto unlock;
@@ -481,7 +490,7 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
mutex_lock(&hwmon->hwmon_lock);
uval = DIV_ROUND_CLOSEST_ULL(value << POWER_SETUP_I1_SHIFT, scale_factor);
- ret = xe_hwmon_pcode_write_i1(hwmon->gt, uval);
+ ret = xe_hwmon_pcode_write_i1(hwmon, uval);
mutex_unlock(&hwmon->hwmon_lock);
return ret;
@@ -489,9 +498,10 @@ static int xe_hwmon_power_curr_crit_write(struct xe_hwmon *hwmon, int channel,
static void xe_hwmon_get_voltage(struct xe_hwmon *hwmon, int channel, long *value)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);
u64 reg_val;
- reg_val = xe_mmio_read32(hwmon->gt, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
+ reg_val = xe_mmio_read32(mmio, xe_hwmon_get_reg(hwmon, REG_GT_PERF_STATUS, channel));
/* HW register value in units of 2.5 millivolt */
*value = DIV_ROUND_CLOSEST(REG_FIELD_GET(VOLTAGE_MASK, reg_val) * 2500, SF_VOLTAGE);
}
@@ -510,7 +520,7 @@ xe_hwmon_power_is_visible(struct xe_hwmon *hwmon, u32 attr, int channel)
channel)) ? 0444 : 0;
case hwmon_power_crit:
if (channel == CHANNEL_PKG)
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
!(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
break;
case hwmon_power_label:
@@ -563,10 +573,10 @@ xe_hwmon_curr_is_visible(const struct xe_hwmon *hwmon, u32 attr, int channel)
switch (attr) {
case hwmon_curr_crit:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0644;
case hwmon_curr_label:
- return (xe_hwmon_pcode_read_i1(hwmon->gt, &uval) ||
+ return (xe_hwmon_pcode_read_i1(hwmon, &uval) ||
(uval & POWER_SETUP_I1_WATTS)) ? 0 : 0444;
break;
default:
@@ -654,7 +664,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
struct xe_hwmon *hwmon = (struct xe_hwmon *)drvdata;
int ret;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@@ -674,7 +684,7 @@ xe_hwmon_is_visible(const void *drvdata, enum hwmon_sensor_types type,
break;
}
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -686,7 +696,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@@ -706,7 +716,7 @@ xe_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -718,7 +728,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
struct xe_hwmon *hwmon = dev_get_drvdata(dev);
int ret;
- xe_pm_runtime_get(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_get(hwmon->xe);
switch (type) {
case hwmon_power:
@@ -732,7 +742,7 @@ xe_hwmon_write(struct device *dev, enum hwmon_sensor_types type, u32 attr,
break;
}
- xe_pm_runtime_put(gt_to_xe(hwmon->gt));
+ xe_pm_runtime_put(hwmon->xe);
return ret;
}
@@ -771,6 +781,7 @@ static const struct hwmon_chip_info hwmon_chip_info = {
static void
xe_hwmon_get_preregistration_info(struct xe_device *xe)
{
+ struct xe_gt *mmio = xe_root_mmio_gt(xe);
struct xe_hwmon *hwmon = xe->hwmon;
long energy;
u64 val_sku_unit = 0;
@@ -783,7 +794,7 @@ xe_hwmon_get_preregistration_info(struct xe_device *xe)
*/
pkg_power_sku_unit = xe_hwmon_get_reg(hwmon, REG_PKG_POWER_SKU_UNIT, 0);
if (xe_reg_is_valid(pkg_power_sku_unit)) {
- val_sku_unit = xe_mmio_read32(hwmon->gt, pkg_power_sku_unit);
+ val_sku_unit = xe_mmio_read32(mmio, pkg_power_sku_unit);
hwmon->scl_shift_power = REG_FIELD_GET(PKG_PWR_UNIT, val_sku_unit);
hwmon->scl_shift_energy = REG_FIELD_GET(PKG_ENERGY_UNIT, val_sku_unit);
hwmon->scl_shift_time = REG_FIELD_GET(PKG_TIME_UNIT, val_sku_unit);
@@ -828,8 +839,8 @@ void xe_hwmon_register(struct xe_device *xe)
if (devm_add_action_or_reset(dev, xe_hwmon_mutex_destroy, hwmon))
return;
- /* primary GT to access device level properties */
- hwmon->gt = xe->tiles[0].primary_gt;
+ /* There's only one instance of hwmon per device */
+ hwmon->xe = xe;
xe_hwmon_get_preregistration_info(xe);
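
The hwmon rework above drops the cached GT pointer in favour of the device: MMIO goes through the root GT and pcode through the root tile, both derived on demand. A minimal sketch of the resulting shape (not the literal driver code; only helpers named in the hunks are used, and the struct layout is abbreviated):

struct xe_hwmon_sketch {
	struct xe_device *xe;	/* was: struct xe_gt *gt; one hwmon per device */
	/* ... */
};

static u32 hwmon_read_reg_sketch(struct xe_hwmon_sketch *hwmon, struct xe_reg reg)
{
	/* device-level registers are still read through the root MMIO GT */
	struct xe_gt *mmio = xe_root_mmio_gt(hwmon->xe);

	return xe_mmio_read32(mmio, reg);
}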
diff --git a/drivers/gpu/drm/xe/xe_migrate.c b/drivers/gpu/drm/xe/xe_migrate.c
index cbf54be224c9..cfd31ae49cc1 100644
--- a/drivers/gpu/drm/xe/xe_migrate.c
+++ b/drivers/gpu/drm/xe/xe_migrate.c
@@ -10,7 +10,7 @@
#include <drm/drm_managed.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
diff --git a/drivers/gpu/drm/xe/xe_oa.c b/drivers/gpu/drm/xe/xe_oa.c
index 4d4541e0b24c..63286ed8457f 100644
--- a/drivers/gpu/drm/xe/xe_oa.c
+++ b/drivers/gpu/drm/xe/xe_oa.c
@@ -10,7 +10,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "abi/guc_actions_slpc_abi.h"
#include "instructions/xe_mi_commands.h"
diff --git a/drivers/gpu/drm/xe/xe_oa_types.h b/drivers/gpu/drm/xe/xe_oa_types.h
index 540c3ec53a6d..8862eca73fbe 100644
--- a/drivers/gpu/drm/xe/xe_oa_types.h
+++ b/drivers/gpu/drm/xe/xe_oa_types.h
@@ -11,7 +11,7 @@
#include <linux/mutex.h>
#include <linux/types.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "regs/xe_reg_defs.h"
#include "xe_hw_engine_types.h"
diff --git a/drivers/gpu/drm/xe/xe_observation.c b/drivers/gpu/drm/xe/xe_observation.c
index a78c92a44ec2..8ec1b84cbb9e 100644
--- a/drivers/gpu/drm/xe/xe_observation.c
+++ b/drivers/gpu/drm/xe/xe_observation.c
@@ -6,7 +6,7 @@
#include <linux/errno.h>
#include <linux/sysctl.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_oa.h"
#include "xe_observation.h"
diff --git a/drivers/gpu/drm/xe/xe_pat.c b/drivers/gpu/drm/xe/xe_pat.c
index 722278cc23fc..f291a1730024 100644
--- a/drivers/gpu/drm/xe/xe_pat.c
+++ b/drivers/gpu/drm/xe/xe_pat.c
@@ -5,7 +5,7 @@
#include "xe_pat.h"
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <generated/xe_wa_oob.h>
diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c
index f276194d9c4e..937c3e064f0d 100644
--- a/drivers/gpu/drm/xe/xe_pci.c
+++ b/drivers/gpu/drm/xe/xe_pci.c
@@ -793,7 +793,7 @@ static int xe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (IS_ERR(xe))
return PTR_ERR(xe);
- pci_set_drvdata(pdev, xe);
+ pci_set_drvdata(pdev, &xe->drm);
xe_pm_assert_unbounded_bridge(xe);
subplatform_desc = find_subplatform(xe, desc);
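
With drvdata now holding the drm_device rather than the xe_device, generic DRM/PCI helpers can use it directly; driver code recovers the xe object via the container helper. A hypothetical retrieval sketch, assuming to_xe_device() from xe_device.h:

static struct xe_device *pdev_to_xe_sketch(struct pci_dev *pdev)
{
	struct drm_device *drm = pci_get_drvdata(pdev);	/* set in xe_pci_probe() above */

	return drm ? to_xe_device(drm) : NULL;
}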
diff --git a/drivers/gpu/drm/xe/xe_pcode.c b/drivers/gpu/drm/xe/xe_pcode.c
index 9c4eefdf6642..7397d556996a 100644
--- a/drivers/gpu/drm/xe/xe_pcode.c
+++ b/drivers/gpu/drm/xe/xe_pcode.c
@@ -12,7 +12,6 @@
#include "xe_assert.h"
#include "xe_device.h"
-#include "xe_gt.h"
#include "xe_mmio.h"
#include "xe_pcode_api.h"
@@ -30,7 +29,7 @@
* - PCODE for display operations
*/
-static int pcode_mailbox_status(struct xe_gt *gt)
+static int pcode_mailbox_status(struct xe_tile *tile)
{
u32 err;
static const struct pcode_err_decode err_decode[] = {
@@ -45,9 +44,9 @@ static int pcode_mailbox_status(struct xe_gt *gt)
[PCODE_ERROR_MASK] = {-EPROTO, "Unknown"},
};
- err = xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
+ err = xe_mmio_read32(tile->primary_gt, PCODE_MAILBOX) & PCODE_ERROR_MASK;
if (err) {
- drm_err(&gt_to_xe(gt)->drm, "PCODE Mailbox failed: %d %s", err,
+ drm_err(&tile_to_xe(tile)->drm, "PCODE Mailbox failed: %d %s", err,
err_decode[err].str ?: "Unknown");
return err_decode[err].errno ?: -EPROTO;
}
@@ -55,84 +54,85 @@ static int pcode_mailbox_status(struct xe_gt *gt)
return 0;
}
-static int __pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+static int __pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
unsigned int timeout_ms, bool return_data,
bool atomic)
{
+ struct xe_gt *mmio = tile->primary_gt;
int err;
- if (gt_to_xe(gt)->info.skip_pcode)
+ if (tile_to_xe(tile)->info.skip_pcode)
return 0;
- if ((xe_mmio_read32(gt, PCODE_MAILBOX) & PCODE_READY) != 0)
+ if ((xe_mmio_read32(mmio, PCODE_MAILBOX) & PCODE_READY) != 0)
return -EAGAIN;
- xe_mmio_write32(gt, PCODE_DATA0, *data0);
- xe_mmio_write32(gt, PCODE_DATA1, data1 ? *data1 : 0);
- xe_mmio_write32(gt, PCODE_MAILBOX, PCODE_READY | mbox);
+ xe_mmio_write32(mmio, PCODE_DATA0, *data0);
+ xe_mmio_write32(mmio, PCODE_DATA1, data1 ? *data1 : 0);
+ xe_mmio_write32(mmio, PCODE_MAILBOX, PCODE_READY | mbox);
- err = xe_mmio_wait32(gt, PCODE_MAILBOX, PCODE_READY, 0,
+ err = xe_mmio_wait32(mmio, PCODE_MAILBOX, PCODE_READY, 0,
timeout_ms * USEC_PER_MSEC, NULL, atomic);
if (err)
return err;
if (return_data) {
- *data0 = xe_mmio_read32(gt, PCODE_DATA0);
+ *data0 = xe_mmio_read32(mmio, PCODE_DATA0);
if (data1)
- *data1 = xe_mmio_read32(gt, PCODE_DATA1);
+ *data1 = xe_mmio_read32(mmio, PCODE_DATA1);
}
- return pcode_mailbox_status(gt);
+ return pcode_mailbox_status(tile);
}
-static int pcode_mailbox_rw(struct xe_gt *gt, u32 mbox, u32 *data0, u32 *data1,
+static int pcode_mailbox_rw(struct xe_tile *tile, u32 mbox, u32 *data0, u32 *data1,
unsigned int timeout_ms, bool return_data,
bool atomic)
{
- if (gt_to_xe(gt)->info.skip_pcode)
+ if (tile_to_xe(tile)->info.skip_pcode)
return 0;
- lockdep_assert_held(&gt->pcode.lock);
+ lockdep_assert_held(&tile->pcode.lock);
- return __pcode_mailbox_rw(gt, mbox, data0, data1, timeout_ms, return_data, atomic);
+ return __pcode_mailbox_rw(tile, mbox, data0, data1, timeout_ms, return_data, atomic);
}
-int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 data, int timeout)
+int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 data, int timeout)
{
int err;
- mutex_lock(&gt->pcode.lock);
- err = pcode_mailbox_rw(gt, mbox, &data, NULL, timeout, false, false);
- mutex_unlock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
+ err = pcode_mailbox_rw(tile, mbox, &data, NULL, timeout, false, false);
+ mutex_unlock(&tile->pcode.lock);
return err;
}
-int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1)
+int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1)
{
int err;
- mutex_lock(&gt->pcode.lock);
- err = pcode_mailbox_rw(gt, mbox, val, val1, 1, true, false);
- mutex_unlock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
+ err = pcode_mailbox_rw(tile, mbox, val, val1, 1, true, false);
+ mutex_unlock(&tile->pcode.lock);
return err;
}
-static int pcode_try_request(struct xe_gt *gt, u32 mbox,
+static int pcode_try_request(struct xe_tile *tile, u32 mbox,
u32 request, u32 reply_mask, u32 reply,
u32 *status, bool atomic, int timeout_us, bool locked)
{
int slept, wait = 10;
- xe_gt_assert(gt, timeout_us > 0);
+ xe_tile_assert(tile, timeout_us > 0);
for (slept = 0; slept < timeout_us; slept += wait) {
if (locked)
- *status = pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ *status = pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
atomic);
else
- *status = __pcode_mailbox_rw(gt, mbox, &request, NULL, 1, true,
+ *status = __pcode_mailbox_rw(tile, mbox, &request, NULL, 1, true,
atomic);
if ((*status == 0) && ((request & reply_mask) == reply))
return 0;
@@ -149,7 +149,7 @@ static int pcode_try_request(struct xe_gt *gt, u32 mbox,
/**
* xe_pcode_request - send PCODE request until acknowledgment
- * @gt: gt
+ * @tile: tile
* @mbox: PCODE mailbox ID the request is targeted for
* @request: request ID
* @reply_mask: mask used to check for request acknowledgment
@@ -166,17 +166,17 @@ static int pcode_try_request(struct xe_gt *gt, u32 mbox,
* Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
* other error as reported by PCODE.
*/
-int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
- u32 reply_mask, u32 reply, int timeout_base_ms)
+int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
+ u32 reply_mask, u32 reply, int timeout_base_ms)
{
u32 status;
int ret;
- xe_gt_assert(gt, timeout_base_ms <= 3);
+ xe_tile_assert(tile, timeout_base_ms <= 3);
- mutex_lock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
- ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
false, timeout_base_ms * 1000, true);
if (!ret)
goto out;
@@ -191,20 +191,20 @@ int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
* requests, and for any quirks of the PCODE firmware that delay
* the request completion.
*/
- drm_err(&gt_to_xe(gt)->drm,
+ drm_err(&tile_to_xe(tile)->drm,
"PCODE timeout, retrying with preemption disabled\n");
preempt_disable();
- ret = pcode_try_request(gt, mbox, request, reply_mask, reply, &status,
+ ret = pcode_try_request(tile, mbox, request, reply_mask, reply, &status,
true, 50 * 1000, true);
preempt_enable();
out:
- mutex_unlock(&gt->pcode.lock);
+ mutex_unlock(&tile->pcode.lock);
return status ? status : ret;
}
/**
* xe_pcode_init_min_freq_table - Initialize PCODE's QOS frequency table
- * @gt: gt instance
+ * @tile: tile instance
* @min_gt_freq: Minimal (RPn) GT frequency in units of 50MHz.
* @max_gt_freq: Maximal (RP0) GT frequency in units of 50MHz.
*
@@ -227,30 +227,30 @@ out:
* - -EACCES, "PCODE Rejected"
* - -EPROTO, "Unknown"
*/
-int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
+int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
u32 max_gt_freq)
{
int ret;
u32 freq;
- if (!gt_to_xe(gt)->info.has_llc)
+ if (!tile_to_xe(tile)->info.has_llc)
return 0;
if (max_gt_freq <= min_gt_freq)
return -EINVAL;
- mutex_lock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
for (freq = min_gt_freq; freq <= max_gt_freq; freq++) {
u32 data = freq << PCODE_FREQ_RING_RATIO_SHIFT | freq;
- ret = pcode_mailbox_rw(gt, PCODE_WRITE_MIN_FREQ_TABLE,
+ ret = pcode_mailbox_rw(tile, PCODE_WRITE_MIN_FREQ_TABLE,
&data, NULL, 1, false, false);
if (ret)
goto unlock;
}
unlock:
- mutex_unlock(&gt->pcode.lock);
+ mutex_unlock(&tile->pcode.lock);
return ret;
}
@@ -270,7 +270,7 @@ unlock:
int xe_pcode_ready(struct xe_device *xe, bool locked)
{
u32 status, request = DGFX_GET_INIT_STATUS;
- struct xe_gt *gt = xe_root_mmio_gt(xe);
+ struct xe_tile *tile = xe_device_get_root_tile(xe);
int timeout_us = 180000000; /* 3 min */
int ret;
@@ -281,15 +281,15 @@ int xe_pcode_ready(struct xe_device *xe, bool locked)
return 0;
if (locked)
- mutex_lock(&gt->pcode.lock);
+ mutex_lock(&tile->pcode.lock);
- ret = pcode_try_request(gt, DGFX_PCODE_STATUS, request,
+ ret = pcode_try_request(tile, DGFX_PCODE_STATUS, request,
DGFX_INIT_STATUS_COMPLETE,
DGFX_INIT_STATUS_COMPLETE,
&status, false, timeout_us, locked);
if (locked)
- mutex_unlock(&gt->pcode.lock);
+ mutex_unlock(&tile->pcode.lock);
if (ret)
drm_err(&xe->drm,
@@ -300,14 +300,14 @@ int xe_pcode_ready(struct xe_device *xe, bool locked)
/**
* xe_pcode_init - initialize components of PCODE
- * @gt: gt instance
+ * @tile: tile instance
*
* This function initializes the xe_pcode component.
* To be called once only during probe.
*/
-void xe_pcode_init(struct xe_gt *gt)
+void xe_pcode_init(struct xe_tile *tile)
{
- drmm_mutex_init(&gt_to_xe(gt)->drm, &gt->pcode.lock);
+ drmm_mutex_init(&tile_to_xe(tile)->drm, &tile->pcode.lock);
}
/**
diff --git a/drivers/gpu/drm/xe/xe_pcode.h b/drivers/gpu/drm/xe/xe_pcode.h
index 3f54c6d2a57d..ba33991d72a7 100644
--- a/drivers/gpu/drm/xe/xe_pcode.h
+++ b/drivers/gpu/drm/xe/xe_pcode.h
@@ -7,21 +7,21 @@
#define _XE_PCODE_H_
#include <linux/types.h>
-struct xe_gt;
+struct xe_tile;
struct xe_device;
-void xe_pcode_init(struct xe_gt *gt);
+void xe_pcode_init(struct xe_tile *tile);
int xe_pcode_probe_early(struct xe_device *xe);
int xe_pcode_ready(struct xe_device *xe, bool locked);
-int xe_pcode_init_min_freq_table(struct xe_gt *gt, u32 min_gt_freq,
+int xe_pcode_init_min_freq_table(struct xe_tile *tile, u32 min_gt_freq,
u32 max_gt_freq);
-int xe_pcode_read(struct xe_gt *gt, u32 mbox, u32 *val, u32 *val1);
-int xe_pcode_write_timeout(struct xe_gt *gt, u32 mbox, u32 val,
+int xe_pcode_read(struct xe_tile *tile, u32 mbox, u32 *val, u32 *val1);
+int xe_pcode_write_timeout(struct xe_tile *tile, u32 mbox, u32 val,
int timeout_ms);
-#define xe_pcode_write(gt, mbox, val) \
- xe_pcode_write_timeout(gt, mbox, val, 1)
+#define xe_pcode_write(tile, mbox, val) \
+ xe_pcode_write_timeout(tile, mbox, val, 1)
-int xe_pcode_request(struct xe_gt *gt, u32 mbox, u32 request,
+int xe_pcode_request(struct xe_tile *tile, u32 mbox, u32 request,
u32 reply_mask, u32 reply, int timeout_ms);
#define PCODE_MBOX(mbcmd, param1, param2)\
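
After this conversion every pcode entry point keys off a tile, since the mailbox state (lock and registers) now lives per tile rather than per GT. A minimal caller sketch under the reworked API (the mailbox value is illustrative only):

static int pcode_read_sketch(struct xe_device *xe, u32 mbox, u32 *val)
{
	struct xe_tile *root_tile = xe_device_get_root_tile(xe);

	return xe_pcode_read(root_tile, mbox, val, NULL);
}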
diff --git a/drivers/gpu/drm/xe/xe_pm.c b/drivers/gpu/drm/xe/xe_pm.c
index 2e2accd76fb2..e518557e0eec 100644
--- a/drivers/gpu/drm/xe/xe_pm.c
+++ b/drivers/gpu/drm/xe/xe_pm.c
@@ -79,7 +79,14 @@ static struct lockdep_map xe_pm_runtime_nod3cold_map = {
};
#endif
-static bool __maybe_unused xe_rpm_reclaim_safe(const struct xe_device *xe)
+/**
+ * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
+ * @xe: The xe device.
+ *
+ * Return: true if it is safe to runtime resume from reclaim context,
+ * false otherwise.
+ */
+bool xe_rpm_reclaim_safe(const struct xe_device *xe)
{
return !xe->d3cold.capable && !xe->info.has_sriov;
}
@@ -392,8 +399,6 @@ int xe_pm_runtime_suspend(struct xe_device *xe)
xe_display_pm_runtime_suspend(xe);
if (xe->d3cold.allowed) {
- xe_display_pm_suspend(xe, true);
-
err = xe_bo_evict_all(xe);
if (err)
goto out;
diff --git a/drivers/gpu/drm/xe/xe_pm.h b/drivers/gpu/drm/xe/xe_pm.h
index 9aef673b1c8a..998d1ed64556 100644
--- a/drivers/gpu/drm/xe/xe_pm.h
+++ b/drivers/gpu/drm/xe/xe_pm.h
@@ -31,6 +31,7 @@ bool xe_pm_runtime_resume_and_get(struct xe_device *xe);
void xe_pm_assert_unbounded_bridge(struct xe_device *xe);
int xe_pm_set_vram_threshold(struct xe_device *xe, u32 threshold);
void xe_pm_d3cold_allowed_toggle(struct xe_device *xe);
+bool xe_rpm_reclaim_safe(const struct xe_device *xe);
struct task_struct *xe_pm_read_callback_task(struct xe_device *xe);
int xe_pm_module_init(void);
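
A hypothetical user of the newly exported predicate: choose an allocation mode depending on whether a runtime resume may be triggered from reclaim. This is an illustration, not code from the series:

static gfp_t xe_rpm_gfp_sketch(const struct xe_device *xe)
{
	/* resuming from reclaim is safe -> blocking allocations are fine */
	return xe_rpm_reclaim_safe(xe) ? GFP_KERNEL : GFP_NOWAIT;
}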
diff --git a/drivers/gpu/drm/xe/xe_pt.c b/drivers/gpu/drm/xe/xe_pt.c
index 579ed31b46db..d6353e8969f0 100644
--- a/drivers/gpu/drm/xe/xe_pt.c
+++ b/drivers/gpu/drm/xe/xe_pt.c
@@ -3,6 +3,8 @@
* Copyright © 2022 Intel Corporation
*/
+#include <linux/dma-fence-array.h>
+
#include "xe_pt.h"
#include "regs/xe_gtt_defs.h"
@@ -1627,9 +1629,11 @@ xe_pt_update_ops_rfence_interval(struct xe_vm_pgtable_update_ops *pt_update_ops,
static int vma_reserve_fences(struct xe_device *xe, struct xe_vma *vma)
{
+ int shift = xe_device_get_root_tile(xe)->media_gt ? 1 : 0;
+
if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
return dma_resv_reserve_fences(xe_vma_bo(vma)->ttm.base.resv,
- xe->info.tile_count);
+ xe->info.tile_count << shift);
return 0;
}
@@ -1816,6 +1820,7 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[tile->id];
struct xe_vma_op *op;
+ int shift = tile->media_gt ? 1 : 0;
int err;
lockdep_assert_held(&vops->vm->lock);
@@ -1824,7 +1829,7 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
xe_pt_update_ops_init(pt_update_ops);
err = dma_resv_reserve_fences(xe_vm_resv(vops->vm),
- tile_to_xe(tile)->info.tile_count);
+ tile_to_xe(tile)->info.tile_count << shift);
if (err)
return err;
@@ -1849,13 +1854,20 @@ int xe_pt_update_ops_prepare(struct xe_tile *tile, struct xe_vma_ops *vops)
static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma, struct dma_fence *fence)
+ struct xe_vma *vma, struct dma_fence *fence,
+ struct dma_fence *fence2)
{
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
+ if (fence2)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
vma->tile_present |= BIT(tile->id);
vma->tile_staged &= ~BIT(tile->id);
if (xe_vma_is_userptr(vma)) {
@@ -1875,13 +1887,20 @@ static void bind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma *vma, struct dma_fence *fence)
+ struct xe_vma *vma, struct dma_fence *fence,
+ struct dma_fence *fence2)
{
- if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm)
+ if (!xe_vma_has_no_bo(vma) && !xe_vma_bo(vma)->vm) {
dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence,
pt_update_ops->wait_vm_bookkeep ?
DMA_RESV_USAGE_KERNEL :
DMA_RESV_USAGE_BOOKKEEP);
+ if (fence2)
+ dma_resv_add_fence(xe_vma_bo(vma)->ttm.base.resv, fence2,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+ }
vma->tile_present &= ~BIT(tile->id);
if (!vma->tile_present) {
list_del_init(&vma->combined_links.rebind);
@@ -1898,7 +1917,8 @@ static void unbind_op_commit(struct xe_vm *vm, struct xe_tile *tile,
static void op_commit(struct xe_vm *vm,
struct xe_tile *tile,
struct xe_vm_pgtable_update_ops *pt_update_ops,
- struct xe_vma_op *op, struct dma_fence *fence)
+ struct xe_vma_op *op, struct dma_fence *fence,
+ struct dma_fence *fence2)
{
xe_vm_assert_held(vm);
@@ -1907,26 +1927,28 @@ static void op_commit(struct xe_vm *vm,
if (!op->map.immediate && xe_vm_in_fault_mode(vm))
break;
- bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence);
+ bind_op_commit(vm, tile, pt_update_ops, op->map.vma, fence,
+ fence2);
break;
case DRM_GPUVA_OP_REMAP:
unbind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.remap.unmap->va), fence);
+ gpuva_to_vma(op->base.remap.unmap->va), fence,
+ fence2);
if (op->remap.prev)
bind_op_commit(vm, tile, pt_update_ops, op->remap.prev,
- fence);
+ fence, fence2);
if (op->remap.next)
bind_op_commit(vm, tile, pt_update_ops, op->remap.next,
- fence);
+ fence, fence2);
break;
case DRM_GPUVA_OP_UNMAP:
unbind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.unmap.va), fence);
+ gpuva_to_vma(op->base.unmap.va), fence, fence2);
break;
case DRM_GPUVA_OP_PREFETCH:
bind_op_commit(vm, tile, pt_update_ops,
- gpuva_to_vma(op->base.prefetch.va), fence);
+ gpuva_to_vma(op->base.prefetch.va), fence, fence2);
break;
default:
drm_warn(&vm->xe->drm, "NOT POSSIBLE");
@@ -1963,7 +1985,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
struct xe_vm_pgtable_update_ops *pt_update_ops =
&vops->pt_update_ops[tile->id];
struct dma_fence *fence;
- struct invalidation_fence *ifence = NULL;
+ struct invalidation_fence *ifence = NULL, *mfence = NULL;
+ struct dma_fence **fences = NULL;
+ struct dma_fence_array *cf = NULL;
struct xe_range_fence *rfence;
struct xe_vma_op *op;
int err = 0, i;
@@ -1996,6 +2020,23 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
err = -ENOMEM;
goto kill_vm_tile1;
}
+ if (tile->media_gt) {
+ mfence = kzalloc(sizeof(*mfence), GFP_KERNEL);
+ if (!mfence) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ fences = kmalloc_array(2, sizeof(*fences), GFP_KERNEL);
+ if (!fences) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ cf = dma_fence_array_alloc(2);
+ if (!cf) {
+ err = -ENOMEM;
+ goto free_ifence;
+ }
+ }
}
rfence = kzalloc(sizeof(*rfence), GFP_KERNEL);
@@ -2027,19 +2068,50 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
/* tlb invalidation must be done before signaling rebind */
if (ifence) {
+ if (mfence)
+ dma_fence_get(fence);
invalidation_fence_init(tile->primary_gt, ifence, fence,
pt_update_ops->start,
pt_update_ops->last, vm->usm.asid);
- fence = &ifence->base.base;
+ if (mfence) {
+ invalidation_fence_init(tile->media_gt, mfence, fence,
+ pt_update_ops->start,
+ pt_update_ops->last, vm->usm.asid);
+ fences[0] = &ifence->base.base;
+ fences[1] = &mfence->base.base;
+ dma_fence_array_init(cf, 2, fences,
+ vm->composite_fence_ctx,
+ vm->composite_fence_seqno++,
+ false);
+ fence = &cf->base;
+ } else {
+ fence = &ifence->base.base;
+ }
}
- dma_resv_add_fence(xe_vm_resv(vm), fence,
- pt_update_ops->wait_vm_bookkeep ?
- DMA_RESV_USAGE_KERNEL :
- DMA_RESV_USAGE_BOOKKEEP);
+ if (!mfence) {
+ dma_resv_add_fence(xe_vm_resv(vm), fence,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
- list_for_each_entry(op, &vops->list, link)
- op_commit(vops->vm, tile, pt_update_ops, op, fence);
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op, fence, NULL);
+ } else {
+ dma_resv_add_fence(xe_vm_resv(vm), &ifence->base.base,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+
+ dma_resv_add_fence(xe_vm_resv(vm), &mfence->base.base,
+ pt_update_ops->wait_vm_bookkeep ?
+ DMA_RESV_USAGE_KERNEL :
+ DMA_RESV_USAGE_BOOKKEEP);
+
+ list_for_each_entry(op, &vops->list, link)
+ op_commit(vops->vm, tile, pt_update_ops, op,
+ &ifence->base.base, &mfence->base.base);
+ }
if (pt_update_ops->needs_userptr_lock)
up_read(&vm->userptr.notifier_lock);
@@ -2049,6 +2121,9 @@ xe_pt_update_ops_run(struct xe_tile *tile, struct xe_vma_ops *vops)
free_rfence:
kfree(rfence);
free_ifence:
+ kfree(cf);
+ kfree(fences);
+ kfree(mfence);
kfree(ifence);
kill_vm_tile1:
if (err != -EAGAIN && tile->id)
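
The dual-GT path above wraps the two TLB-invalidation fences in a dma_fence_array so a single composite fence stands for "both GTs are invalidated". A minimal sketch of that pattern (error handling trimmed; the array takes over the references held in fences[]):

static struct dma_fence *compose_fences_sketch(struct dma_fence **fences,
					       u64 context, unsigned int seqno)
{
	struct dma_fence_array *cf = dma_fence_array_alloc(2);

	if (!cf)
		return NULL;

	/* signal_on_any == false: the composite signals once *both* signal */
	dma_fence_array_init(cf, 2, fences, context, seqno, false);

	return &cf->base;
}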
diff --git a/drivers/gpu/drm/xe/xe_query.c b/drivers/gpu/drm/xe/xe_query.c
index 73ef6e4c2dc9..28d9bb3b825d 100644
--- a/drivers/gpu/drm/xe/xe_query.c
+++ b/drivers/gpu/drm/xe/xe_query.c
@@ -9,7 +9,7 @@
#include <linux/sched/clock.h>
#include <drm/ttm/ttm_placement.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "regs/xe_engine_regs.h"
#include "regs/xe_gt_regs.h"
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index e78ba324dd18..86c705d18c0d 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -7,7 +7,7 @@
#include <kunit/visibility.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_gt.h"
#include "xe_gt_topology.h"
diff --git a/drivers/gpu/drm/xe/xe_sched_job.c b/drivers/gpu/drm/xe/xe_sched_job.c
index 55d47450b2c6..eeccc1c318ae 100644
--- a/drivers/gpu/drm/xe/xe_sched_job.c
+++ b/drivers/gpu/drm/xe/xe_sched_job.c
@@ -5,7 +5,7 @@
#include "xe_sched_job.h"
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/dma-fence-chain.h>
#include <linux/slab.h>
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 436faff09bac..bb3c2a830362 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -12,7 +12,7 @@
#include <drm/drm_print.h>
#include <drm/drm_syncobj.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device_types.h"
#include "xe_exec_queue.h"
diff --git a/drivers/gpu/drm/xe/xe_tile.c b/drivers/gpu/drm/xe/xe_tile.c
index 15ea0a942f67..dda5268507d8 100644
--- a/drivers/gpu/drm/xe/xe_tile.c
+++ b/drivers/gpu/drm/xe/xe_tile.c
@@ -9,6 +9,7 @@
#include "xe_ggtt.h"
#include "xe_gt.h"
#include "xe_migrate.h"
+#include "xe_pcode.h"
#include "xe_sa.h"
#include "xe_tile.h"
#include "xe_tile_sysfs.h"
@@ -124,6 +125,8 @@ int xe_tile_init_early(struct xe_tile *tile, struct xe_device *xe, u8 id)
if (IS_ERR(tile->primary_gt))
return PTR_ERR(tile->primary_gt);
+ xe_pcode_init(tile);
+
return 0;
}
diff --git a/drivers/gpu/drm/xe/xe_uc_debugfs.c b/drivers/gpu/drm/xe/xe_uc_debugfs.c
index 78eb8db73791..24a4209051ee 100644
--- a/drivers/gpu/drm/xe/xe_uc_debugfs.c
+++ b/drivers/gpu/drm/xe/xe_uc_debugfs.c
@@ -8,6 +8,7 @@
#include <drm/drm_debugfs.h>
#include "xe_gt.h"
+#include "xe_gsc_debugfs.h"
#include "xe_guc_debugfs.h"
#include "xe_huc_debugfs.h"
#include "xe_macros.h"
@@ -23,6 +24,7 @@ void xe_uc_debugfs_register(struct xe_uc *uc, struct dentry *parent)
return;
}
+ xe_gsc_debugfs_register(&uc->gsc, root);
xe_guc_debugfs_register(&uc->guc, root);
xe_huc_debugfs_register(&uc->huc, root);
}
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.c b/drivers/gpu/drm/xe/xe_uc_fw.c
index 4bb2a4a80ddc..d431d0031185 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.c
+++ b/drivers/gpu/drm/xe/xe_uc_fw.c
@@ -129,8 +129,8 @@ struct fw_blobs_by_type {
/* for the GSC FW we match the compatibility version and not the release one */
#define XE_GSC_FIRMWARE_DEFS(fw_def, major_ver) \
- fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 1, 0, 0)) \
- fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 1, 0, 0))
+ fw_def(LUNARLAKE, major_ver(xe, gsc, lnl, 104, 1, 0)) \
+ fw_def(METEORLAKE, major_ver(i915, gsc, mtl, 102, 1, 0))
#define MAKE_FW_PATH(dir__, uc__, shortname__, version__) \
__stringify(dir__) "/" __stringify(shortname__) "_" __stringify(uc__) version__ ".bin"
@@ -141,6 +141,8 @@ struct fw_blobs_by_type {
MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(a))
#define fw_filename_no_ver(dir_, uc_, shortname_) \
MAKE_FW_PATH(dir_, uc_, shortname_, "")
+#define fw_filename_gsc(dir_, uc_, shortname_, a, b, c) \
+ MAKE_FW_PATH(dir_, uc_, shortname_, "_" __stringify(b))
#define uc_fw_entry_mmp_ver(dir_, uc_, shortname_, a, b, c) \
{ fw_filename_mmp_ver(dir_, uc_, shortname_, a, b, c), \
@@ -151,6 +153,9 @@ struct fw_blobs_by_type {
#define uc_fw_entry_no_ver(dir_, uc_, shortname_) \
{ fw_filename_no_ver(dir_, uc_, shortname_), \
0, 0 }
+#define uc_fw_entry_gsc(dir_, uc_, shortname_, a, b, c) \
+ { fw_filename_gsc(dir_, uc_, shortname_, a, b, c), \
+ a, b, c }
/* All blobs need to be declared via MODULE_FIRMWARE() */
#define XE_UC_MODULE_FIRMWARE(platform__, fw_filename) \
@@ -166,7 +171,7 @@ XE_GUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_major_ver)
XE_HUC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE,
fw_filename_mmp_ver, fw_filename_no_ver)
-XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_major_ver)
+XE_GSC_FIRMWARE_DEFS(XE_UC_MODULE_FIRMWARE, fw_filename_gsc)
static struct xe_gt *
__uc_fw_to_gt(struct xe_uc_fw *uc_fw, enum xe_uc_fw_type type)
@@ -209,7 +214,7 @@ uc_fw_auto_select(struct xe_device *xe, struct xe_uc_fw *uc_fw)
uc_fw_entry_no_ver)
};
static const struct uc_fw_entry entries_gsc[] = {
- XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_major_ver)
+ XE_GSC_FIRMWARE_DEFS(XE_UC_FW_ENTRY, uc_fw_entry_gsc)
};
static const struct fw_blobs_by_type blobs_all[XE_UC_FW_NUM_TYPES] = {
[XE_UC_FW_TYPE_GUC] = { entries_guc, ARRAY_SIZE(entries_guc) },
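
For clarity, expanding the new GSC macros for the LUNARLAKE entry above:

/*
 *   fw_filename_gsc(xe, gsc, lnl, 104, 1, 0)
 *     -> MAKE_FW_PATH(xe, gsc, lnl, "_" __stringify(1))
 *     -> "xe/lnl_gsc_1.bin"
 *
 * i.e. only __stringify(b) reaches the path, while uc_fw_entry_gsc still
 * records the full 104.1.0 triplet for the runtime version check.
 */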
diff --git a/drivers/gpu/drm/xe/xe_uc_fw.h b/drivers/gpu/drm/xe/xe_uc_fw.h
index c108e9d08e70..6195e353f269 100644
--- a/drivers/gpu/drm/xe/xe_uc_fw.h
+++ b/drivers/gpu/drm/xe/xe_uc_fw.h
@@ -65,7 +65,7 @@ const char *xe_uc_fw_status_repr(enum xe_uc_fw_status status)
return "<invalid>";
}
-static inline int xe_uc_fw_status_to_error(enum xe_uc_fw_status status)
+static inline int xe_uc_fw_status_to_error(const enum xe_uc_fw_status status)
{
switch (status) {
case XE_UC_FIRMWARE_NOT_SUPPORTED:
@@ -108,7 +108,7 @@ static inline const char *xe_uc_fw_type_repr(enum xe_uc_fw_type type)
}
static inline enum xe_uc_fw_status
-__xe_uc_fw_status(struct xe_uc_fw *uc_fw)
+__xe_uc_fw_status(const struct xe_uc_fw *uc_fw)
{
/* shouldn't call this before checking hw/blob availability */
XE_WARN_ON(uc_fw->status == XE_UC_FIRMWARE_UNINITIALIZED);
@@ -156,6 +156,11 @@ static inline bool xe_uc_fw_is_overridden(const struct xe_uc_fw *uc_fw)
return uc_fw->user_overridden;
}
+static inline bool xe_uc_fw_is_in_error_state(const struct xe_uc_fw *uc_fw)
+{
+ return xe_uc_fw_status_to_error(__xe_uc_fw_status(uc_fw)) < 0;
+}
+
static inline void xe_uc_fw_sanitize(struct xe_uc_fw *uc_fw)
{
if (xe_uc_fw_is_loadable(uc_fw))
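
A hypothetical caller of the new helper: bail out with the mapped errno when the firmware sits in any error state, without open-coding the status list.

static int fw_check_sketch(const struct xe_uc_fw *uc_fw)
{
	if (xe_uc_fw_is_in_error_state(uc_fw))
		return xe_uc_fw_status_to_error(__xe_uc_fw_status(uc_fw));

	return 0;	/* usable (or still loading) */
}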
diff --git a/drivers/gpu/drm/xe/xe_vm.c b/drivers/gpu/drm/xe/xe_vm.c
index 3eb76d874eb2..7acd5fc9d032 100644
--- a/drivers/gpu/drm/xe/xe_vm.c
+++ b/drivers/gpu/drm/xe/xe_vm.c
@@ -12,7 +12,7 @@
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include <linux/ascii85.h>
#include <linux/delay.h>
#include <linux/kthread.h>
diff --git a/drivers/gpu/drm/xe/xe_vram_freq.c b/drivers/gpu/drm/xe/xe_vram_freq.c
index 99ff95e408e0..b26e26d73dae 100644
--- a/drivers/gpu/drm/xe/xe_vram_freq.c
+++ b/drivers/gpu/drm/xe/xe_vram_freq.c
@@ -34,7 +34,6 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- struct xe_gt *gt = tile->primary_gt;
u32 val, mbox;
int err;
@@ -42,7 +41,7 @@ static ssize_t max_freq_show(struct device *dev, struct device_attribute *attr,
| REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_P0)
| REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
- err = xe_pcode_read(gt, mbox, &val, NULL);
+ err = xe_pcode_read(tile, mbox, &val, NULL);
if (err)
return err;
@@ -57,7 +56,6 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct xe_tile *tile = dev_to_tile(dev);
- struct xe_gt *gt = tile->primary_gt;
u32 val, mbox;
int err;
@@ -65,7 +63,7 @@ static ssize_t min_freq_show(struct device *dev, struct device_attribute *attr,
| REG_FIELD_PREP(PCODE_MB_PARAM1, PCODE_MBOX_FC_SC_READ_FUSED_PN)
| REG_FIELD_PREP(PCODE_MB_PARAM2, PCODE_MBOX_DOMAIN_HBM);
- err = xe_pcode_read(gt, mbox, &val, NULL);
+ err = xe_pcode_read(tile, mbox, &val, NULL);
if (err)
return err;
diff --git a/drivers/gpu/drm/xe/xe_wait_user_fence.c b/drivers/gpu/drm/xe/xe_wait_user_fence.c
index f69721339201..d46fa8374980 100644
--- a/drivers/gpu/drm/xe/xe_wait_user_fence.c
+++ b/drivers/gpu/drm/xe/xe_wait_user_fence.c
@@ -8,7 +8,7 @@
#include <drm/drm_device.h>
#include <drm/drm_file.h>
#include <drm/drm_utils.h>
-#include <drm/xe_drm.h>
+#include <uapi/drm/xe_drm.h>
#include "xe_device.h"
#include "xe_gt.h"
diff --git a/drivers/gpu/drm/xlnx/Kconfig b/drivers/gpu/drm/xlnx/Kconfig
index 68ee897de9d7..626e5ac4c33d 100644
--- a/drivers/gpu/drm/xlnx/Kconfig
+++ b/drivers/gpu/drm/xlnx/Kconfig
@@ -8,6 +8,7 @@ config DRM_ZYNQMP_DPSUB
select DMA_ENGINE
select DRM_DISPLAY_DP_HELPER
select DRM_DISPLAY_HELPER
+ select DRM_BRIDGE_CONNECTOR
select DRM_GEM_DMA_HELPER
select DRM_KMS_HELPER
select GENERIC_PHY